1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2014 Broadcom Corporation
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/firmware.h>
9 #include <linux/pci.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/bcma/bcma.h>
14 #include <linux/sched.h>
15 #include <asm/unaligned.h>
16
17 #include <soc.h>
18 #include <chipcommon.h>
19 #include <brcmu_utils.h>
20 #include <brcmu_wifi.h>
21 #include <brcm_hw_ids.h>
22
23 /* Custom brcmf_err() that takes bus arg and passes it further */
24 #define brcmf_err(bus, fmt, ...) \
25 do { \
26 if (IS_ENABLED(CONFIG_BRCMDBG) || \
27 IS_ENABLED(CONFIG_BRCM_TRACING) || \
28 net_ratelimit()) \
29 __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
30 } while (0)
31
32 #include "debug.h"
33 #include "bus.h"
34 #include "commonring.h"
35 #include "msgbuf.h"
36 #include "pcie.h"
37 #include "firmware.h"
38 #include "chip.h"
39 #include "core.h"
40 #include "common.h"
41
42
43 enum brcmf_pcie_state {
44 BRCMFMAC_PCIE_STATE_DOWN,
45 BRCMFMAC_PCIE_STATE_UP
46 };
47
48 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
49 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
50 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
51 BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
52 BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
53 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
54 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
55 BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
56 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
57 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
58 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
59 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
60 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
61
62 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
63 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
64 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
65 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
66 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
67 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
68 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
69 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
70 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
72 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
73 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
74 BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
75 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
76 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
77 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
78 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
79 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
80 BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
81 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
82 };
83
84 #define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
85
86 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
87
88 /* backplane address space accessed by BAR0 */
89 #define BRCMF_PCIE_BAR0_WINDOW 0x80
90 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
91 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
92
93 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
94 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
95
96 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
97 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
98
99 #define BRCMF_PCIE_REG_INTSTATUS 0x90
100 #define BRCMF_PCIE_REG_INTMASK 0x94
101 #define BRCMF_PCIE_REG_SBMBX 0x98
102
103 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
104
105 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
106 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
107 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
108 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
109 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
110 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
111 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
112
113 #define BRCMF_PCIE2_INTA 0x01
114 #define BRCMF_PCIE2_INTB 0x02
115
116 #define BRCMF_PCIE_INT_0 0x01
117 #define BRCMF_PCIE_INT_1 0x02
118 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
119 BRCMF_PCIE_INT_1)
120
121 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
122 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
123 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
124 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
125 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
126 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
127 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
128 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
129 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
130 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
131
132 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
133 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
134 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
135 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
136 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
137 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
138 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
139 BRCMF_PCIE_MB_INT_D2H3_DB1)
140
141 #define BRCMF_PCIE_SHARED_VERSION_7 7
142 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
143 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
144 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
145 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
146 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
147 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
148
149 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
150 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
151
152 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
153 #define BRCMF_SHARED_RING_BASE_OFFSET 52
154 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
155 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
156 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
157 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
158 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
159 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
160 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
161 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
162 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
163
164 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
165 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
166 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
167 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
168
169 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
170 #define BRCMF_RING_MAX_ITEM_OFFSET 4
171 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
172 #define BRCMF_RING_MEM_SZ 16
173 #define BRCMF_RING_STATE_SZ 8
174
175 #define BRCMF_DEF_MAX_RXBUFPOST 255
176
177 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
178 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
179 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
180
181 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
182 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
183
184 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
185 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
186 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
187 #define BRCMF_D2H_DEV_FWHALT 0x10000000
188
189 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
190 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
191 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
192 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
193
194 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
195
196 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
197 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
198 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
199 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
200 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
201 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
202 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
203 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
204 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
205 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
206 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
207 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
208 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
209
210 /* Magic number at a magic location to find RAM size */
211 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
212 #define BRCMF_RAMSIZE_OFFSET 0x6c
213
214
215 struct brcmf_pcie_console {
216 u32 base_addr;
217 u32 buf_addr;
218 u32 bufsize;
219 u32 read_idx;
220 u8 log_str[256];
221 u8 log_idx;
222 };
223
224 struct brcmf_pcie_shared_info {
225 u32 tcm_base_address;
226 u32 flags;
227 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
228 struct brcmf_pcie_ringbuf *flowrings;
229 u16 max_rxbufpost;
230 u16 max_flowrings;
231 u16 max_submissionrings;
232 u16 max_completionrings;
233 u32 rx_dataoffset;
234 u32 htod_mb_data_addr;
235 u32 dtoh_mb_data_addr;
236 u32 ring_info_addr;
237 struct brcmf_pcie_console console;
238 void *scratch;
239 dma_addr_t scratch_dmahandle;
240 void *ringupd;
241 dma_addr_t ringupd_dmahandle;
242 u8 version;
243 };
244
245 struct brcmf_pcie_core_info {
246 u32 base;
247 u32 wrapbase;
248 };
249
250 struct brcmf_pciedev_info {
251 enum brcmf_pcie_state state;
252 bool in_irq;
253 struct pci_dev *pdev;
254 char fw_name[BRCMF_FW_NAME_LEN];
255 char nvram_name[BRCMF_FW_NAME_LEN];
256 void __iomem *regs;
257 void __iomem *tcm;
258 u32 ram_base;
259 u32 ram_size;
260 struct brcmf_chip *ci;
261 u32 coreid;
262 struct brcmf_pcie_shared_info shared;
263 wait_queue_head_t mbdata_resp_wait;
264 bool mbdata_completed;
265 bool irq_allocated;
266 bool wowl_enabled;
267 u8 dma_idx_sz;
268 void *idxbuf;
269 u32 idxbuf_sz;
270 dma_addr_t idxbuf_dmahandle;
271 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
272 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
273 u16 value);
274 struct brcmf_mp_device *settings;
275 };
276
277 struct brcmf_pcie_ringbuf {
278 struct brcmf_commonring commonring;
279 dma_addr_t dma_handle;
280 u32 w_idx_addr;
281 u32 r_idx_addr;
282 struct brcmf_pciedev_info *devinfo;
283 u8 id;
284 };
285
286 /**
287 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
288 *
289 * @ringmem: dongle memory pointer to ring memory location
290 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
291 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
292 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
293 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
294 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
295 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
296 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
297 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
298 * @max_flowrings: maximum number of tx flow rings supported.
299 * @max_submissionrings: maximum number of submission rings (h2d) supported.
300 * @max_completionrings: maximum number of completion rings (d2h) supported.
301 */
302 struct brcmf_pcie_dhi_ringinfo {
303 __le32 ringmem;
304 __le32 h2d_w_idx_ptr;
305 __le32 h2d_r_idx_ptr;
306 __le32 d2h_w_idx_ptr;
307 __le32 d2h_r_idx_ptr;
308 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
309 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
310 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
311 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
312 __le16 max_flowrings;
313 __le16 max_submissionrings;
314 __le16 max_completionrings;
315 };
316
317 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
318 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
319 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
320 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
321 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
322 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
323 };
324
325 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
326 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
327 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
328 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
329 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
330 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
331 };
332
333 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
334 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
335 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
336 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
337 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
338 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
339 };
340
341 static void brcmf_pcie_setup(struct device *dev, int ret,
342 struct brcmf_fw_request *fwreq);
343 static struct brcmf_fw_request *
344 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
345
346 static u32
347 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
348 {
349 void __iomem *address = devinfo->regs + reg_offset;
350
351 return (ioread32(address));
352 }
353
354
355 static void
356 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
357 u32 value)
358 {
359 void __iomem *address = devinfo->regs + reg_offset;
360
361 iowrite32(value, address);
362 }
363
364
365 static u8
366 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
367 {
368 void __iomem *address = devinfo->tcm + mem_offset;
369
370 return (ioread8(address));
371 }
372
373
374 static u16
375 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
376 {
377 void __iomem *address = devinfo->tcm + mem_offset;
378
379 return (ioread16(address));
380 }
381
382
383 static void
384 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
385 u16 value)
386 {
387 void __iomem *address = devinfo->tcm + mem_offset;
388
389 iowrite16(value, address);
390 }
391
392
393 static u16
394 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
395 {
396 u16 *address = devinfo->idxbuf + mem_offset;
397
398 return (*(address));
399 }
400
401
402 static void
403 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
404 u16 value)
405 {
406 u16 *address = devinfo->idxbuf + mem_offset;
407
408 *(address) = value;
409 }
410
411
412 static u32
413 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
414 {
415 void __iomem *address = devinfo->tcm + mem_offset;
416
417 return (ioread32(address));
418 }
419
420
421 static void
422 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
423 u32 value)
424 {
425 void __iomem *address = devinfo->tcm + mem_offset;
426
427 iowrite32(value, address);
428 }
429
430
431 static u32
432 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
433 {
434 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
435
436 return (ioread32(addr));
437 }
438
439
440 static void
441 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
442 u32 value)
443 {
444 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
445
446 iowrite32(value, addr);
447 }
448
449
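/* Copy a host buffer into device TCM. 32-bit accesses are used when the
 * source, destination and length allow it, otherwise the copy falls back
 * to 16-bit or byte-wide accesses.
 */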
450 static void
451 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
452 void *srcaddr, u32 len)
453 {
454 void __iomem *address = devinfo->tcm + mem_offset;
455 __le32 *src32;
456 __le16 *src16;
457 u8 *src8;
458
459 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
460 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
461 src8 = (u8 *)srcaddr;
462 while (len) {
463 iowrite8(*src8, address);
464 address++;
465 src8++;
466 len--;
467 }
468 } else {
469 len = len / 2;
470 src16 = (__le16 *)srcaddr;
471 while (len) {
472 iowrite16(le16_to_cpu(*src16), address);
473 address += 2;
474 src16++;
475 len--;
476 }
477 }
478 } else {
479 len = len / 4;
480 src32 = (__le32 *)srcaddr;
481 while (len) {
482 iowrite32(le32_to_cpu(*src32), address);
483 address += 4;
484 src32++;
485 len--;
486 }
487 }
488 }
489
490
491 static void
492 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
493 void *dstaddr, u32 len)
494 {
495 void __iomem *address = devinfo->tcm + mem_offset;
496 __le32 *dst32;
497 __le16 *dst16;
498 u8 *dst8;
499
500 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
501 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
502 dst8 = (u8 *)dstaddr;
503 while (len) {
504 *dst8 = ioread8(address);
505 address++;
506 dst8++;
507 len--;
508 }
509 } else {
510 len = len / 2;
511 dst16 = (__le16 *)dstaddr;
512 while (len) {
513 *dst16 = cpu_to_le16(ioread16(address));
514 address += 2;
515 dst16++;
516 len--;
517 }
518 }
519 } else {
520 len = len / 4;
521 dst32 = (__le32 *)dstaddr;
522 while (len) {
523 *dst32 = cpu_to_le32(ioread32(address));
524 address += 4;
525 dst32++;
526 len--;
527 }
528 }
529 }
530
531
532 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
533 CHIPCREGOFFS(reg), value)
534
535
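/* Map the requested backplane core through the BAR0 window register. The
 * window is read back and rewritten once if the first write did not stick.
 */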
536 static void
537 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
538 {
539 const struct pci_dev *pdev = devinfo->pdev;
540 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
541 struct brcmf_core *core;
542 u32 bar0_win;
543
544 core = brcmf_chip_get_core(devinfo->ci, coreid);
545 if (core) {
546 bar0_win = core->base;
547 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
548 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
549 &bar0_win) == 0) {
550 if (bar0_win != core->base) {
551 bar0_win = core->base;
552 pci_write_config_dword(pdev,
553 BRCMF_PCIE_BAR0_WINDOW,
554 bar0_win);
555 }
556 }
557 } else {
558 brcmf_err(bus, "Unsupported core selected %x\n", coreid);
559 }
560 }
561
562
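/* Reset the dongle via the chipcommon watchdog with ASPM temporarily
 * disabled. On PCIe core revisions <= 13 the config space registers in
 * cfg_offset[] are read and written back afterwards.
 */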
563 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
564 {
565 struct brcmf_core *core;
566 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
567 BRCMF_PCIE_CFGREG_PM_CSR,
568 BRCMF_PCIE_CFGREG_MSI_CAP,
569 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
570 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
571 BRCMF_PCIE_CFGREG_MSI_DATA,
572 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
573 BRCMF_PCIE_CFGREG_RBAR_CTRL,
574 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
575 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
576 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
577 u32 i;
578 u32 val;
579 u32 lsc;
580
581 if (!devinfo->ci)
582 return;
583
584 /* Disable ASPM */
585 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
586 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
587 &lsc);
588 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
589 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
590 val);
591
592 /* Watchdog reset */
593 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
594 WRITECC32(devinfo, watchdog, 4);
595 msleep(100);
596
597 /* Restore ASPM */
598 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
599 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
600 lsc);
601
602 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
603 if (core->rev <= 13) {
604 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
605 brcmf_pcie_write_reg32(devinfo,
606 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
607 cfg_offset[i]);
608 val = brcmf_pcie_read_reg32(devinfo,
609 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
610 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
611 cfg_offset[i], val);
612 brcmf_pcie_write_reg32(devinfo,
613 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
614 val);
615 }
616 }
617 }
618
619
620 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
621 {
622 u32 config;
623
624 /* BAR1 window may not be sized properly */
625 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
626 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
627 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
628 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
629
630 device_wakeup_enable(&devinfo->pdev->dev);
631 }
632
633
634 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
635 {
636 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
637 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
638 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
639 5);
640 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
641 0);
642 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
643 7);
644 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
645 0);
646 }
647 return 0;
648 }
649
650
651 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
652 u32 resetintr)
653 {
654 struct brcmf_core *core;
655
656 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
657 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
658 brcmf_chip_resetcore(core, 0, 0, 0);
659 }
660
661 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
662 return -EINVAL;
663 return 0;
664 }
665
666
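/* Write a host-to-device mailbox value to TCM and ring the SB mailbox.
 * Waits up to ~1 second (100 x 10 ms) for any pending value to be consumed
 * by the firmware before writing the new one.
 */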
667 static int
668 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
669 {
670 struct brcmf_pcie_shared_info *shared;
671 struct brcmf_core *core;
672 u32 addr;
673 u32 cur_htod_mb_data;
674 u32 i;
675
676 shared = &devinfo->shared;
677 addr = shared->htod_mb_data_addr;
678 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
679
680 if (cur_htod_mb_data != 0)
681 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
682 cur_htod_mb_data);
683
684 i = 0;
685 while (cur_htod_mb_data != 0) {
686 msleep(10);
687 i++;
688 if (i > 100)
689 return -EIO;
690 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
691 }
692
693 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
694 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
695
696 /* Send mailbox interrupt twice as a hardware workaround */
697 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
698 if (core->rev <= 13)
699 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
700
701 return 0;
702 }
703
704
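/* Read and clear the device-to-host mailbox value and act on the deep-sleep,
 * D3 ack and firmware-halt notifications it may carry.
 */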
705 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
706 {
707 struct brcmf_pcie_shared_info *shared;
708 u32 addr;
709 u32 dtoh_mb_data;
710
711 shared = &devinfo->shared;
712 addr = shared->dtoh_mb_data_addr;
713 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
714
715 if (!dtoh_mb_data)
716 return;
717
718 brcmf_pcie_write_tcm32(devinfo, addr, 0);
719
720 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
721 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
722 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
723 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
724 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
725 }
726 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
727 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
728 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
729 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
730 devinfo->mbdata_completed = true;
731 wake_up(&devinfo->mbdata_resp_wait);
732 }
733 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
734 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
735 brcmf_fw_crashed(&devinfo->pdev->dev);
736 }
737 }
738
739
740 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
741 {
742 struct brcmf_pcie_shared_info *shared;
743 struct brcmf_pcie_console *console;
744 u32 addr;
745
746 shared = &devinfo->shared;
747 console = &shared->console;
748 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
749 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
750
751 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
752 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
753 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
754 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
755
756 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
757 console->base_addr, console->buf_addr, console->bufsize);
758 }
759
760 /**
761 * brcmf_pcie_bus_console_read - reads firmware messages
762 *
763 * @devinfo: pointer to the device data structure
764 * @error: specifies if an error has occurred (prints messages unconditionally)
765 */
766 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
767 bool error)
768 {
769 struct pci_dev *pdev = devinfo->pdev;
770 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
771 struct brcmf_pcie_console *console;
772 u32 addr;
773 u8 ch;
774 u32 newidx;
775
776 if (!error && !BRCMF_FWCON_ON())
777 return;
778
779 console = &devinfo->shared.console;
780 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
781 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
782 while (newidx != console->read_idx) {
783 addr = console->buf_addr + console->read_idx;
784 ch = brcmf_pcie_read_tcm8(devinfo, addr);
785 console->read_idx++;
786 if (console->read_idx == console->bufsize)
787 console->read_idx = 0;
788 if (ch == '\r')
789 continue;
790 console->log_str[console->log_idx] = ch;
791 console->log_idx++;
792 if ((ch != '\n') &&
793 (console->log_idx == (sizeof(console->log_str) - 2))) {
794 ch = '\n';
795 console->log_str[console->log_idx] = ch;
796 console->log_idx++;
797 }
798 if (ch == '\n') {
799 console->log_str[console->log_idx] = 0;
800 if (error)
801 __brcmf_err(bus, __func__, "CONSOLE: %s",
802 console->log_str);
803 else
804 pr_debug("CONSOLE: %s", console->log_str);
805 console->log_idx = 0;
806 }
807 }
808 }
809
810
811 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
812 {
813 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
814 }
815
816
817 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
818 {
819 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
820 BRCMF_PCIE_MB_INT_D2H_DB |
821 BRCMF_PCIE_MB_INT_FN0_0 |
822 BRCMF_PCIE_MB_INT_FN0_1);
823 }
824
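/* Ring H2D doorbell 1 to signal host readiness, if the firmware advertised
 * support for it in the shared flags.
 */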
825 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
826 {
827 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
828 brcmf_pcie_write_reg32(devinfo,
829 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
830 }
831
832 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
833 {
834 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
835
836 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
837 brcmf_pcie_intr_disable(devinfo);
838 brcmf_dbg(PCIE, "Enter\n");
839 return IRQ_WAKE_THREAD;
840 }
841 return IRQ_NONE;
842 }
843
844
845 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
846 {
847 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
848 u32 status;
849
850 devinfo->in_irq = true;
851 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
852 brcmf_dbg(PCIE, "Enter %x\n", status);
853 if (status) {
854 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
855 status);
856 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
857 BRCMF_PCIE_MB_INT_FN0_1))
858 brcmf_pcie_handle_mb_data(devinfo);
859 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
860 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
861 brcmf_proto_msgbuf_rx_trigger(
862 &devinfo->pdev->dev);
863 }
864 }
865 brcmf_pcie_bus_console_read(devinfo, false);
866 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
867 brcmf_pcie_intr_enable(devinfo);
868 devinfo->in_irq = false;
869 return IRQ_HANDLED;
870 }
871
872
873 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
874 {
875 struct pci_dev *pdev = devinfo->pdev;
876 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
877
878 brcmf_pcie_intr_disable(devinfo);
879
880 brcmf_dbg(PCIE, "Enter\n");
881
882 pci_enable_msi(pdev);
883 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
884 brcmf_pcie_isr_thread, IRQF_SHARED,
885 "brcmf_pcie_intr", devinfo)) {
886 pci_disable_msi(pdev);
887 brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
888 return -EIO;
889 }
890 devinfo->irq_allocated = true;
891 return 0;
892 }
893
894
895 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
896 {
897 struct pci_dev *pdev = devinfo->pdev;
898 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
899 u32 status;
900 u32 count;
901
902 if (!devinfo->irq_allocated)
903 return;
904
905 brcmf_pcie_intr_disable(devinfo);
906 free_irq(pdev->irq, devinfo);
907 pci_disable_msi(pdev);
908
909 msleep(50);
910 count = 0;
911 while ((devinfo->in_irq) && (count < 20)) {
912 msleep(50);
913 count++;
914 }
915 if (devinfo->in_irq)
916 brcmf_err(bus, "Still in IRQ (processing) !!!\n");
917
918 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
919 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
920
921 devinfo->irq_allocated = false;
922 }
923
924
925 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
926 {
927 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
928 struct brcmf_pciedev_info *devinfo = ring->devinfo;
929 struct brcmf_commonring *commonring = &ring->commonring;
930
931 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
932 return -EIO;
933
934 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
935 commonring->w_ptr, ring->id);
936
937 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
938
939 return 0;
940 }
941
942
943 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
944 {
945 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
946 struct brcmf_pciedev_info *devinfo = ring->devinfo;
947 struct brcmf_commonring *commonring = &ring->commonring;
948
949 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
950 return -EIO;
951
952 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
953 commonring->r_ptr, ring->id);
954
955 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
956
957 return 0;
958 }
959
960
961 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
962 {
963 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
964 struct brcmf_pciedev_info *devinfo = ring->devinfo;
965
966 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
967 return -EIO;
968
969 brcmf_dbg(PCIE, "RING !\n");
970 /* Any arbitrary value will do, let's use 1 */
971 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
972
973 return 0;
974 }
975
976
977 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
978 {
979 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
980 struct brcmf_pciedev_info *devinfo = ring->devinfo;
981 struct brcmf_commonring *commonring = &ring->commonring;
982
983 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
984 return -EIO;
985
986 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
987
988 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
989 commonring->w_ptr, ring->id);
990
991 return 0;
992 }
993
994
995 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
996 {
997 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
998 struct brcmf_pciedev_info *devinfo = ring->devinfo;
999 struct brcmf_commonring *commonring = &ring->commonring;
1000
1001 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
1002 return -EIO;
1003
1004 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
1005
1006 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
1007 commonring->r_ptr, ring->id);
1008
1009 return 0;
1010 }
1011
1012
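/* Allocate a coherent DMA buffer and publish its 64-bit bus address to the
 * firmware by writing the low and high words into TCM at the given offset.
 */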
1013 static void *
1014 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
1015 u32 size, u32 tcm_dma_phys_addr,
1016 dma_addr_t *dma_handle)
1017 {
1018 void *ring;
1019 u64 address;
1020
1021 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1022 GFP_KERNEL);
1023 if (!ring)
1024 return NULL;
1025
1026 address = (u64)*dma_handle;
1027 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1028 address & 0xffffffff);
1029 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1030
1031 return (ring);
1032 }
1033
1034
1035 static struct brcmf_pcie_ringbuf *
1036 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1037 u32 tcm_ring_phys_addr)
1038 {
1039 void *dma_buf;
1040 dma_addr_t dma_handle;
1041 struct brcmf_pcie_ringbuf *ring;
1042 u32 size;
1043 u32 addr;
1044 const u32 *ring_itemsize_array;
1045
1046 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1047 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1048 else
1049 ring_itemsize_array = brcmf_ring_itemsize;
1050
1051 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1052 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1053 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1054 &dma_handle);
1055 if (!dma_buf)
1056 return NULL;
1057
1058 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1059 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1060 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1061 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1062
1063 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1064 if (!ring) {
1065 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1066 dma_handle);
1067 return NULL;
1068 }
1069 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1070 ring_itemsize_array[ring_id], dma_buf);
1071 ring->dma_handle = dma_handle;
1072 ring->devinfo = devinfo;
1073 brcmf_commonring_register_cb(&ring->commonring,
1074 brcmf_pcie_ring_mb_ring_bell,
1075 brcmf_pcie_ring_mb_update_rptr,
1076 brcmf_pcie_ring_mb_update_wptr,
1077 brcmf_pcie_ring_mb_write_rptr,
1078 brcmf_pcie_ring_mb_write_wptr, ring);
1079
1080 return (ring);
1081 }
1082
1083
1084 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1085 struct brcmf_pcie_ringbuf *ring)
1086 {
1087 void *dma_buf;
1088 u32 size;
1089
1090 if (!ring)
1091 return;
1092
1093 dma_buf = ring->commonring.buf_addr;
1094 if (dma_buf) {
1095 size = ring->commonring.depth * ring->commonring.item_len;
1096 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1097 }
1098 kfree(ring);
1099 }
1100
1101
1102 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1103 {
1104 u32 i;
1105
1106 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1107 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1108 devinfo->shared.commonrings[i]);
1109 devinfo->shared.commonrings[i] = NULL;
1110 }
1111 kfree(devinfo->shared.flowrings);
1112 devinfo->shared.flowrings = NULL;
1113 if (devinfo->idxbuf) {
1114 dma_free_coherent(&devinfo->pdev->dev,
1115 devinfo->idxbuf_sz,
1116 devinfo->idxbuf,
1117 devinfo->idxbuf_dmahandle);
1118 devinfo->idxbuf = NULL;
1119 }
1120 }
1121
1122
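/* Read the ring info block from TCM and set up the common rings and flowring
 * bookkeeping. Ring read/write indices live either in TCM or, when the
 * firmware supports DMA indices, in a host memory buffer whose address is
 * handed back to the firmware.
 */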
1123 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1124 {
1125 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1126 struct brcmf_pcie_ringbuf *ring;
1127 struct brcmf_pcie_ringbuf *rings;
1128 u32 d2h_w_idx_ptr;
1129 u32 d2h_r_idx_ptr;
1130 u32 h2d_w_idx_ptr;
1131 u32 h2d_r_idx_ptr;
1132 u32 ring_mem_ptr;
1133 u32 i;
1134 u64 address;
1135 u32 bufsz;
1136 u8 idx_offset;
1137 struct brcmf_pcie_dhi_ringinfo ringinfo;
1138 u16 max_flowrings;
1139 u16 max_submissionrings;
1140 u16 max_completionrings;
1141
1142 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1143 sizeof(ringinfo));
1144 if (devinfo->shared.version >= 6) {
1145 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1146 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1147 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1148 } else {
1149 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1150 max_flowrings = max_submissionrings -
1151 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1152 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1153 }
1154
1155 if (devinfo->dma_idx_sz != 0) {
1156 bufsz = (max_submissionrings + max_completionrings) *
1157 devinfo->dma_idx_sz * 2;
1158 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1159 &devinfo->idxbuf_dmahandle,
1160 GFP_KERNEL);
1161 if (!devinfo->idxbuf)
1162 devinfo->dma_idx_sz = 0;
1163 }
1164
1165 if (devinfo->dma_idx_sz == 0) {
1166 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1167 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1168 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1169 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1170 idx_offset = sizeof(u32);
1171 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1172 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1173 brcmf_dbg(PCIE, "Using TCM indices\n");
1174 } else {
1175 memset(devinfo->idxbuf, 0, bufsz);
1176 devinfo->idxbuf_sz = bufsz;
1177 idx_offset = devinfo->dma_idx_sz;
1178 devinfo->write_ptr = brcmf_pcie_write_idx;
1179 devinfo->read_ptr = brcmf_pcie_read_idx;
1180
1181 h2d_w_idx_ptr = 0;
1182 address = (u64)devinfo->idxbuf_dmahandle;
1183 ringinfo.h2d_w_idx_hostaddr.low_addr =
1184 cpu_to_le32(address & 0xffffffff);
1185 ringinfo.h2d_w_idx_hostaddr.high_addr =
1186 cpu_to_le32(address >> 32);
1187
1188 h2d_r_idx_ptr = h2d_w_idx_ptr +
1189 max_submissionrings * idx_offset;
1190 address += max_submissionrings * idx_offset;
1191 ringinfo.h2d_r_idx_hostaddr.low_addr =
1192 cpu_to_le32(address & 0xffffffff);
1193 ringinfo.h2d_r_idx_hostaddr.high_addr =
1194 cpu_to_le32(address >> 32);
1195
1196 d2h_w_idx_ptr = h2d_r_idx_ptr +
1197 max_submissionrings * idx_offset;
1198 address += max_submissionrings * idx_offset;
1199 ringinfo.d2h_w_idx_hostaddr.low_addr =
1200 cpu_to_le32(address & 0xffffffff);
1201 ringinfo.d2h_w_idx_hostaddr.high_addr =
1202 cpu_to_le32(address >> 32);
1203
1204 d2h_r_idx_ptr = d2h_w_idx_ptr +
1205 max_completionrings * idx_offset;
1206 address += max_completionrings * idx_offset;
1207 ringinfo.d2h_r_idx_hostaddr.low_addr =
1208 cpu_to_le32(address & 0xffffffff);
1209 ringinfo.d2h_r_idx_hostaddr.high_addr =
1210 cpu_to_le32(address >> 32);
1211
1212 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1213 &ringinfo, sizeof(ringinfo));
1214 brcmf_dbg(PCIE, "Using host memory indices\n");
1215 }
1216
1217 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1218
1219 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1220 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1221 if (!ring)
1222 goto fail;
1223 ring->w_idx_addr = h2d_w_idx_ptr;
1224 ring->r_idx_addr = h2d_r_idx_ptr;
1225 ring->id = i;
1226 devinfo->shared.commonrings[i] = ring;
1227
1228 h2d_w_idx_ptr += idx_offset;
1229 h2d_r_idx_ptr += idx_offset;
1230 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1231 }
1232
1233 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1234 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1235 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1236 if (!ring)
1237 goto fail;
1238 ring->w_idx_addr = d2h_w_idx_ptr;
1239 ring->r_idx_addr = d2h_r_idx_ptr;
1240 ring->id = i;
1241 devinfo->shared.commonrings[i] = ring;
1242
1243 d2h_w_idx_ptr += idx_offset;
1244 d2h_r_idx_ptr += idx_offset;
1245 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1246 }
1247
1248 devinfo->shared.max_flowrings = max_flowrings;
1249 devinfo->shared.max_submissionrings = max_submissionrings;
1250 devinfo->shared.max_completionrings = max_completionrings;
1251 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1252 if (!rings)
1253 goto fail;
1254
1255 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1256
1257 for (i = 0; i < max_flowrings; i++) {
1258 ring = &rings[i];
1259 ring->devinfo = devinfo;
1260 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1261 brcmf_commonring_register_cb(&ring->commonring,
1262 brcmf_pcie_ring_mb_ring_bell,
1263 brcmf_pcie_ring_mb_update_rptr,
1264 brcmf_pcie_ring_mb_update_wptr,
1265 brcmf_pcie_ring_mb_write_rptr,
1266 brcmf_pcie_ring_mb_write_wptr,
1267 ring);
1268 ring->w_idx_addr = h2d_w_idx_ptr;
1269 ring->r_idx_addr = h2d_r_idx_ptr;
1270 h2d_w_idx_ptr += idx_offset;
1271 h2d_r_idx_ptr += idx_offset;
1272 }
1273 devinfo->shared.flowrings = rings;
1274
1275 return 0;
1276
1277 fail:
1278 brcmf_err(bus, "Allocating ring buffers failed\n");
1279 brcmf_pcie_release_ringbuffers(devinfo);
1280 return -ENOMEM;
1281 }
1282
1283
1284 static void
1285 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1286 {
1287 if (devinfo->shared.scratch)
1288 dma_free_coherent(&devinfo->pdev->dev,
1289 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1290 devinfo->shared.scratch,
1291 devinfo->shared.scratch_dmahandle);
1292 if (devinfo->shared.ringupd)
1293 dma_free_coherent(&devinfo->pdev->dev,
1294 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1295 devinfo->shared.ringupd,
1296 devinfo->shared.ringupd_dmahandle);
1297 }
1298
1299 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1300 {
1301 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1302 u64 address;
1303 u32 addr;
1304
1305 devinfo->shared.scratch =
1306 dma_alloc_coherent(&devinfo->pdev->dev,
1307 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1308 &devinfo->shared.scratch_dmahandle,
1309 GFP_KERNEL);
1310 if (!devinfo->shared.scratch)
1311 goto fail;
1312
1313 addr = devinfo->shared.tcm_base_address +
1314 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1315 address = (u64)devinfo->shared.scratch_dmahandle;
1316 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1317 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1318 addr = devinfo->shared.tcm_base_address +
1319 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1320 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1321
1322 devinfo->shared.ringupd =
1323 dma_alloc_coherent(&devinfo->pdev->dev,
1324 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1325 &devinfo->shared.ringupd_dmahandle,
1326 GFP_KERNEL);
1327 if (!devinfo->shared.ringupd)
1328 goto fail;
1329
1330 addr = devinfo->shared.tcm_base_address +
1331 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1332 address = (u64)devinfo->shared.ringupd_dmahandle;
1333 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1334 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1335 addr = devinfo->shared.tcm_base_address +
1336 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1337 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1338 return 0;
1339
1340 fail:
1341 brcmf_err(bus, "Allocating scratch buffers failed\n");
1342 brcmf_pcie_release_scratchbuffers(devinfo);
1343 return -ENOMEM;
1344 }
1345
1346
1347 static void brcmf_pcie_down(struct device *dev)
1348 {
1349 }
1350
1351
1352 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1353 {
1354 return 0;
1355 }
1356
1357
1358 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1359 uint len)
1360 {
1361 return 0;
1362 }
1363
1364
1365 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1366 uint len)
1367 {
1368 return 0;
1369 }
1370
1371
1372 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1373 {
1374 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1375 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1376 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1377
1378 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1379 devinfo->wowl_enabled = enabled;
1380 }
1381
1382
1383 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1384 {
1385 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1386 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1387 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1388
1389 return devinfo->ci->ramsize - devinfo->ci->srsize;
1390 }
1391
1392
1393 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1394 {
1395 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1396 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1397 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1398
1399 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1400 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1401 return 0;
1402 }
1403
1404 static
1405 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1406 {
1407 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1408 struct brcmf_fw_request *fwreq;
1409 struct brcmf_fw_name fwnames[] = {
1410 { ext, fw_name },
1411 };
1412
1413 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1414 brcmf_pcie_fwnames,
1415 ARRAY_SIZE(brcmf_pcie_fwnames),
1416 fwnames, ARRAY_SIZE(fwnames));
1417 if (!fwreq)
1418 return -ENOMEM;
1419
1420 kfree(fwreq);
1421 return 0;
1422 }
1423
1424 static int brcmf_pcie_reset(struct device *dev)
1425 {
1426 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1427 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1428 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1429 struct brcmf_fw_request *fwreq;
1430 int err;
1431
1432 brcmf_pcie_intr_disable(devinfo);
1433
1434 brcmf_pcie_bus_console_read(devinfo, true);
1435
1436 brcmf_detach(dev);
1437
1438 brcmf_pcie_release_irq(devinfo);
1439 brcmf_pcie_release_scratchbuffers(devinfo);
1440 brcmf_pcie_release_ringbuffers(devinfo);
1441 brcmf_pcie_reset_device(devinfo);
1442
1443 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1444 if (!fwreq) {
1445 dev_err(dev, "Failed to prepare FW request\n");
1446 return -ENOMEM;
1447 }
1448
1449 err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1450 if (err) {
1451 dev_err(dev, "Failed to request firmware\n");
1452 kfree(fwreq);
1453 }
1454
1455 return err;
1456 }
1457
1458 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1459 .txdata = brcmf_pcie_tx,
1460 .stop = brcmf_pcie_down,
1461 .txctl = brcmf_pcie_tx_ctlpkt,
1462 .rxctl = brcmf_pcie_rx_ctlpkt,
1463 .wowl_config = brcmf_pcie_wowl_config,
1464 .get_ramsize = brcmf_pcie_get_ramsize,
1465 .get_memdump = brcmf_pcie_get_memdump,
1466 .get_fwname = brcmf_pcie_get_fwname,
1467 .reset = brcmf_pcie_reset,
1468 };
1469
1470
1471 static void
1472 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1473 u32 data_len)
1474 {
1475 __le32 *field;
1476 u32 newsize;
1477
1478 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1479 return;
1480
1481 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1482 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1483 return;
1484 field++;
1485 newsize = le32_to_cpup(field);
1486
1487 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1488 newsize);
1489 devinfo->ci->ramsize = newsize;
1490 }
1491
1492
1493 static int
1494 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1495 u32 sharedram_addr)
1496 {
1497 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1498 struct brcmf_pcie_shared_info *shared;
1499 u32 addr;
1500
1501 shared = &devinfo->shared;
1502 shared->tcm_base_address = sharedram_addr;
1503
1504 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1505 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1506 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1507 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1508 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1509 brcmf_err(bus, "Unsupported PCIE version %d\n",
1510 shared->version);
1511 return -EINVAL;
1512 }
1513
1514 /* check whether firmware supports DMA indices */
1515 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1516 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1517 devinfo->dma_idx_sz = sizeof(u16);
1518 else
1519 devinfo->dma_idx_sz = sizeof(u32);
1520 }
1521
1522 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1523 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1524 if (shared->max_rxbufpost == 0)
1525 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1526
1527 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1528 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1529
1530 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1531 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1532
1533 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1534 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1535
1536 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1537 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1538
1539 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1540 shared->max_rxbufpost, shared->rx_dataoffset);
1541
1542 brcmf_pcie_bus_console_init(devinfo);
1543
1544 return 0;
1545 }
1546
1547
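/* Download firmware (and NVRAM, if provided) into device RAM, restart the
 * ARM core and wait for the firmware to publish the shared RAM address in
 * the last word of RAM, which signals that it is up and running.
 */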
1548 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1549 const struct firmware *fw, void *nvram,
1550 u32 nvram_len)
1551 {
1552 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1553 u32 sharedram_addr;
1554 u32 sharedram_addr_written;
1555 u32 loop_counter;
1556 int err;
1557 u32 address;
1558 u32 resetintr;
1559
1560 brcmf_dbg(PCIE, "Halt ARM.\n");
1561 err = brcmf_pcie_enter_download_state(devinfo);
1562 if (err)
1563 return err;
1564
1565 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1566 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1567 (void *)fw->data, fw->size);
1568
1569 resetintr = get_unaligned_le32(fw->data);
1570 release_firmware(fw);
1571
1572 /* Clear the last 4 bytes of RAM, which will hold the shared
1573 * area address. The firmware writing it back indicates it is running.
1574 */
1575 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1576
1577 if (nvram) {
1578 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1579 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1580 nvram_len;
1581 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1582 brcmf_fw_nvram_free(nvram);
1583 } else {
1584 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1585 devinfo->nvram_name);
1586 }
1587
1588 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1589 devinfo->ci->ramsize -
1590 4);
1591 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1592 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1593 if (err)
1594 return err;
1595
1596 brcmf_dbg(PCIE, "Wait for FW init\n");
1597 sharedram_addr = sharedram_addr_written;
1598 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1599 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1600 msleep(50);
1601 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1602 devinfo->ci->ramsize -
1603 4);
1604 loop_counter--;
1605 }
1606 if (sharedram_addr == sharedram_addr_written) {
1607 brcmf_err(bus, "FW failed to initialize\n");
1608 return -ENODEV;
1609 }
1610 if (sharedram_addr < devinfo->ci->rambase ||
1611 sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1612 brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1613 sharedram_addr);
1614 return -ENODEV;
1615 }
1616 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1617
1618 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1619 }
1620
1621
1622 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1623 {
1624 struct pci_dev *pdev = devinfo->pdev;
1625 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1626 int err;
1627 phys_addr_t bar0_addr, bar1_addr;
1628 ulong bar1_size;
1629
1630 err = pci_enable_device(pdev);
1631 if (err) {
1632 brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1633 return err;
1634 }
1635
1636 pci_set_master(pdev);
1637
1638 /* Bar-0 mapped address */
1639 bar0_addr = pci_resource_start(pdev, 0);
1640 /* Bar-1 mapped address */
1641 bar1_addr = pci_resource_start(pdev, 2);
1642 /* read Bar-1 mapped memory range */
1643 bar1_size = pci_resource_len(pdev, 2);
1644 if ((bar1_size == 0) || (bar1_addr == 0)) {
1645 brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1646 bar1_size, (unsigned long long)bar1_addr);
1647 return -EINVAL;
1648 }
1649
1650 devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1651 devinfo->tcm = ioremap(bar1_addr, bar1_size);
1652
1653 if (!devinfo->regs || !devinfo->tcm) {
1654 brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1655 devinfo->tcm);
1656 return -EINVAL;
1657 }
1658 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1659 devinfo->regs, (unsigned long long)bar0_addr);
1660 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1661 devinfo->tcm, (unsigned long long)bar1_addr,
1662 (unsigned int)bar1_size);
1663
1664 return 0;
1665 }
1666
1667
1668 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1669 {
1670 if (devinfo->tcm)
1671 iounmap(devinfo->tcm);
1672 if (devinfo->regs)
1673 iounmap(devinfo->regs);
1674
1675 pci_disable_device(devinfo->pdev);
1676 }
1677
1678
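/* Split a backplane address into a BAR0 window base, which is programmed
 * into the window register, and the offset within that window, which is
 * returned to the caller.
 */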
1679 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1680 {
1681 u32 ret_addr;
1682
1683 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1684 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1685 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1686
1687 return ret_addr;
1688 }
1689
1690
1691 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1692 {
1693 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1694
1695 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1696 return brcmf_pcie_read_reg32(devinfo, addr);
1697 }
1698
1699
1700 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1701 {
1702 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1703
1704 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1705 brcmf_pcie_write_reg32(devinfo, addr, value);
1706 }
1707
1708
1709 static int brcmf_pcie_buscoreprep(void *ctx)
1710 {
1711 return brcmf_pcie_get_resource(ctx);
1712 }
1713
1714
1715 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1716 {
1717 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1718 u32 val;
1719
1720 devinfo->ci = chip;
1721 brcmf_pcie_reset_device(devinfo);
1722
1723 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1724 if (val != 0xffffffff)
1725 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1726 val);
1727
1728 return 0;
1729 }
1730
1731
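/* Chip layer callback used when (re)starting the device core: plant the
 * reset vector @rstvec at offset 0 of TCM, presumably so the core begins
 * executing the downloaded image once it is released from reset.
 */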
static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
}


static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};

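/* Indices into the firmware request items set up in
 * brcmf_pcie_prepare_fw_request() and consumed in brcmf_pcie_setup().
 */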
#define BRCMF_PCIE_FW_CODE	0
#define BRCMF_PCIE_FW_NVRAM	1

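/* Completion callback for the asynchronous firmware request issued from
 * brcmf_pcie_probe(): download firmware and NVRAM to the device, set up the
 * message rings and interrupts, then attach the higher driver layers. On
 * failure the driver is released from the device.
 */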
static void brcmf_pcie_setup(struct device *dev, int ret,
			     struct brcmf_fw_request *fwreq)
{
	const struct firmware *fw;
	void *nvram;
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i, nvram_len;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	ret = brcmf_chip_get_raminfo(devinfo->ci);
	if (ret) {
		brcmf_err(bus, "Failed to get RAM info\n");
		goto fail;
	}

	/* Some firmware images carry the device memory size inside the
	 * firmware itself, because part of the device memory is shared and
	 * the division is determined by the firmware. Parse the firmware
	 * and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.max_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	brcmf_pcie_intr_enable(devinfo);
	brcmf_pcie_hostready(devinfo);

	ret = brcmf_attach(&devinfo->pdev->dev);
	if (ret)
		goto fail;

	brcmf_pcie_bus_console_read(devinfo, false);

	return;

fail:
	device_release_driver(dev);
}

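/* Build the firmware request for this device: a mandatory firmware binary
 * (.bin) plus an optional NVRAM file (.txt), selected through the per-chip
 * mapping table brcmf_pcie_fwnames.
 */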
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ".bin", devinfo->fw_name },
		{ ".txt", devinfo->nvram_name },
	};

	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
				       brcmf_pcie_fwnames,
				       ARRAY_SIZE(brcmf_pcie_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return NULL;

	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
	fwreq->board_type = devinfo->settings->board_type;
	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
	fwreq->bus_nr = devinfo->pdev->bus->number;

	return fwreq;
}

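/* PCI probe: attach the chip/buscore layer (which maps the BARs via
 * brcmf_pcie_buscoreprep()), allocate the bus structures, and kick off the
 * asynchronous firmware request. Setup continues in brcmf_pcie_setup()
 * once the firmware files are available.
 */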
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_fw_request *fwreq;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, pdev->device,
					&brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
						   BRCMF_BUSTYPE_PCIE,
						   devinfo->ci->chip,
						   devinfo->ci->chiprev);
	if (!devinfo->settings) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings);
	if (ret)
		goto fail_bus;

	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
	if (!fwreq) {
		ret = -ENOMEM;
		goto fail_brcmf;
	}

	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
	if (ret < 0) {
		kfree(fwreq);
		goto fail_brcmf;
	}
	return 0;

fail_brcmf:
	brcmf_free(&devinfo->pdev->dev);
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}


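/* PCI remove: detach the higher layers, free the bus structures, release
 * rings, buffers and the IRQ, and put the device back in reset before
 * releasing its PCI resources.
 */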
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);
	brcmf_free(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}


#ifdef CONFIG_PM


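/* Suspend handler: bring the bus down, ask the firmware to enter D3 with a
 * BRCMF_H2D_HOST_D3_INFORM mailbox message, and wait (with timeout) for the
 * acknowledgement before marking the PCIe state down.
 */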
static int brcmf_pcie_pm_enter_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
			   BRCMF_PCIE_MBDATA_TIMEOUT);
	if (!devinfo->mbdata_completed) {
		brcmf_err(bus, "Timeout on response for entering D3 substate\n");
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		return -EIO;
	}

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;

	return 0;
}


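/* Resume handler: if the dongle kept its state across suspend (the PCIe2
 * interrupt mask is still set), send BRCMF_H2D_HOST_D0_INFORM and resume in
 * place; otherwise tear the device down and re-probe it so the firmware is
 * downloaded again.
 */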
static int brcmf_pcie_pm_leave_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	struct pci_dev *pdev;
	int err;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;
	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);

	/* Check if device is still up and running, if so we are ready */
	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
		brcmf_dbg(PCIE, "Try to wakeup device....\n");
		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
			goto cleanup;
		brcmf_dbg(PCIE, "Hot resume, continue....\n");
		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		brcmf_pcie_intr_enable(devinfo);
		brcmf_pcie_hostready(devinfo);
		return 0;
	}

cleanup:
	brcmf_chip_detach(devinfo->ci);
	devinfo->ci = NULL;
	pdev = devinfo->pdev;
	brcmf_pcie_remove(pdev);

	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		__brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);

	return err;
}


static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};


#endif /* CONFIG_PM */


#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }

static const struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
	{ /* end: all zeroes */ }
};


MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);


static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.driver.pm = &brcmf_pciedrvr_pm,
#endif
	.driver.coredump = brcmf_dev_coredump,
};


int brcmf_pcie_register(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	return pci_register_driver(&brcmf_pciedrvr);
}


void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}