// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"

/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

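/* Send a device sleep message to CP and cache the requested sleep state. */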
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

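/* Prepare one new DL transfer descriptor for the pipe unless its queue
 * limit is already reached.
 */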
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* Timer handler: retry DL buffer allocation for pipes that have no free
 * buffers and ring the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

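/* Read the CP MUX capability from MMIO and derive the MUX configuration. */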
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel alloc function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
	cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;

	return 0;
}

void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

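/* Start the TD update timer unless it is suspended or the runtime phase has
 * not been entered yet; in that case ring the doorbell directly.
 */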
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

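/* Send the queued UL skbs of all active channels to CP. Returns true if an
 * HP update is still pending for the IP data channel.
 */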
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* forced HP update needed for non data channels */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

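/* Request the IPC INIT state, wait for CP to confirm it and then request the
 * RUNNING state; send a timeout uevent if CP does not respond.
 */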
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If Fast Update timer is already running then stop */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start Fast update timer only for IP channel if all the TDs were
	 * used in last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* Process an open uplink pipe. */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the internal phase. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Trace channel stats for IP UL pipe. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Execute the ROM doorbell irq: latch the exit code and wake the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

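/* Startup timer callback: re-arm the timer while a polling period is set and
 * defer the state check to the tasklet.
 */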
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

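/* Central MSI handler executed in tasklet context: update the AP phase,
 * process the message ring and all open pipes and restart the timers as
 * needed.
 */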
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
		ipc_imem_td_update_timer_start(ipc_imem);

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

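/* TD update timer callback: defer the doorbell trigger to the tasklet. */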
static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the CP stage is undef, return the internal precalculated phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

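/* Open the UL and DL pipes of the channel and allocate its DL buffers in
 * tasklet context.
 */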
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

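/* Reserve the channel matching the given type/index; returns the channel
 * array index or a negative error code.
 */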
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

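/* Apply the pipe configuration from chnl_cfg to the channel with the
 * given id.
 */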
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

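/* Mark the pipe closed, drop any queued UL skbs and release the protocol
 * resources of the pipe.
 */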
void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when the link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up, set the modem to IPC_UNINIT;
	 * otherwise skip it because the PCIe link is down.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for a maximum of 30ms to allow the modem to
		 * uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

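/* Stop the timers and the run state worker, tear down the sub-modules and
 * the IPC protocol and free the task and MMIO instances on driver removal.
 */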
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

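/* MSI entry point: debounce the irq and defer the handling to the tasklet. */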
void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

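/* Queue the chip info transfer to run in tasklet context. */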
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}