// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017-2021 NXP

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rpmsg.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

#include "imx-pcm.h"
#include "fsl_rpmsg.h"
#include "imx-pcm-rpmsg.h"

static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
	.period_bytes_min = 512,
	.period_bytes_max = 65536,
	.periods_min = 2,
	.periods_max = 6000,
	.fifo_size = 0,
};

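/*
 * Send a command to the remote (M core) over rpmsg. Unless the message is
 * of MSG_TYPE_C (no reply expected), wait for the response and cache it in
 * info->msg[] so that the pointer callback can read it later.
 */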
static int imx_rpmsg_pcm_send_message(struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	struct rpmsg_device *rpdev = info->rpdev;
	int ret = 0;

	mutex_lock(&info->msg_lock);
	if (!rpdev) {
		dev_err(info->dev, "rpmsg channel not ready\n");
		mutex_unlock(&info->msg_lock);
		return -EINVAL;
	}

	dev_dbg(&rpdev->dev, "send cmd %d\n", msg->s_msg.header.cmd);

	if (msg->s_msg.header.type != MSG_TYPE_C)
		reinit_completion(&info->cmd_complete);

	ret = rpmsg_send(rpdev->ept, (void *)&msg->s_msg,
			 sizeof(struct rpmsg_s_msg));
	if (ret) {
		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
		mutex_unlock(&info->msg_lock);
		return ret;
	}

	/* No response is expected for a MSG_TYPE_C command */
	if (msg->s_msg.header.type == MSG_TYPE_C) {
		mutex_unlock(&info->msg_lock);
		return 0;
	}

	/* Wait for the response from the remote side */
	ret = wait_for_completion_timeout(&info->cmd_complete,
					  msecs_to_jiffies(RPMSG_TIMEOUT));
	if (!ret) {
		dev_err(&rpdev->dev, "rpmsg_send cmd %d timeout!\n",
			msg->s_msg.header.cmd);
		mutex_unlock(&info->msg_lock);
		return -ETIMEDOUT;
	}

	memcpy(&msg->r_msg, &info->r_msg, sizeof(struct rpmsg_r_msg));
	memcpy(&info->msg[msg->r_msg.header.cmd].r_msg,
	       &msg->r_msg, sizeof(struct rpmsg_r_msg));

	/*
	 * Reset the buffer pointer to zero. It has actually already been
	 * cleared in imx_rpmsg_terminate_all(), but if a timer task is
	 * still queued, it may change the buffer pointer after that, so
	 * reset it again here for the TERMINATE command.
	 */
	switch (msg->s_msg.header.cmd) {
	case TX_TERMINATE:
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	case RX_TERMINATE:
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	default:
		break;
	}

	dev_dbg(&rpdev->dev, "cmd:%d, resp %d\n", msg->s_msg.header.cmd,
		info->r_msg.param.resp);

	mutex_unlock(&info->msg_lock);

	return 0;
}

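/*
 * Queue a message for transmission from the ordered workqueue. The message
 * is dropped (and counted) when the circular work list is full.
 */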
static int imx_rpmsg_insert_workqueue(struct snd_pcm_substream *substream,
				      struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Queue the work to the workqueue.
	 * If the queue is full, drop the message.
	 */
	spin_lock_irqsave(&info->wq_lock, flags);
	if (info->work_write_index != info->work_read_index) {
		int index = info->work_write_index;

		memcpy(&info->work_list[index].msg, msg,
		       sizeof(struct rpmsg_s_msg));

		queue_work(info->rpmsg_wq, &info->work_list[index].work);
		info->work_write_index++;
		info->work_write_index %= WORK_MAX_NUM;
	} else {
		info->msg_drop_count[substream->stream]++;
		ret = -EPIPE;
	}
	spin_unlock_irqrestore(&info->wq_lock, flags);

	return ret;
}

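/*
 * Translate the ALSA hw_params (rate, format, channels) into an rpmsg
 * command and hand it to the remote side.
 */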
static int imx_rpmsg_pcm_hw_params(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_HW_PARAM];
		msg->s_msg.header.cmd = TX_HW_PARAM;
	} else {
		msg = &info->msg[RX_HW_PARAM];
		msg->s_msg.header.cmd = RX_HW_PARAM;
	}

	msg->s_msg.param.rate = params_rate(params);

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		msg->s_msg.param.format = RPMSG_S16_LE;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		msg->s_msg.param.format = RPMSG_S24_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
		msg->s_msg.param.format = RPMSG_DSD_U16_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
		msg->s_msg.param.format = RPMSG_DSD_U32_LE;
		break;
	default:
		msg->s_msg.param.format = RPMSG_S32_LE;
		break;
	}

	switch (params_channels(params)) {
	case 1:
		msg->s_msg.param.channels = RPMSG_CH_LEFT;
		break;
	case 2:
		msg->s_msg.param.channels = RPMSG_CH_STEREO;
		break;
	default:
		msg->s_msg.param.channels = params_channels(params);
		break;
	}

	info->send_message(msg, info);

	return 0;
}

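/*
 * Derive the current DMA position from the buffer tail reported by the
 * latest PERIOD_DONE message from the remote side.
 */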
static snd_pcm_uframes_t imx_rpmsg_pcm_pointer(struct snd_soc_component *component,
					       struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	unsigned int pos = 0;
	int buffer_tail = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
	else
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];

	buffer_tail = msg->r_msg.param.buffer_tail;
	pos = buffer_tail * snd_pcm_lib_period_bytes(substream);

	return bytes_to_frames(substream->runtime, pos);
}

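/*
 * Timer callback: send the most recent period-done notification that the
 * ack callback deferred, so the remote side gets the latest pointer.
 */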
static void imx_rpmsg_timer_callback(struct timer_list *t)
{
	struct stream_timer *stream_timer =
			from_timer(stream_timer, t, timer);
	struct snd_pcm_substream *substream = stream_timer->substream;
	struct rpmsg_info *info = stream_timer->info;
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	imx_rpmsg_insert_workqueue(substream, msg, info);
}

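/*
 * Open the stream: tell the remote side, reset the local buffer counters,
 * apply the hardware constraints and set up the per-stream notification
 * timer.
 */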
static int imx_rpmsg_pcm_open(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_OPEN];
		msg->s_msg.header.cmd = TX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;

	} else {
		msg = &info->msg[RX_OPEN];
		msg->s_msg.header.cmd = RX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	info->send_message(msg, info);

	imx_rpmsg_pcm_hardware.period_bytes_max =
			imx_rpmsg_pcm_hardware.buffer_bytes_max / 2;

	snd_soc_set_runtime_hwparams(substream, &imx_rpmsg_pcm_hardware);

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	info->msg_drop_count[substream->stream] = 0;

	/* Create the timer */
	info->stream_timer[substream->stream].info = info;
	info->stream_timer[substream->stream].substream = substream;
	timer_setup(&info->stream_timer[substream->stream].timer,
		    imx_rpmsg_timer_callback, 0);
	return ret;
}

static int imx_rpmsg_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	/* Flush the workqueue so that TX_CLOSE is the last message */
	flush_workqueue(info->rpmsg_wq);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_CLOSE];
		msg->s_msg.header.cmd = TX_CLOSE;
	} else {
		msg = &info->msg[RX_CLOSE];
		msg->s_msg.header.cmd = RX_CLOSE;
	}

	info->send_message(msg, info);

	del_timer(&info->stream_timer[substream->stream].timer);

	rtd->dai_link->ignore_suspend = 0;

	if (info->msg_drop_count[substream->stream])
		dev_warn(rtd->dev, "Msg is dropped!, number is %d\n",
			 info->msg_drop_count[substream->stream]);

	return 0;
}

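/*
 * Decide whether low power audio (LPA) can be used for this stream and, if
 * so, flag the link so that suspend is ignored while the M core keeps
 * playing.
 */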
static int imx_rpmsg_pcm_prepare(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);

	/*
	 * LPA is enabled when all of the following hold: non-MMAP access,
	 * NONBLOCK mode, protocol version 2, and lpa enabled in the DTS.
	 */
	if ((runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	     runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) &&
	    rpmsg->enable_lpa) {
		/*
		 * Ignore the suspend operation in low power mode; the
		 * M core keeps playing while the A core is suspended.
		 */
		rtd->dai_link->ignore_suspend = 1;
		rpmsg->force_lpa = 1;
	} else {
		rpmsg->force_lpa = 0;
	}

	return 0;
}

static void imx_rpmsg_pcm_dma_complete(void *arg)
{
	struct snd_pcm_substream *substream = arg;

	snd_pcm_period_elapsed(substream);
}

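/*
 * Describe the DMA buffer (address, buffer size, period size) to the remote
 * side and register the period-elapsed callback for this stream.
 */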
static int imx_rpmsg_prepare_and_submit(struct snd_soc_component *component,
					struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_BUFFER];
		msg->s_msg.header.cmd = TX_BUFFER;
	} else {
		msg = &info->msg[RX_BUFFER];
		msg->s_msg.header.cmd = RX_BUFFER;
	}

	/* Send the buffer address and buffer size */
	msg->s_msg.param.buffer_addr = substream->runtime->dma_addr;
	msg->s_msg.param.buffer_size = snd_pcm_lib_buffer_bytes(substream);
	msg->s_msg.param.period_size = snd_pcm_lib_period_bytes(substream);
	msg->s_msg.param.buffer_tail = 0;

	info->num_period[substream->stream] = msg->s_msg.param.buffer_size /
					      msg->s_msg.param.period_size;

	info->callback[substream->stream] = imx_rpmsg_pcm_dma_complete;
	info->callback_param[substream->stream] = substream;

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_async_issue_pending(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_START];
		msg->s_msg.header.cmd = TX_START;
	} else {
		msg = &info->msg[RX_START];
		msg->s_msg.header.cmd = RX_START;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_restart(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_RESTART];
		msg->s_msg.header.cmd = TX_RESTART;
	} else {
		msg = &info->msg[RX_RESTART];
		msg->s_msg.header.cmd = RX_RESTART;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_pause(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PAUSE];
		msg->s_msg.header.cmd = TX_PAUSE;
	} else {
		msg = &info->msg[RX_PAUSE];
		msg->s_msg.header.cmd = RX_PAUSE;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

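/*
 * Stop the stream on the remote side and clear the local buffer counters so
 * the next start begins from offset zero.
 */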
static int imx_rpmsg_terminate_all(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_TERMINATE];
		msg->s_msg.header.cmd = TX_TERMINATE;
		/* Clear the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
	} else {
		msg = &info->msg[RX_TERMINATE];
		msg->s_msg.header.cmd = RX_TERMINATE;
		/* Clear the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	del_timer(&info->stream_timer[substream->stream].timer);

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

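/*
 * Map the ALSA trigger commands onto rpmsg commands. In LPA mode the
 * SUSPEND/RESUME triggers are ignored so playback continues on the M core.
 */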
static int imx_rpmsg_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		ret = imx_rpmsg_prepare_and_submit(component, substream);
		if (ret)
			return ret;
		ret = imx_rpmsg_async_issue_pending(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rpmsg->force_lpa)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = imx_rpmsg_restart(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!rpmsg->force_lpa) {
			if (runtime->info & SNDRV_PCM_INFO_PAUSE)
				ret = imx_rpmsg_pause(component, substream);
			else
				ret = imx_rpmsg_terminate_all(component, substream);
		}
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = imx_rpmsg_pause(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ret = imx_rpmsg_terminate_all(component, substream);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return 0;
}

/*
 * imx_rpmsg_pcm_ack
 *
 * Send the period index to the M core through rpmsg. Not every period index
 * is sent; unnecessary messages are skipped to reduce the pressure on the
 * rpmsg bandwidth.
 */
static int imx_rpmsg_pcm_ack(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	snd_pcm_uframes_t period_size = runtime->period_size;
	snd_pcm_sframes_t avail;
	struct timer_list *timer;
	struct rpmsg_msg *msg;
	unsigned long flags;
	int buffer_tail = 0;
	int written_num;

	if (!rpmsg->force_lpa)
		return 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	msg->s_msg.header.type = MSG_TYPE_C;

	buffer_tail = (frames_to_bytes(runtime, runtime->control->appl_ptr) %
		       snd_pcm_lib_buffer_bytes(substream));
	buffer_tail = buffer_tail / snd_pcm_lib_period_bytes(substream);

	/* The period index has advanced */
	if (buffer_tail != msg->s_msg.param.buffer_tail) {
		written_num = buffer_tail - msg->s_msg.param.buffer_tail;
		if (written_num < 0)
			written_num += runtime->periods;

		msg->s_msg.param.buffer_tail = buffer_tail;

		/* Update the notification message to the latest state */
		spin_lock_irqsave(&info->lock[substream->stream], flags);
		memcpy(&info->notify[substream->stream], msg,
		       sizeof(struct rpmsg_s_msg));
		info->notify_updated[substream->stream] = true;
		spin_unlock_irqrestore(&info->lock[substream->stream], flags);

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			avail = snd_pcm_playback_hw_avail(runtime);
		else
			avail = snd_pcm_capture_hw_avail(runtime);

		timer = &info->stream_timer[substream->stream].timer;
		/*
		 * If less than one period of data was in the buffer before
		 * this fill, the M core may be running short of data, so
		 * send the message immediately to let it know the pointer
		 * has been updated.
		 * If more than one period was already buffered, the M core
		 * has enough data, so the message can be delayed by one
		 * period (using the timer) to reduce the number of messages
		 * in the workqueue; the pointer may be updated again by a
		 * later ack, and the latest value is what gets sent to the
		 * M core.
		 */
		if ((avail - written_num * period_size) <= period_size) {
			imx_rpmsg_insert_workqueue(substream, msg, info);
		} else if (rpmsg->force_lpa && !timer_pending(timer)) {
			int time_msec;

			time_msec = (int)(runtime->period_size * 1000 / runtime->rate);
			mod_timer(timer, jiffies + msecs_to_jiffies(time_msec));
		}
	}

	return 0;
}

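/*
 * Allocate the fixed DMA buffer for the PCM and size the hardware
 * definition according to the buffer size provided by the CPU DAI driver.
 */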
static int imx_rpmsg_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	imx_rpmsg_pcm_hardware.buffer_bytes_max = rpmsg->buffer_size;
	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_WC,
					    pcm->card->dev, rpmsg->buffer_size);
}

static const struct snd_soc_component_driver imx_rpmsg_soc_component = {
	.name		= IMX_PCM_DRV_NAME,
	.pcm_construct	= imx_rpmsg_pcm_new,
	.open		= imx_rpmsg_pcm_open,
	.close		= imx_rpmsg_pcm_close,
	.hw_params	= imx_rpmsg_pcm_hw_params,
	.trigger	= imx_rpmsg_pcm_trigger,
	.pointer	= imx_rpmsg_pcm_pointer,
	.ack		= imx_rpmsg_pcm_ack,
	.prepare	= imx_rpmsg_pcm_prepare,
};

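/*
 * Work handler: flush any pending pointer notifications first (they are
 * time critical when the remote side is short of data), then send the
 * queued command itself unless it was such a notification.
 */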
static void imx_rpmsg_pcm_work(struct work_struct *work)
{
	struct work_of_rpmsg *work_of_rpmsg;
	bool is_notification = false;
	struct rpmsg_info *info;
	struct rpmsg_msg msg;
	unsigned long flags;

	work_of_rpmsg = container_of(work, struct work_of_rpmsg, work);
	info = work_of_rpmsg->info;

	/*
	 * For every work item, first check whether a period update is
	 * pending: the M core may be short of data and needs to be told
	 * immediately that new data is available.
	 */
	spin_lock_irqsave(&info->lock[TX], flags);
	if (info->notify_updated[TX]) {
		memcpy(&msg, &info->notify[TX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[TX] = false;
		spin_unlock_irqrestore(&info->lock[TX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[TX], flags);
	}

	spin_lock_irqsave(&info->lock[RX], flags);
	if (info->notify_updated[RX]) {
		memcpy(&msg, &info->notify[RX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[RX] = false;
		spin_unlock_irqrestore(&info->lock[RX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[RX], flags);
	}

	/* Skip the notification message, it has already been handled above */
	if (work_of_rpmsg->msg.s_msg.header.type == MSG_TYPE_C &&
	    (work_of_rpmsg->msg.s_msg.header.cmd == TX_PERIOD_DONE ||
	     work_of_rpmsg->msg.s_msg.header.cmd == RX_PERIOD_DONE))
		is_notification = true;

	if (!is_notification)
		info->send_message(&work_of_rpmsg->msg, info);

	/* Update the read index */
	spin_lock_irqsave(&info->wq_lock, flags);
	info->work_read_index++;
	info->work_read_index %= WORK_MAX_NUM;
	spin_unlock_irqrestore(&info->wq_lock, flags);
}

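/*
 * Probe: create the ordered workqueue, initialize the message templates and
 * register the platform component that the rpmsg machine driver links with.
 */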
static int imx_rpmsg_pcm_probe(struct platform_device *pdev)
{
	struct snd_soc_component *component;
	struct rpmsg_info *info;
	int ret, i;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	info->rpdev = container_of(pdev->dev.parent, struct rpmsg_device, dev);
	info->dev = &pdev->dev;
	/* Set up the work queue */
	info->rpmsg_wq = alloc_ordered_workqueue(info->rpdev->id.name,
						 WQ_HIGHPRI |
						 WQ_UNBOUND |
						 WQ_FREEZABLE);
	if (!info->rpmsg_wq) {
		dev_err(&pdev->dev, "workqueue create failed\n");
		return -ENOMEM;
	}

	/* Initialize the write index to 1 so it differs from the read index */
	info->work_write_index = 1;
	info->send_message = imx_rpmsg_pcm_send_message;

	for (i = 0; i < WORK_MAX_NUM; i++) {
		INIT_WORK(&info->work_list[i].work, imx_rpmsg_pcm_work);
		info->work_list[i].info = info;
	}

	/* Initialize the common message header fields */
	for (i = 0; i < MSG_MAX_NUM; i++) {
		info->msg[i].s_msg.header.cate = IMX_RPMSG_AUDIO;
		info->msg[i].s_msg.header.major = IMX_RMPSG_MAJOR;
		info->msg[i].s_msg.header.minor = IMX_RMPSG_MINOR;
		info->msg[i].s_msg.header.type = MSG_TYPE_A;
		info->msg[i].s_msg.param.audioindex = 0;
	}

	init_completion(&info->cmd_complete);
	mutex_init(&info->msg_lock);
	spin_lock_init(&info->lock[TX]);
	spin_lock_init(&info->lock[RX]);
	spin_lock_init(&info->wq_lock);

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &imx_rpmsg_soc_component,
					      NULL, 0);
	if (ret)
		goto fail;

	component = snd_soc_lookup_component(&pdev->dev, NULL);
	if (!component) {
		ret = -EINVAL;
		goto fail;
	}

	/* The platform component name is used by the machine driver to link with it */
	component->name = info->rpdev->id.name;

#ifdef CONFIG_DEBUG_FS
	component->debugfs_prefix = "rpmsg";
#endif

	return 0;

fail:
	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return ret;
}

static int imx_rpmsg_pcm_remove(struct platform_device *pdev)
{
	struct rpmsg_info *info = platform_get_drvdata(pdev);

	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return 0;
}

#ifdef CONFIG_PM
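/*
 * Runtime PM: add a zero CPU latency QoS request while the device is active
 * and remove it again when it goes idle.
 */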
static int imx_rpmsg_pcm_runtime_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_add_request(&info->pm_qos_req, 0);

	return 0;
}

static int imx_rpmsg_pcm_runtime_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_remove_request(&info->pm_qos_req);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
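/*
 * System sleep: tell the remote side that both the TX and RX paths are
 * about to suspend or have just resumed.
 */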
static int imx_rpmsg_pcm_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_SUSPEND];
	rpmsg_rx = &info->msg[RX_SUSPEND];

	rpmsg_tx->s_msg.header.cmd = TX_SUSPEND;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_SUSPEND;
	info->send_message(rpmsg_rx, info);

	return 0;
}

static int imx_rpmsg_pcm_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_RESUME];
	rpmsg_rx = &info->msg[RX_RESUME];

	rpmsg_tx->s_msg.header.cmd = TX_RESUME;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_RESUME;
	info->send_message(rpmsg_rx, info);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops imx_rpmsg_pcm_pm_ops = {
	SET_RUNTIME_PM_OPS(imx_rpmsg_pcm_runtime_suspend,
			   imx_rpmsg_pcm_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(imx_rpmsg_pcm_suspend,
				imx_rpmsg_pcm_resume)
};

static struct platform_driver imx_pcm_rpmsg_driver = {
	.probe  = imx_rpmsg_pcm_probe,
	.remove = imx_rpmsg_pcm_remove,
	.driver = {
		.name = IMX_PCM_DRV_NAME,
		.pm = &imx_rpmsg_pcm_pm_ops,
	},
};
module_platform_driver(imx_pcm_rpmsg_driver);

MODULE_DESCRIPTION("Freescale SoC Audio RPMSG PCM interface");
MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
MODULE_ALIAS("platform:" IMX_PCM_DRV_NAME);
MODULE_LICENSE("GPL v2");