// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux interrupt framework to expose a virtual interrupt
 * controller for each inbound entry and the qcom_smem_state interface for
 * each outbound entry.
 */

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:	magic number
 * @version:	version - must be 1
 * @features:	features flag - currently only SMP2P_FEATURE_SSR_ACK is defined
 * @local_pid:	processor id of sending end
 * @remote_pid:	processor id of receiving end
 * @total_entries: number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries: number of allocated entries
 * @flags:	restart done/ack bits, used for the SSR handshake
 * @entries:	individual communication entries
 * @name:	name of the entry
 * @value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND 0
#define SMP2P_OUTBOUND 1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SSR ack feature is negotiated with remote processor
 * @ssr_ack:	current state of the local ack bit
 * @negotiation_done: whether negotiating finished
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

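/*
 * Signal the remote processor that the outbound item or one of its entries
 * has been updated, either through the mailbox channel or by writing the
 * legacy syscon based IPC register.
 */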
static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

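/*
 * Report whether the remote has toggled its RESTART_DONE flag since we last
 * acknowledged it, i.e. whether a subsystem restart still needs an ack.
 */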
static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

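/*
 * Acknowledge a remote restart by mirroring the new RESTART_DONE state into
 * the outbound RESTART_ACK flag and kicking the remote processor.
 */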
static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

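/*
 * Negotiate the protocol features with the remote processor: once both sides
 * publish the same version, the supported feature set is the intersection of
 * the two feature masks.
 */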
static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
	}
}

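/*
 * Scan the inbound item for newly published entries and bind them to their
 * local smp2p_entry by name, then deliver a nested interrupt for every bit
 * that changed in a direction the consumer has configured (rising/falling).
 */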
static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side about newly allocated entries
 * or any changes to the state bits of existing entries.
 *
 * Return: always IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}

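/*
 * irq_chip callbacks for the per-entry virtual interrupt controller. Masking
 * and trigger configuration only update the local bitmaps that
 * qcom_smp2p_notify_in() consults when dispatching nested interrupts.
 */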
static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name = "smp2p",
	.irq_mask = smp2p_mask_irq,
	.irq_unmask = smp2p_unmask_irq,
	.irq_set_type = smp2p_set_irq_type,
};

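/* Hook up the smp2p irq_chip and nested-thread handling for a mapped hwirq */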
static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

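/*
 * Register a 32 bit wide linear irq_domain for an inbound entry so that
 * consumers can request an interrupt per state bit.
 */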
static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

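/*
 * qcom_smem_state update_bits callback: read-modify-write the outbound entry
 * value under the entry spinlock and kick the remote processor only when the
 * value actually changed.
 */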
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

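/*
 * Claim the next free slot in the outbound smem item for this entry and
 * expose it to consumers through the qcom_smem_state interface.
 */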
static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

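/*
 * Allocate and initialize the outbound smem item for this edge. The version
 * field is written last, after a write barrier, so the remote processor never
 * observes a partially initialized header.
 */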
static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

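/*
 * Fallback for platforms without a mailbox: resolve the legacy "qcom,ipc"
 * phandle into a syscon regmap plus the register offset and bit used to
 * signal the remote processor.
 */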
static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

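/*
 * Parse the edge description from devicetree, allocate the outbound item,
 * register one inbound or outbound interface per child node and finally
 * hook up the inbound interrupt as an optional wakeup source.
 */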
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			of_node_put(node);
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0) {
			of_node_put(node);
			goto unwind_interfaces;
		}

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
	 * by default. User space can decide to enable it depending on its
	 * use cases. For example, if the remoteproc crashes and the device
	 * wants to handle it immediately (e.g. to not miss phone calls), it
	 * can enable the wakeup source from user space, while other devices
	 * which do not have a proper autosleep feature may want to handle it
	 * together with other wakeup events (e.g. the power button) instead
	 * of waking up immediately.
	 */
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

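/* Undo everything done in probe, leaving the outbound item marked empty */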
static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;

	return 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");