// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
4 */
5
6#include <linux/clk/tegra.h>
7#include <linux/genalloc.h>
8#include <linux/mailbox_client.h>
9#include <linux/of.h>
10#include <linux/of_address.h>
11#include <linux/of_device.h>
12#include <linux/platform_device.h>
13#include <linux/pm.h>
14#include <linux/semaphore.h>
15#include <linux/sched/clock.h>
16
17#include <soc/tegra/bpmp.h>
18#include <soc/tegra/bpmp-abi.h>
19#include <soc/tegra/ivc.h>
20
21#include "bpmp-private.h"
22
23#define MSG_ACK BIT(0)
24#define MSG_RING BIT(1)
25#define TAG_SZ 32
26
27static inline const struct tegra_bpmp_ops *
28channel_to_ops(struct tegra_bpmp_channel *channel)
29{
30 struct tegra_bpmp *bpmp = channel->bpmp;
31
32 return bpmp->soc->ops;
33}
34
35struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
36{
37 struct platform_device *pdev;
38 struct tegra_bpmp *bpmp;
39 struct device_node *np;
40
41 np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
42 if (!np)
43 return ERR_PTR(-ENOENT);
44
45 pdev = of_find_device_by_node(np);
46 if (!pdev) {
47 bpmp = ERR_PTR(-ENODEV);
48 goto put;
49 }
50
51 bpmp = platform_get_drvdata(pdev);
52 if (!bpmp) {
53 bpmp = ERR_PTR(-EPROBE_DEFER);
54 put_device(&pdev->dev);
55 goto put;
56 }
57
58put:
59 of_node_put(np);
60 return bpmp;
61}
62EXPORT_SYMBOL_GPL(tegra_bpmp_get);
63
64void tegra_bpmp_put(struct tegra_bpmp *bpmp)
65{
66 if (bpmp)
67 put_device(bpmp->dev);
68}
69EXPORT_SYMBOL_GPL(tegra_bpmp_put);
70
71static int
72tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
73{
74 struct tegra_bpmp *bpmp = channel->bpmp;
75 unsigned int count;
76 int index;
77
78 count = bpmp->soc->channels.thread.count;
79
80 index = channel - channel->bpmp->threaded_channels;
81 if (index < 0 || index >= count)
82 return -EINVAL;
83
84 return index;
85}
86
87static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
88{
89 return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
90 (msg->rx.size <= MSG_DATA_MIN_SZ) &&
91 (msg->tx.size == 0 || msg->tx.data) &&
92 (msg->rx.size == 0 || msg->rx.data);
93}
94
95static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
96{
97 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
98
99 return ops->is_response_ready(channel);
100}
101
102static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
103{
104 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
105
106 return ops->is_request_ready(channel);
107}
108
109static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
110{
111 unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
112 ktime_t end;
113
114 end = ktime_add_us(ktime_get(), timeout);
115
116 do {
117 if (tegra_bpmp_is_response_ready(channel))
118 return 0;
119 } while (ktime_before(ktime_get(), end));
120
121 return -ETIMEDOUT;
122}
123
124static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
125{
126 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
127
128 return ops->ack_response(channel);
129}
130
131static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
132{
133 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
134
135 return ops->ack_request(channel);
136}
137
138static bool
139tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
140{
141 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
142
143 return ops->is_request_channel_free(channel);
144}
145
146static bool
147tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
148{
149 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
150
151 return ops->is_response_channel_free(channel);
152}
153
154static int
155tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
156{
157 unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
158 ktime_t start, now;
159
160 start = ns_to_ktime(local_clock());
161
162 do {
163 if (tegra_bpmp_is_request_channel_free(channel))
164 return 0;
165
166 now = ns_to_ktime(local_clock());
167 } while (ktime_us_delta(now, start) < timeout);
168
169 return -ETIMEDOUT;
170}
171
172static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
173{
174 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
175
176 return ops->post_request(channel);
177}
178
179static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
180{
181 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
182
183 return ops->post_response(channel);
184}
185
186static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
187{
188 return bpmp->soc->ops->ring_doorbell(bpmp);
189}
190
/*
 * Copy a response out of @channel's inbound frame.
 *
 * Copies up to @size bytes of payload into @data (callers bound @size
 * via tegra_bpmp_message_valid()), acknowledges the response to the
 * transport and stores the firmware's return code in *@ret.
 *
 * Returns 0 on success or a negative error code if the acknowledge
 * fails.
 *
 * NOTE(review): the return code is read from channel->ib->code AFTER
 * the response has been acked — presumably the frame stays mapped and
 * stable until the next exchange on this channel; confirm against the
 * transport implementation before reordering.
 */
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}
207
/*
 * Read the response of a threaded transaction and release the channel.
 *
 * Pairs with tegra_bpmp_write_threaded(): that function downs
 * bpmp->threaded.lock and sets the channel's bit in
 * threaded.allocated; this function clears the bit (under bpmp->lock)
 * and ups the semaphore again — on both the success and the error
 * path.
 *
 * Returns 0 on success or a negative error code.
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	/* the channel slot may be handed out again from here on */
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	/* release the slot taken in tegra_bpmp_write_threaded() */
	up(&bpmp->threaded.lock);

	return err;
}
232
233static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
234 unsigned int mrq, unsigned long flags,
235 const void *data, size_t size)
236{
237 channel->ob->code = mrq;
238 channel->ob->flags = flags;
239
240 if (data && size > 0)
241 memcpy(channel->ob->data, data, size);
242
243 return tegra_bpmp_post_request(channel);
244}
245
/*
 * Claim a free threaded channel and post an MRQ request on it.
 *
 * On success the returned channel has its bit set in both
 * threaded.allocated and threaded.busy, and one slot of
 * bpmp->threaded.lock is held — tegra_bpmp_channel_read() clears the
 * allocated bit and ups the semaphore once the response has been
 * consumed (tegra_bpmp_handle_rx() clears the busy bit). On failure an
 * ERR_PTR() is returned and the semaphore slot has been released.
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	/* bound the wait for a free slot by the thread-channel timeout */
	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	/*
	 * MSG_RING makes tegra_bpmp_channel_signal() complete the
	 * waiter once the response arrives.
	 */
	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
296
297static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
298 unsigned int mrq, unsigned long flags,
299 const void *data, size_t size)
300{
301 int err;
302
303 err = tegra_bpmp_wait_request_channel_free(channel);
304 if (err < 0)
305 return err;
306
307 return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
308}
309
/**
 * tegra_bpmp_transfer_atomic() - synchronously exchange a message with
 * the BPMP with interrupts disabled.
 * @bpmp: BPMP instance
 * @msg: message to send; the firmware return code is stored in
 *       msg->rx.ret
 *
 * Uses the dedicated atomic TX channel and busy-waits for the
 * response. Must be called with interrupts disabled.
 *
 * Return: 0 on success or a negative error code.
 */
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	/* serialize writers on the shared atomic TX channel */
	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_wait_response(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
347
/**
 * tegra_bpmp_transfer() - exchange a message with the BPMP from
 * sleeping context.
 * @bpmp: BPMP instance
 * @msg: message to send; the firmware return code is stored in
 *       msg->rx.ret
 *
 * Claims a threaded channel, rings the doorbell and sleeps on the
 * channel's completion until tegra_bpmp_handle_rx() signals that the
 * response has arrived. Must not be called with interrupts disabled.
 *
 * Return: 0 on success or a negative error code.
 *
 * NOTE(review): on the error paths after tegra_bpmp_write_threaded()
 * succeeds (doorbell failure, completion timeout) the channel's
 * allocated/busy bits and the threaded.lock semaphore slot are never
 * released, so the channel is leaked. Reclaiming it is not obviously
 * safe — the firmware may still deliver a late response — but this
 * deserves a closer look.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
380
381static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
382 unsigned int mrq)
383{
384 struct tegra_bpmp_mrq *entry;
385
386 list_for_each_entry(entry, &bpmp->mrqs, list)
387 if (entry->mrq == mrq)
388 return entry;
389
390 return NULL;
391}
392
/**
 * tegra_bpmp_mrq_return() - send a response to a firmware-initiated
 * request.
 * @channel: channel the request arrived on
 * @code: response code to return to the firmware
 * @data: response payload (may be NULL when @size is 0)
 * @size: payload size, at most MSG_DATA_MIN_SZ
 *
 * The inbound frame is acknowledged first; a response is only posted
 * when the request asked for one (MSG_ACK), and the doorbell is only
 * rung when the request asked for that too (MSG_RING).
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* latch the request flags before the frame is acknowledged */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	/* the requester did not ask for a response */
	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	channel->ob->code = code;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
429
/*
 * Dispatch an incoming firmware request to its registered handler.
 *
 * bpmp->lock is held across the handler call, so handlers must not
 * sleep. Requests without a registered handler are answered with
 * -EINVAL and a zero payload.
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
450
451int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
452 tegra_bpmp_mrq_handler_t handler, void *data)
453{
454 struct tegra_bpmp_mrq *entry;
455 unsigned long flags;
456
457 if (!handler)
458 return -EINVAL;
459
460 entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
461 if (!entry)
462 return -ENOMEM;
463
464 spin_lock_irqsave(&bpmp->lock, flags);
465
466 entry->mrq = mrq;
467 entry->handler = handler;
468 entry->data = data;
469 list_add(&entry->list, &bpmp->mrqs);
470
471 spin_unlock_irqrestore(&bpmp->lock, flags);
472
473 return 0;
474}
475EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
476
477void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
478{
479 struct tegra_bpmp_mrq *entry;
480 unsigned long flags;
481
482 spin_lock_irqsave(&bpmp->lock, flags);
483
484 entry = tegra_bpmp_find_mrq(bpmp, mrq);
485 if (!entry)
486 goto unlock;
487
488 list_del(&entry->list);
489 devm_kfree(bpmp->dev, entry);
490
491unlock:
492 spin_unlock_irqrestore(&bpmp->lock, flags);
493}
494EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
495
496bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
497{
498 struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
499 struct mrq_query_abi_response resp;
500 struct tegra_bpmp_message msg = {
501 .mrq = MRQ_QUERY_ABI,
502 .tx = {
503 .data = &req,
504 .size = sizeof(req),
505 },
506 .rx = {
507 .data = &resp,
508 .size = sizeof(resp),
509 },
510 };
511 int ret;
512
513 ret = tegra_bpmp_transfer(bpmp, &msg);
514 if (ret || msg.rx.ret)
515 return false;
516
517 return resp.status == 0;
518}
519EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
520
521static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
522 struct tegra_bpmp_channel *channel,
523 void *data)
524{
525 struct mrq_ping_request *request;
526 struct mrq_ping_response response;
527
528 request = (struct mrq_ping_request *)channel->ib->data;
529
530 memset(&response, 0, sizeof(response));
531 response.reply = request->challenge << 1;
532
533 tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
534}
535
536static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
537{
538 struct mrq_ping_response response;
539 struct mrq_ping_request request;
540 struct tegra_bpmp_message msg;
541 unsigned long flags;
542 ktime_t start, end;
543 int err;
544
545 memset(&request, 0, sizeof(request));
546 request.challenge = 1;
547
548 memset(&response, 0, sizeof(response));
549
550 memset(&msg, 0, sizeof(msg));
551 msg.mrq = MRQ_PING;
552 msg.tx.data = &request;
553 msg.tx.size = sizeof(request);
554 msg.rx.data = &response;
555 msg.rx.size = sizeof(response);
556
557 local_irq_save(flags);
558 start = ktime_get();
559 err = tegra_bpmp_transfer_atomic(bpmp, &msg);
560 end = ktime_get();
561 local_irq_restore(flags);
562
563 if (!err)
564 dev_dbg(bpmp->dev,
565 "ping ok: challenge: %u, response: %u, time: %lld\n",
566 request.challenge, response.reply,
567 ktime_to_us(ktime_sub(end, start)));
568
569 return err;
570}
571
/*
 * Deprecated version of the tag query (MRQ_QUERY_TAG): the firmware
 * writes the tag into a DMA-coherent bounce buffer whose bus address
 * is passed in the request.
 *
 * @size must be exactly TAG_SZ. Returns 0 on success or a negative
 * error code.
 */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	/*
	 * NOTE(review): GFP_DMA32 presumably keeps the buffer below 4 GiB
	 * for the firmware's benefit — confirm against the BPMP ABI.
	 */
	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	/* atomic transfers require interrupts to be disabled */
	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}
610
611static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
612 size_t size)
613{
614 if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
615 struct mrq_query_fw_tag_response resp;
616 struct tegra_bpmp_message msg = {
617 .mrq = MRQ_QUERY_FW_TAG,
618 .rx = {
619 .data = &resp,
620 .size = sizeof(resp),
621 },
622 };
623 int err;
624
625 if (size != sizeof(resp.tag))
626 return -EINVAL;
627
628 err = tegra_bpmp_transfer(bpmp, &msg);
629
630 if (err)
631 return err;
632 if (msg.rx.ret < 0)
633 return -EINVAL;
634
635 memcpy(tag, resp.tag, sizeof(resp.tag));
636 return 0;
637 }
638
639 return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
640}
641
642static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
643{
644 unsigned long flags = channel->ob->flags;
645
646 if ((flags & MSG_RING) == 0)
647 return;
648
649 complete(&channel->completion);
650}
651
652void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
653{
654 struct tegra_bpmp_channel *channel;
655 unsigned int i, count;
656 unsigned long *busy;
657
658 channel = bpmp->rx_channel;
659 count = bpmp->soc->channels.thread.count;
660 busy = bpmp->threaded.busy;
661
662 if (tegra_bpmp_is_request_ready(channel))
663 tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
664
665 spin_lock(&bpmp->lock);
666
667 for_each_set_bit(i, busy, count) {
668 struct tegra_bpmp_channel *channel;
669
670 channel = &bpmp->threaded_channels[i];
671
672 if (tegra_bpmp_is_response_ready(channel)) {
673 tegra_bpmp_channel_signal(channel);
674 clear_bit(i, busy);
675 }
676 }
677
678 spin_unlock(&bpmp->lock);
679}
680
/*
 * Probe: allocate per-instance state, bring up the SoC-specific
 * transport, verify communication with a ping, then populate child
 * devices and the clock/reset/power-domain providers described in DT.
 *
 * All allocations are device-managed; only the transport (ops->init)
 * and the MRQ_PING handler need explicit unwinding on error.
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* the semaphore counts free threaded-channel slots */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	/* bitmaps tracking allocated/busy threaded channels */
	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
	                                GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	/* set up the SoC-specific transport (IVC/mailbox) */
	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	/* verify two-way communication before exposing any services */
	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	/* providers are only registered when the DT advertises them */
	if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	/* debugfs is best-effort: failure is logged but not fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}
789
790static int __maybe_unused tegra_bpmp_resume(struct device *dev)
791{
792 struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
793
794 if (bpmp->soc->ops->resume)
795 return bpmp->soc->ops->resume(bpmp);
796 else
797 return 0;
798}
799
/* resume_early runs before normal device resume callbacks */
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.resume_early = tegra_bpmp_resume,
};
803
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
/*
 * Channel layout and timeouts (in microseconds — see USEC_PER_SEC)
 * for SoCs served by the tegra186 transport ops.
 */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.ops = &tegra186_bpmp_ops,
	.num_resets = 193,
};
#endif
827
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
/*
 * Channel layout and timeouts (in microseconds) for Tegra210, which
 * uses its own transport ops and explicit per-channel counts.
 */
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 1,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 4,
			.count = 1,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 8,
			.count = 1,
			.timeout = 0,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif
850
851static const struct of_device_id tegra_bpmp_match[] = {
852#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
853 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
854 { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
855#endif
856#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
857 { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
858#endif
859 { }
860};
861
/* Platform driver; probe-only (no .remove callback is provided). */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
	},
	.probe = tegra_bpmp_probe,
};
870
/*
 * Register at core_initcall time so the BPMP (which backs the clock,
 * reset and power-domain providers set up in probe) is available early
 * in boot.
 */
static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);