// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
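
/*
 * Worked example (illustrative only): the 32-bit message header uses
 * bits [7:0] for the message ID, [9:8] for the type, [17:10] for the
 * protocol ID and [27:18] for the sequence token, so a header value of
 * 0x45006 decodes as:
 *
 *	MSG_XTRACT_ID(0x45006)      == 0x06  (message ID 6)
 *	MSG_XTRACT_TYPE(0x45006)    == 0x0   (MSG_TYPE_COMMAND)
 *	MSG_XTRACT_PROT_ID(0x45006) == 0x14  (protocol ID 0x14)
 *	MSG_XTRACT_TOKEN(0x45006)   == 0x1   (sequence token 1)
 *
 * The token field is 10 bits wide, hence MSG_TOKEN_MAX evaluates to 1024.
 */
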
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};
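
/*
 * Layout note (derived from the structure above, byte offsets,
 * illustrative only): reserved @0, channel_status @4, reserved1 @8,
 * flags @16, length @20, msg_header @24 and msg_payload from @28
 * onwards. The 'length' field covers msg_header plus the payload,
 * which is why the response fetch below subtracts the header and
 * status words from it.
 */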

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
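
/*
 * For instance (illustrative only), scmi_to_linux_errno(SCMI_ERR_BUSY)
 * negates -6 into index 6 and returns -EBUSY, while any firmware status
 * outside the table, e.g. -11, falls through to the -EIO default.
 */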

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
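
/*
 * Example (illustrative only): for hdr->id = 0x6, hdr->seq = 0x1 and
 * hdr->protocol_id = 0x14, pack_scmi_header() yields
 * 0x06 | (0x1 << 18) | (0x14 << 10) == 0x45006, which the worked
 * example near the MSG_* masks above decodes back field by field.
 */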

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/*
	 * Ideally the channel must be free by now. However, if the OS timed
	 * out the last request while the platform continued to process it,
	 * wait here until the platform releases the shared memory; otherwise
	 * we may end up overwriting its response with a new message payload,
	 * or vice versa.
	 */
	spin_until_cond(ioread32(&mem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function used by the various message functions exposed to clients
 * of this driver to allocate a message slot for a transfer.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated xfer on success, else an ERR_PTR
 * encoding the corresponding error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_rx_callback() - mailbox client callback for received messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message, maps it to the corresponding transfer
 * information and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u8 msg_type;
	u32 msg_hdr;
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	msg_hdr = ioread32(&mem->msg_header);
	msg_type = MSG_XTRACT_TYPE(msg_hdr);
	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	if (msg_type == MSG_TYPE_NOTIFICATION)
		return; /* Notifications not yet supported */

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	scmi_dump_header_dbg(dev, &xfer->hdr);

	scmi_fetch_response(xfer, mem);

	if (msg_type == MSG_TYPE_DELAYED_RESP)
		complete(xfer->async_done);
	else
		complete(&xfer->done);
}

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT if no response was received, or the
 * corresponding error if the transmit itself failed.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}

void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
			    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT if no delayed response was received, or
 * the corresponding error if the transmit failed.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;
	return ret;
}

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 * corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
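
/*
 * Typical calling sequence (an illustrative sketch only; scmi_version_get()
 * below is a concrete in-tree instance of this pattern):
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, msg_id, prot_id, tx_size,
 *				 rx_size, &t);
 *	if (ret)
 *		return ret;
 *	// fill the request payload, if any, into t->tx.buf
 *	ret = scmi_do_xfer(handle, t);	// send and wait for the response
 *	if (!ret)
 *		;			// parse the reply from t->rx.buf
 *	scmi_xfer_put(handle, t);	// release the message slot
 */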

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Fetches the version of the given protocol and stores it in @version.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released, -EINVAL if a NULL @handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->tx_minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_mailbox_check(struct device_node *np, int idx)
{
	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
					  idx, NULL);
}

static int scmi_mailbox_chan_validate(struct device *cdev)
{
	int num_mb, num_sh, ret = 0;
	struct device_node *np = cdev->of_node;

	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
	/* Bail out if mboxes and shmem descriptors are inconsistent */
	if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
		dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
			 of_node_full_name(np));
		return -EINVAL;
	}

	if (num_sh > 1) {
		struct device_node *np_tx, *np_rx;

		np_tx = of_parse_phandle(np, "shmem", 0);
		np_rx = of_parse_phandle(np, "shmem", 1);
		/* SCMI Tx and Rx shared mem areas have to be distinct */
		if (!np_tx || !np_rx || np_tx == np_rx) {
			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
				 of_node_full_name(np));
			ret = -EINVAL;
		}

		of_node_put(np_tx);
		of_node_put(np_rx);
	}

	return ret;
}
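
/*
 * A channel layout this validation accepts looks roughly like the
 * following devicetree fragment (illustrative only; node names and
 * phandles are placeholders):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0>, <&mailbox 1>;
 *			shmem = <&cpu_scp_hpri>, <&cpu_scp_lpri>;
 *			...
 *		};
 *	};
 *
 * i.e. one mbox entry per shmem region, with distinct Tx and Rx areas
 * whenever two channels are described.
 */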

static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
				int prot_id, bool tx)
{
	int ret, idx;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;
	struct idr *idr;
	const char *desc = tx ? "Tx" : "Rx";

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	if (scmi_mailbox_check(np, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	ret = scmi_mailbox_chan_validate(dev);
	if (ret)
		return ret;

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	shmem = of_parse_phandle(np, "shmem", idx);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
		return -EADDRNOTAVAIL;
	}

	cinfo->chan = mbox_request_channel(cl, idx);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI %s mailbox\n",
				desc);
		return ret;
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_mbox_chan_setup(info, dev, prot_id, false);

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np, 0)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}

static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,			/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");