blob: 391c6d4855ff491eff8104d01e92b4993b6ff2bb [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/sched/signal.h>
18#include <linux/wait.h>
19#include <linux/delay.h>
20#include <linux/slab.h>
21#include <linux/pm_runtime.h>
22
23#include <linux/mei.h>
24
25#include "mei_dev.h"
26#include "hbm.h"
27#include "client.h"
28
29/**
30 * mei_me_cl_init - initialize me client
31 *
32 * @me_cl: me client
33 */
34void mei_me_cl_init(struct mei_me_client *me_cl)
35{
36 INIT_LIST_HEAD(&me_cl->list);
37 kref_init(&me_cl->refcnt);
38}
39
40/**
41 * mei_me_cl_get - increases me client refcount
42 *
43 * @me_cl: me client
44 *
45 * Locking: called under "dev->device_lock" lock
46 *
47 * Return: me client or NULL
48 */
49struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
50{
51 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
52 return me_cl;
53
54 return NULL;
55}
56
57/**
58 * mei_me_cl_release - free me client
59 *
60 * Locking: called under "dev->device_lock" lock
61 *
62 * @ref: me_client refcount
63 */
64static void mei_me_cl_release(struct kref *ref)
65{
66 struct mei_me_client *me_cl =
67 container_of(ref, struct mei_me_client, refcnt);
68
69 kfree(me_cl);
70}
71
72/**
73 * mei_me_cl_put - decrease me client refcount and free client if necessary
74 *
75 * Locking: called under "dev->device_lock" lock
76 *
77 * @me_cl: me client
78 */
79void mei_me_cl_put(struct mei_me_client *me_cl)
80{
81 if (me_cl)
82 kref_put(&me_cl->refcnt, mei_me_cl_release);
83}
84
85/**
86 * __mei_me_cl_del - delete me client from the list and decrease
87 * reference counter
88 *
89 * @dev: mei device
90 * @me_cl: me client
91 *
92 * Locking: dev->me_clients_rwsem
93 */
94static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
95{
96 if (!me_cl)
97 return;
98
99 list_del_init(&me_cl->list);
100 mei_me_cl_put(me_cl);
101}
102
/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Takes dev->me_clients_rwsem for writing around the unlocked helper.
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
116
/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Takes dev->me_clients_rwsem for writing; the list takes over
 * the caller's reference on @me_cl.
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
129
130/**
131 * __mei_me_cl_by_uuid - locate me client by uuid
132 * increases ref count
133 *
134 * @dev: mei device
135 * @uuid: me client uuid
136 *
137 * Return: me client or NULL if not found
138 *
139 * Locking: dev->me_clients_rwsem
140 */
141static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
142 const uuid_le *uuid)
143{
144 struct mei_me_client *me_cl;
145 const uuid_le *pn;
146
147 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
148
149 list_for_each_entry(me_cl, &dev->me_clients, list) {
150 pn = &me_cl->props.protocol_name;
151 if (uuid_le_cmp(*uuid, *pn) == 0)
152 return mei_me_cl_get(me_cl);
153 }
154
155 return NULL;
156}
157
/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *     increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found; caller must drop the
 *         reference with mei_me_cl_put()
 *
 * Locking: takes and releases dev->me_clients_rwsem for reading
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
180
181/**
182 * mei_me_cl_by_id - locate me client by client id
183 * increases ref count
184 *
185 * @dev: the device structure
186 * @client_id: me client id
187 *
188 * Return: me client or NULL if not found
189 *
190 * Locking: dev->me_clients_rwsem
191 */
192struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
193{
194
195 struct mei_me_client *__me_cl, *me_cl = NULL;
196
197 down_read(&dev->me_clients_rwsem);
198 list_for_each_entry(__me_cl, &dev->me_clients, list) {
199 if (__me_cl->client_id == client_id) {
200 me_cl = mei_me_cl_get(__me_cl);
201 break;
202 }
203 }
204 up_read(&dev->me_clients_rwsem);
205
206 return me_cl;
207}
208
209/**
210 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
211 * increases ref count
212 *
213 * @dev: the device structure
214 * @uuid: me client uuid
215 * @client_id: me client id
216 *
217 * Return: me client or null if not found
218 *
219 * Locking: dev->me_clients_rwsem
220 */
221static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
222 const uuid_le *uuid, u8 client_id)
223{
224 struct mei_me_client *me_cl;
225 const uuid_le *pn;
226
227 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
228
229 list_for_each_entry(me_cl, &dev->me_clients, list) {
230 pn = &me_cl->props.protocol_name;
231 if (uuid_le_cmp(*uuid, *pn) == 0 &&
232 me_cl->client_id == client_id)
233 return mei_me_cl_get(me_cl);
234 }
235
236 return NULL;
237}
238
239
/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *     increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or null if not found; caller must drop the
 *         reference with mei_me_cl_put()
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
261
/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	/* __mei_me_cl_del drops the list's reference ... */
	__mei_me_cl_del(dev, me_cl);
	/* ... and this drops the reference taken by the lookup above */
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
282
/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	/* __mei_me_cl_del drops the list's reference ... */
	__mei_me_cl_del(dev, me_cl);
	/* ... and this drops the reference taken by the lookup above */
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
304
305/**
306 * mei_me_cl_rm_all - remove all me clients
307 *
308 * @dev: the device structure
309 *
310 * Locking: called under "dev->device_lock" lock
311 */
312void mei_me_cl_rm_all(struct mei_device *dev)
313{
314 struct mei_me_client *me_cl, *next;
315
316 down_write(&dev->me_clients_rwsem);
317 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
318 __mei_me_cl_del(dev, me_cl);
319 up_write(&dev->me_clients_rwsem);
320}
321
322/**
323 * mei_cl_cmp_id - tells if the clients are the same
324 *
325 * @cl1: host client 1
326 * @cl2: host client 2
327 *
328 * Return: true - if the clients has same host and me ids
329 * false - otherwise
330 */
331static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
332 const struct mei_cl *cl2)
333{
334 return cl1 && cl2 &&
335 (cl1->host_client_id == cl2->host_client_id) &&
336 (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
337}
338
339/**
340 * mei_io_cb_free - free mei_cb_private related memory
341 *
342 * @cb: mei callback struct
343 */
344void mei_io_cb_free(struct mei_cl_cb *cb)
345{
346 if (cb == NULL)
347 return;
348
349 list_del(&cb->list);
350 kfree(cb->buf.data);
351 kfree(cb);
352}
353
354/**
355 * mei_io_cb_init - allocate and initialize io callback
356 *
357 * @cl: mei client
358 * @type: operation type
359 * @fp: pointer to file structure
360 *
361 * Return: mei_cl_cb pointer or NULL;
362 */
363static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
364 enum mei_cb_file_ops type,
365 const struct file *fp)
366{
367 struct mei_cl_cb *cb;
368
369 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
370 if (!cb)
371 return NULL;
372
373 INIT_LIST_HEAD(&cb->list);
374 cb->fp = fp;
375 cb->cl = cl;
376 cb->buf_idx = 0;
377 cb->fop_type = type;
378 return cb;
379}
380
381/**
382 * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
383 *
384 * @head: an instance of our list structure
385 * @cl: host client, can be NULL for flushing the whole list
386 * @free: whether to free the cbs
387 */
388static void __mei_io_list_flush_cl(struct list_head *head,
389 const struct mei_cl *cl, bool free)
390{
391 struct mei_cl_cb *cb, *next;
392
393 /* enable removing everything if no cl is specified */
394 list_for_each_entry_safe(cb, next, head, list) {
395 if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
396 list_del_init(&cb->list);
397 if (free)
398 mei_io_cb_free(cb);
399 }
400 }
401}
402
/**
 * mei_io_list_flush_cl - removes list entry belonging to cl.
 *
 * @head: An instance of our list structure
 * @cl: host client
 *
 * Entries are only unlinked, not freed (free == false).
 */
static inline void mei_io_list_flush_cl(struct list_head *head,
					const struct mei_cl *cl)
{
	__mei_io_list_flush_cl(head, cl, false);
}
414
/**
 * mei_io_list_free_cl - removes cb belonging to cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 *
 * Entries are unlinked and freed (free == true).
 */
static inline void mei_io_list_free_cl(struct list_head *head,
				       const struct mei_cl *cl)
{
	__mei_io_list_flush_cl(head, cl, true);
}
426
427/**
428 * mei_io_list_free_fp - free cb from a list that matches file pointer
429 *
430 * @head: io list
431 * @fp: file pointer (matching cb file object), may be NULL
432 */
433static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
434{
435 struct mei_cl_cb *cb, *next;
436
437 list_for_each_entry_safe(cb, next, head, list)
438 if (!fp || fp == cb->fp)
439 mei_io_cb_free(cb);
440}
441
/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer; 0 means no data buffer is allocated
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	/* zero length: a bufferless control cb */
	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(length, GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}
474
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	/* queue on the device-wide control write list */
	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}
504
505/**
506 * mei_cl_read_cb - find this cl's callback in the read list
507 * for a specific file
508 *
509 * @cl: host client
510 * @fp: file pointer (matching cb file object), may be NULL
511 *
512 * Return: cb on success, NULL if cb is not found
513 */
514struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
515{
516 struct mei_cl_cb *cb;
517
518 list_for_each_entry(cb, &cl->rd_completed, list)
519 if (!fp || fp == cb->fp)
520 return cb;
521
522 return NULL;
523}
524
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Write queues are freed per client; pending/completed read queues
 * are freed per file pointer.
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_free_cl(&cl->dev->write_list, cl);
	mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
	/* control cbs are only unlinked, ownership stays elsewhere */
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}
552
/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 *
 * Zeroes the whole structure, then sets up wait queues, list heads
 * and the initial state machine values.
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}
573
574/**
575 * mei_cl_allocate - allocates cl structure and sets it up.
576 *
577 * @dev: mei device
578 * Return: The allocated file or NULL on failure
579 */
580struct mei_cl *mei_cl_allocate(struct mei_device *dev)
581{
582 struct mei_cl *cl;
583
584 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
585 if (!cl)
586 return NULL;
587
588 mei_cl_init(cl, dev);
589
590 return cl;
591}
592
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* find a free host client id in the bitmap */
	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	/* reserve the id in the map only after all checks passed */
	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
636
/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client, may be NULL (no-op)
 *
 * Reverses mei_cl_link: releases the host client id and removes the
 * client from the device file list.
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	/* sanity: all io lists must be empty by now */
	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}
677
/**
 * mei_host_client_init - mark the device enabled and schedule
 *	a bus rescan; then request runtime autosuspend
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	/* client enumeration on the bus is done from a work item */
	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}
689
690/**
691 * mei_hbuf_acquire - try to acquire host buffer
692 *
693 * @dev: the device structure
694 * Return: true if host buffer was acquired
695 */
696bool mei_hbuf_acquire(struct mei_device *dev)
697{
698 if (mei_pg_state(dev) == MEI_PG_ON ||
699 mei_pg_in_transition(dev)) {
700 dev_dbg(dev->dev, "device is in pg\n");
701 return false;
702 }
703
704 if (!dev->hbuf_is_ready) {
705 dev_dbg(dev->dev, "hbuf is not ready\n");
706 return false;
707 }
708
709 dev->hbuf_is_ready = false;
710
711 return true;
712}
713
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *  they can be interrupted
 *
 * @cl: host client
 *
 * The waitqueue_active() checks are safe only because all waiters
 * are synchronized under the device mutex (see per-queue comments).
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}
745
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *   associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* nothing to do if never connected or already disconnected */
	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_list_free_cl(&dev->write_list, cl);
	mei_io_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	mei_cl_bus_module_put(cl);

	if (!cl->me_cl)
		return;

	/* WARN if the connect counter is already unbalanced */
	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* last connection gone: reset the shared tx credits */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}
784
/**
 * mei_cl_set_connecting - prepare the host client for connection
 *
 * @cl: host client
 * @me_cl: me client to connect to
 *
 * Takes a reference on @me_cl; the reference is kept on success
 * (stored in cl->me_cl) and dropped on failure.
 *
 * Return: 0 on success, -ENOENT if @me_cl is being removed,
 *         -EBUSY if a fixed-address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}
804
/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request sent: wait for the FW reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}
833
834/**
835 * mei_cl_irq_disconnect - processes close related operation from
836 * interrupt thread context - send disconnect request
837 *
838 * @cl: client
839 * @cb: callback block.
840 * @cmpl_list: complete list.
841 *
842 * Return: 0, OK; otherwise, error.
843 */
844int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
845 struct list_head *cmpl_list)
846{
847 struct mei_device *dev = cl->dev;
848 u32 msg_slots;
849 int slots;
850 int ret;
851
852 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
853 slots = mei_hbuf_empty_slots(dev);
854
855 if (slots < msg_slots)
856 return -EMSGSIZE;
857
858 ret = mei_cl_send_disconnect(cl, cb);
859 if (ret)
860 list_move_tail(&cb->list, cmpl_list);
861
862 return ret;
863}
864
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send now if a host buffer slot is free; otherwise the cb
	 * stays on ctrl_wr_list and is sent from the irq path
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	/* drop the device lock while waiting for the FW reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}
920
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	/* fixed address clients have no HBM handshake to tear down */
	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
965
966
967/**
968 * mei_cl_is_other_connecting - checks if other
969 * client with the same me client id is connecting
970 *
971 * @cl: private data of the file object
972 *
973 * Return: true if other client is connected, false - otherwise.
974 */
975static bool mei_cl_is_other_connecting(struct mei_cl *cl)
976{
977 struct mei_device *dev;
978 struct mei_cl_cb *cb;
979
980 dev = cl->dev;
981
982 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
983 if (cb->fop_type == MEI_FOP_CONNECT &&
984 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
985 return true;
986 }
987
988 return false;
989}
990
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request sent: wait for the FW reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}
1018
1019/**
1020 * mei_cl_irq_connect - send connect request in irq_thread context
1021 *
1022 * @cl: host client
1023 * @cb: callback block
1024 * @cmpl_list: complete list
1025 *
1026 * Return: 0, OK; otherwise, error.
1027 */
1028int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1029 struct list_head *cmpl_list)
1030{
1031 struct mei_device *dev = cl->dev;
1032 u32 msg_slots;
1033 int slots;
1034 int rets;
1035
1036 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
1037 slots = mei_hbuf_empty_slots(dev);
1038
1039 if (mei_cl_is_other_connecting(cl))
1040 return 0;
1041
1042 if (slots < msg_slots)
1043 return -EMSGSIZE;
1044
1045 rets = mei_cl_send_connect(cl, cb);
1046 if (rets)
1047 list_move_tail(&cb->list, cmpl_list);
1048
1049 return rets;
1050}
1051
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	/* pin the bus module so it cannot go away while connected */
	if (!mei_cl_bus_module_get(cl))
		return -ENODEV;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed address clients connect without an HBM handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	/* drop the device lock while waiting for the FW reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
1148
/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	/* kfree(NULL) is a no-op on the allocation-failure path */
	kfree(cl);
	return ERR_PTR(ret);
}
1176
1177/**
1178 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1179 *
1180 * @cl: host client
1181 *
1182 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
1183 */
1184static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1185{
1186 if (WARN_ON(!cl || !cl->me_cl))
1187 return -EINVAL;
1188
1189 if (cl->tx_flow_ctrl_creds > 0)
1190 return 1;
1191
1192 if (mei_cl_is_fixed_address(cl))
1193 return 1;
1194
1195 if (mei_cl_is_single_recv_buf(cl)) {
1196 if (cl->me_cl->tx_flow_ctrl_creds > 0)
1197 return 1;
1198 }
1199 return 0;
1200}
1201
/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	/* fixed address clients are not flow controlled */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		/* shared credits live on the me client */
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}
1231
1232/**
1233 * mei_cl_notify_fop2req - convert fop to proper request
1234 *
1235 * @fop: client notification start response command
1236 *
1237 * Return: MEI_HBM_NOTIFICATION_START/STOP
1238 */
1239u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1240{
1241 if (fop == MEI_FOP_NOTIFY_START)
1242 return MEI_HBM_NOTIFICATION_START;
1243 else
1244 return MEI_HBM_NOTIFICATION_STOP;
1245}
1246
1247/**
1248 * mei_cl_notify_req2fop - convert notification request top file operation type
1249 *
1250 * @req: hbm notification request type
1251 *
1252 * Return: MEI_FOP_NOTIFY_START/STOP
1253 */
1254enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1255{
1256 if (req == MEI_HBM_NOTIFICATION_START)
1257 return MEI_FOP_NOTIFY_START;
1258 else
1259 return MEI_FOP_NOTIFY_STOP;
1260}
1261
1262/**
1263 * mei_cl_irq_notify - send notification request in irq_thread context
1264 *
1265 * @cl: client
1266 * @cb: callback block.
1267 * @cmpl_list: complete list.
1268 *
1269 * Return: 0 on such and error otherwise.
1270 */
1271int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1272 struct list_head *cmpl_list)
1273{
1274 struct mei_device *dev = cl->dev;
1275 u32 msg_slots;
1276 int slots;
1277 int ret;
1278 bool request;
1279
1280 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
1281 slots = mei_hbuf_empty_slots(dev);
1282
1283 if (slots < msg_slots)
1284 return -EMSGSIZE;
1285
1286 request = mei_cl_notify_fop2req(cb->fop_type);
1287 ret = mei_hbm_cl_notify_req(dev, cl, request);
1288 if (ret) {
1289 cl->status = ret;
1290 list_move_tail(&cb->list, cmpl_list);
1291 return ret;
1292 }
1293
1294 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1295 return 0;
1296}
1297
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on such and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send now if a host buffer slot is free; otherwise the cb
	 * stays on ctrl_wr_list and is sent from the irq path
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	/* drop the device lock while waiting for the FW reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request || !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
1371
1372/**
1373 * mei_cl_notify - raise notification
1374 *
1375 * @cl: host client
1376 *
1377 * Locking: called under "dev->device_lock" lock
1378 */
1379void mei_cl_notify(struct mei_cl *cl)
1380{
1381 struct mei_device *dev;
1382
1383 if (!cl || !cl->dev)
1384 return;
1385
1386 dev = cl->dev;
1387
1388 if (!cl->notify_en)
1389 return;
1390
1391 cl_dbg(dev, cl, "notify event");
1392 cl->notify_ev = true;
1393 if (!mei_cl_bus_notify_event(cl))
1394 wake_up_interruptible(&cl->ev_wait);
1395
1396 if (cl->ev_async)
1397 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1398
1399}
1400
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* an event is already pending - consume it without waiting */
	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	/* drop device_lock while sleeping so the event can be delivered */
	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	/* interrupted by a signal (-ERESTARTSYS) */
	if (rets < 0)
		return rets;

out:
	/* report and clear the pending event (edge-triggered semantics) */
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
1450
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* fixed address clients do not use the flow control protocol */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	/* keep the device powered while the request is sent */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* send the flow control request now if the host buffer is free;
	 * otherwise the cb stays queued for the irq thread to send */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	/* free the queued cb on failure */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
1517
1518/**
1519 * mei_cl_irq_write - write a message to device
1520 * from the interrupt thread context
1521 *
1522 * @cl: client
1523 * @cb: callback block.
1524 * @cmpl_list: complete list.
1525 *
1526 * Return: 0, OK; otherwise error.
1527 */
1528int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1529 struct list_head *cmpl_list)
1530{
1531 struct mei_device *dev;
1532 struct mei_msg_data *buf;
1533 struct mei_msg_hdr mei_hdr;
1534 size_t len;
1535 u32 msg_slots;
1536 int slots;
1537 int rets;
1538 bool first_chunk;
1539
1540 if (WARN_ON(!cl || !cl->dev))
1541 return -ENODEV;
1542
1543 dev = cl->dev;
1544
1545 buf = &cb->buf;
1546
1547 first_chunk = cb->buf_idx == 0;
1548
1549 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1550 if (rets < 0)
1551 goto err;
1552
1553 if (rets == 0) {
1554 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1555 return 0;
1556 }
1557
1558 slots = mei_hbuf_empty_slots(dev);
1559 len = buf->size - cb->buf_idx;
1560 msg_slots = mei_data2slots(len);
1561
1562 mei_hdr.host_addr = mei_cl_host_addr(cl);
1563 mei_hdr.me_addr = mei_cl_me_id(cl);
1564 mei_hdr.reserved = 0;
1565 mei_hdr.internal = cb->internal;
1566
1567 if (slots >= msg_slots) {
1568 mei_hdr.length = len;
1569 mei_hdr.msg_complete = 1;
1570 /* Split the message only if we can write the whole host buffer */
1571 } else if (slots == dev->hbuf_depth) {
1572 msg_slots = slots;
1573 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
1574 mei_hdr.length = len;
1575 mei_hdr.msg_complete = 0;
1576 } else {
1577 /* wait for next time the host buffer is empty */
1578 return 0;
1579 }
1580
1581 cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
1582 cb->buf.size, cb->buf_idx);
1583
1584 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
1585 if (rets)
1586 goto err;
1587
1588 cl->status = 0;
1589 cl->writing_state = MEI_WRITING;
1590 cb->buf_idx += mei_hdr.length;
1591 cb->completed = mei_hdr.msg_complete == 1;
1592
1593 if (first_chunk) {
1594 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1595 rets = -EIO;
1596 goto err;
1597 }
1598 }
1599
1600 if (mei_hdr.msg_complete)
1601 list_move_tail(&cb->list, &dev->write_waiting_list);
1602
1603 return 0;
1604
1605err:
1606 cl->status = rets;
1607 list_move_tail(&cb->list, cmpl_list);
1608 return rets;
1609}
1610
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data; ownership of the cb is taken
 *	by this function - it is either queued on a device list or freed
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int size;
	int rets;
	bool blocking;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	size = buf->size;
	blocking = cb->blocking;

	cl_dbg(dev, cl, "size=%d\n", size);

	/* keep the device powered for the duration of the write */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits or no host buffer: queue the cb for the irq thread
	 * to transmit later and report the full size as accepted */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = size;
		goto out;
	}

	/* Check for a maximum length */
	if (size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

out:
	/* complete messages wait for the write-complete interrupt;
	 * partial messages are continued by the irq thread */
	if (mei_hdr.msg_complete)
		list_add_tail(&cb->list, &dev->write_waiting_list);
	else
		list_add_tail(&cb->list, &dev->write_list);

	/* cb ownership transferred to the list - don't free it below */
	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop device_lock while sleeping so the irq thread can
		 * complete the write and wake us */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		/* woken by disconnect rather than completion */
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}
1731
1732
1733/**
1734 * mei_cl_complete - processes completed operation for a client
1735 *
1736 * @cl: private data of the file object.
1737 * @cb: callback block.
1738 */
1739void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1740{
1741 struct mei_device *dev = cl->dev;
1742
1743 switch (cb->fop_type) {
1744 case MEI_FOP_WRITE:
1745 mei_io_cb_free(cb);
1746 cl->writing_state = MEI_WRITE_COMPLETE;
1747 if (waitqueue_active(&cl->tx_wait)) {
1748 wake_up_interruptible(&cl->tx_wait);
1749 } else {
1750 pm_runtime_mark_last_busy(dev->dev);
1751 pm_request_autosuspend(dev->dev);
1752 }
1753 break;
1754
1755 case MEI_FOP_READ:
1756 list_add_tail(&cb->list, &cl->rd_completed);
1757 if (!mei_cl_is_fixed_address(cl) &&
1758 !WARN_ON(!cl->rx_flow_ctrl_creds))
1759 cl->rx_flow_ctrl_creds--;
1760 if (!mei_cl_bus_rx_event(cl))
1761 wake_up_interruptible(&cl->rx_wait);
1762 break;
1763
1764 case MEI_FOP_CONNECT:
1765 case MEI_FOP_DISCONNECT:
1766 case MEI_FOP_NOTIFY_STOP:
1767 case MEI_FOP_NOTIFY_START:
1768 if (waitqueue_active(&cl->wait))
1769 wake_up(&cl->wait);
1770
1771 break;
1772 case MEI_FOP_DISCONNECT_RSP:
1773 mei_io_cb_free(cb);
1774 mei_cl_set_disconnected(cl);
1775 break;
1776 default:
1777 BUG_ON(0);
1778 }
1779}
1780
1781
1782/**
1783 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1784 *
1785 * @dev: mei device
1786 */
1787void mei_cl_all_disconnect(struct mei_device *dev)
1788{
1789 struct mei_cl *cl;
1790
1791 list_for_each_entry(cl, &dev->file_list, link)
1792 mei_cl_set_disconnected(cl);
1793}