1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/cdev.h>
21#include <linux/debugfs.h>
22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/err.h>
25#include <linux/freezer.h>
26#include <linux/fs.h>
27#include <linux/init.h>
28#include <linux/list.h>
29#include <linux/poll.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <linux/virtio.h>
34#include <linux/virtio_console.h>
35#include <linux/wait.h>
36#include <linux/workqueue.h>
37#include <linux/module.h>
38#include "../tty/hvc/hvc_console.h"
39
40/*
41 * This is a global struct for storing common data for all the devices
42 * this driver handles.
43 *
44 * Mainly, it has a linked list for all the consoles in one place so
45 * that callbacks from hvc for get_chars(), put_chars() work properly
46 * across multiple devices and multiple ports per device.
47 */
48struct ports_driver_data {
49 /* Used for registering chardevs */
50 struct class *class;
51
52 /* Used for exporting per-port information to debugfs */
53 struct dentry *debugfs_dir;
54
55 /* List of all the devices we're handling */
56 struct list_head portdevs;
57
58 /* Number of devices this driver is handling */
59 unsigned int index;
60
61 /*
62 * This is used to keep track of the number of hvc consoles
63 * spawned by this driver. This number is given as the first
64 * argument to hvc_alloc(). To correctly map an initial
65 * console spawned via hvc_instantiate to the console being
66 * hooked up via hvc_alloc, we need to pass the same vtermno.
67 *
68 * We also just assume the first console being initialised was
69 * the first one that got used as the initial console.
70 */
71 unsigned int next_vtermno;
72
73 /* All the console devices handled by this driver */
74 struct list_head consoles;
75};
76static struct ports_driver_data pdrvdata;
77
78DEFINE_SPINLOCK(pdrvdata_lock);
79DECLARE_COMPLETION(early_console_added);
80
81/* This struct holds information that's relevant only for console ports */
82struct console {
83 /* We'll place all consoles in a list in the pdrvdata struct */
84 struct list_head list;
85
86 /* The hvc device associated with this console port */
87 struct hvc_struct *hvc;
88
89 /* The size of the console */
90 struct winsize ws;
91
92 /*
 93 * The vtermno that we used to register with hvc in
 94 * hvc_instantiate() and hvc_alloc(); the hvc callbacks pass
 95 * this number back to us so that we can tell apart the
 96 * console ports handled by this driver.
98 */
99 u32 vtermno;
100};
101
102struct port_buffer {
103 char *buf;
104
105 /* size of the buffer in *buf above */
106 size_t size;
107
108 /* used length of the buffer */
109 size_t len;
110 /* offset in the buf from which to consume data */
111 size_t offset;
112};
113
114/*
115 * This is a per-device struct that stores data common to all the
116 * ports for that device (vdev->priv).
117 */
118struct ports_device {
119 /* Next portdev in the list, head is in the pdrvdata struct */
120 struct list_head list;
121
122 /*
123 * Workqueue handlers where we process deferred work after
124 * notification
125 */
126 struct work_struct control_work;
127 struct work_struct config_work;
128
129 struct list_head ports;
130
131 /* To protect the list of ports */
132 spinlock_t ports_lock;
133
134 /* To protect the vq operations for the control channel */
135 spinlock_t c_ivq_lock;
136 spinlock_t c_ovq_lock;
137
138 /* The current config space is stored here */
139 struct virtio_console_config config;
140
141 /* The virtio device we're associated with */
142 struct virtio_device *vdev;
143
144 /*
145 * A couple of virtqueues for the control channel: one for
146 * guest->host transfers, one for host->guest transfers
147 */
148 struct virtqueue *c_ivq, *c_ovq;
149
150 /* Array of per-port IO virtqueues */
151 struct virtqueue **in_vqs, **out_vqs;
152
153 /* Used for numbering devices for sysfs and debugfs */
154 unsigned int drv_index;
155
156 /* Major number for this device. Ports will be created as minors. */
157 int chr_major;
158};
159
160struct port_stats {
161 unsigned long bytes_sent, bytes_received, bytes_discarded;
162};
163
164/* This struct holds the per-port data */
165struct port {
166 /* Next port in the list, head is in the ports_device */
167 struct list_head list;
168
169 /* Pointer to the parent virtio_console device */
170 struct ports_device *portdev;
171
172 /* The current buffer from which data has to be fed to readers */
173 struct port_buffer *inbuf;
174
175 /*
176 * To protect the operations on the in_vq associated with this
177 * port. Has to be a spinlock because it can be called from
178 * interrupt context (get_char()).
179 */
180 spinlock_t inbuf_lock;
181
182 /* Protect the operations on the out_vq. */
183 spinlock_t outvq_lock;
184
185 /* The IO vqs for this port */
186 struct virtqueue *in_vq, *out_vq;
187
188 /* File in the debugfs directory that exposes this port's information */
189 struct dentry *debugfs_file;
190
191 /*
192 * Keep count of the bytes sent, received and discarded for
193 * this port for accounting and debugging purposes. These
194 * counts are not reset across port open / close events.
195 */
196 struct port_stats stats;
197
198 /*
199 * The entries in this struct will be valid if this port is
200 * hooked up to an hvc console
201 */
202 struct console cons;
203
204 /* Each port associates with a separate char device */
205 struct cdev *cdev;
206 struct device *dev;
207
208 /* Reference-counting to handle port hot-unplugs and file operations */
209 struct kref kref;
210
211 /* A waitqueue for poll() or blocking read operations */
212 wait_queue_head_t waitqueue;
213
214 /* The 'name' of the port that we expose via sysfs properties */
215 char *name;
216
217 /* We can notify apps of host connect / disconnect events via SIGIO */
218 struct fasync_struct *async_queue;
219
220 /* The 'id' to identify the port with the Host */
221 u32 id;
222
223 bool outvq_full;
224
225 /* Is the host device open */
226 bool host_connected;
227
228 /* We should allow only one process to open a port */
229 bool guest_connected;
230};
231
232/* This is the very early arch-specific put_chars function. */
233static int (*early_put_chars)(u32, const char *, int);
234
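/* Find the port that backs a given hvc terminal number (vtermno). */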
235static struct port *find_port_by_vtermno(u32 vtermno)
236{
237 struct port *port;
238 struct console *cons;
239 unsigned long flags;
240
241 spin_lock_irqsave(&pdrvdata_lock, flags);
242 list_for_each_entry(cons, &pdrvdata.consoles, list) {
243 if (cons->vtermno == vtermno) {
244 port = container_of(cons, struct port, cons);
245 goto out;
246 }
247 }
248 port = NULL;
249out:
250 spin_unlock_irqrestore(&pdrvdata_lock, flags);
251 return port;
252}
253
254static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
255 dev_t dev)
256{
257 struct port *port;
258 unsigned long flags;
259
260 spin_lock_irqsave(&portdev->ports_lock, flags);
261 list_for_each_entry(port, &portdev->ports, list) {
262 if (port->cdev->dev == dev) {
263 kref_get(&port->kref);
264 goto out;
265 }
266 }
267 port = NULL;
268out:
269 spin_unlock_irqrestore(&portdev->ports_lock, flags);
270
271 return port;
272}
273
274static struct port *find_port_by_devt(dev_t dev)
275{
276 struct ports_device *portdev;
277 struct port *port;
278 unsigned long flags;
279
280 spin_lock_irqsave(&pdrvdata_lock, flags);
281 list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
282 port = find_port_by_devt_in_portdev(portdev, dev);
283 if (port)
284 goto out;
285 }
286 port = NULL;
287out:
288 spin_unlock_irqrestore(&pdrvdata_lock, flags);
289 return port;
290}
291
292static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
293{
294 struct port *port;
295 unsigned long flags;
296
297 spin_lock_irqsave(&portdev->ports_lock, flags);
298 list_for_each_entry(port, &portdev->ports, list)
299 if (port->id == id)
300 goto out;
301 port = NULL;
302out:
303 spin_unlock_irqrestore(&portdev->ports_lock, flags);
304
305 return port;
306}
307
308static struct port *find_port_by_vq(struct ports_device *portdev,
309 struct virtqueue *vq)
310{
311 struct port *port;
312 unsigned long flags;
313
314 spin_lock_irqsave(&portdev->ports_lock, flags);
315 list_for_each_entry(port, &portdev->ports, list)
316 if (port->in_vq == vq || port->out_vq == vq)
317 goto out;
318 port = NULL;
319out:
320 spin_unlock_irqrestore(&portdev->ports_lock, flags);
321 return port;
322}
323
324static bool is_console_port(struct port *port)
325{
326 if (port->cons.hvc)
327 return true;
328 return false;
329}
330
331static inline bool use_multiport(struct ports_device *portdev)
332{
333 /*
334 * This condition can be true when put_chars is called from
335 * early_init
336 */
337 if (!portdev->vdev)
338 return false;
339 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
340}
341
342static void free_buf(struct port_buffer *buf)
343{
344 kfree(buf->buf);
345 kfree(buf);
346}
347
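/* Allocate a port_buffer along with its backing data buffer of the given size. */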
348static struct port_buffer *alloc_buf(size_t buf_size)
349{
350 struct port_buffer *buf;
351
352 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
353 if (!buf)
354 goto fail;
355 buf->buf = kzalloc(buf_size, GFP_KERNEL);
356 if (!buf->buf)
357 goto free_buf;
358 buf->len = 0;
359 buf->offset = 0;
360 buf->size = buf_size;
361 return buf;
362
363free_buf:
364 kfree(buf);
365fail:
366 return NULL;
367}
368
369/* Callers should take appropriate locks */
370static struct port_buffer *get_inbuf(struct port *port)
371{
372 struct port_buffer *buf;
373 unsigned int len;
374
375 if (port->inbuf)
376 return port->inbuf;
377
378 buf = virtqueue_get_buf(port->in_vq, &len);
379 if (buf) {
380 buf->len = len;
381 buf->offset = 0;
382 port->stats.bytes_received += len;
383 }
384 return buf;
385}
386
387/*
388 * Create a scatter-gather list representing our input buffer and put
389 * it in the queue.
390 *
391 * Callers should take appropriate locks.
392 */
393static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
394{
395 struct scatterlist sg[1];
396 int ret;
397
398 sg_init_one(sg, buf->buf, buf->size);
399
400 ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
401 virtqueue_kick(vq);
402 return ret;
403}
404
405/* Discard any unread data this port has. Callers should take appropriate locks. */
406static void discard_port_data(struct port *port)
407{
408 struct port_buffer *buf;
409 unsigned int err;
410
411 if (!port->portdev) {
412 /* Device has been unplugged. vqs are already gone. */
413 return;
414 }
415 buf = get_inbuf(port);
416
417 err = 0;
418 while (buf) {
419 port->stats.bytes_discarded += buf->len - buf->offset;
420 if (add_inbuf(port->in_vq, buf) < 0) {
421 err++;
422 free_buf(buf);
423 }
424 port->inbuf = NULL;
425 buf = get_inbuf(port);
426 }
427 if (err)
428 dev_warn(port->dev, "Errors adding %u buffers back to vq\n",
429 err);
430}
431
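/* Check, under inbuf_lock, whether this port has unread input data queued up. */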
432static bool port_has_data(struct port *port)
433{
434 unsigned long flags;
435 bool ret;
436
437 ret = false;
438 spin_lock_irqsave(&port->inbuf_lock, flags);
439 port->inbuf = get_inbuf(port);
440 if (port->inbuf)
441 ret = true;
442
443 spin_unlock_irqrestore(&port->inbuf_lock, flags);
444 return ret;
445}
446
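/*
 * Send a control message for the given port id on the control-out vq and
 * busy-wait until the host consumes it.
 */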
447static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
448 unsigned int event, unsigned int value)
449{
450 struct scatterlist sg[1];
451 struct virtio_console_control cpkt;
452 struct virtqueue *vq;
453 unsigned int len;
454
455 if (!use_multiport(portdev))
456 return 0;
457
458 cpkt.id = port_id;
459 cpkt.event = event;
460 cpkt.value = value;
461
462 vq = portdev->c_ovq;
463
464 sg_init_one(sg, &cpkt, sizeof(cpkt));
465
466 spin_lock(&portdev->c_ovq_lock);
467 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
468 virtqueue_kick(vq);
469 while (!virtqueue_get_buf(vq, &len))
470 cpu_relax();
471 }
472 spin_unlock(&portdev->c_ovq_lock);
473 return 0;
474}
475
476static ssize_t send_control_msg(struct port *port, unsigned int event,
477 unsigned int value)
478{
479 /* Did the port get unplugged before userspace closed it? */
480 if (port->portdev)
481 return __send_control_msg(port->portdev, port->id, event, value);
482 return 0;
483}
484
485/* Callers must take the port->outvq_lock */
486static void reclaim_consumed_buffers(struct port *port)
487{
488 void *buf;
489 unsigned int len;
490
491 if (!port->portdev) {
492 /* Device has been unplugged. vqs are already gone. */
493 return;
494 }
495 while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
496 kfree(buf);
497 port->outvq_full = false;
498 }
499}
500
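/*
 * Queue one output buffer on the port's out_vq and kick the host;
 * blocking callers (the hvc console) spin until the host consumes it.
 */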
501static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
502 bool nonblock)
503{
504 struct scatterlist sg[1];
505 struct virtqueue *out_vq;
506 ssize_t ret;
507 unsigned long flags;
508 unsigned int len;
509
510 out_vq = port->out_vq;
511
512 spin_lock_irqsave(&port->outvq_lock, flags);
513
514 reclaim_consumed_buffers(port);
515
516 sg_init_one(sg, in_buf, in_count);
517 ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
518
519 /* Tell Host to go! */
520 virtqueue_kick(out_vq);
521
522 if (ret < 0) {
523 in_count = 0;
524 goto done;
525 }
526
527 if (ret == 0)
528 port->outvq_full = true;
529
530 if (nonblock)
531 goto done;
532
533 /*
534 * Wait till the host acknowledges it pushed out the data we
535 * sent. This is done for data from the hvc_console; the tty
536 * operations are performed with spinlocks held so we can't
537 * sleep here. An alternative would be to copy the data to a
538 * buffer and relax the spinning requirement. The downside is
539 * we need to kmalloc a GFP_ATOMIC buffer each time the
540 * console driver writes something out.
541 */
542 while (!virtqueue_get_buf(out_vq, &len))
543 cpu_relax();
544done:
545 spin_unlock_irqrestore(&port->outvq_lock, flags);
546
547 port->stats.bytes_sent += in_count;
548 /*
549 * We're expected to return the amount of data we wrote -- all
550 * of it
551 */
552 return in_count;
553}
554
555/*
556 * Give out the data that's requested from the buffer that we have
557 * queued up.
558 */
559static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
560 bool to_user)
561{
562 struct port_buffer *buf;
563 unsigned long flags;
564
565 if (!out_count || !port_has_data(port))
566 return 0;
567
568 buf = port->inbuf;
569 out_count = min(out_count, buf->len - buf->offset);
570
571 if (to_user) {
572 ssize_t ret;
573
574 ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
575 if (ret)
576 return -EFAULT;
577 } else {
578 memcpy(out_buf, buf->buf + buf->offset, out_count);
579 }
580
581 buf->offset += out_count;
582
583 if (buf->offset == buf->len) {
584 /*
585 * We're done using all the data in this buffer.
586 * Re-queue so that the Host can send us more data.
587 */
588 spin_lock_irqsave(&port->inbuf_lock, flags);
589 port->inbuf = NULL;
590
591 if (add_inbuf(port->in_vq, buf) < 0)
592 dev_warn(port->dev, "failed add_buf\n");
593
594 spin_unlock_irqrestore(&port->inbuf_lock, flags);
595 }
596 /* Return the number of bytes actually copied */
597 return out_count;
598}
599
600/* The condition that must be true for polling to end */
601static bool will_read_block(struct port *port)
602{
603 if (!port->guest_connected) {
604 /* Port got hot-unplugged. Let's exit. */
605 return false;
606 }
607 return !port_has_data(port) && port->host_connected;
608}
609
610static bool will_write_block(struct port *port)
611{
612 bool ret;
613
614 if (!port->guest_connected) {
615 /* Port got hot-unplugged. Let's exit. */
616 return false;
617 }
618 if (!port->host_connected)
619 return true;
620
621 spin_lock_irq(&port->outvq_lock);
622 /*
623 * Check if the Host has consumed any buffers since we last
624 * sent data (this is only applicable for nonblocking ports).
625 */
626 reclaim_consumed_buffers(port);
627 ret = port->outvq_full;
628 spin_unlock_irq(&port->outvq_lock);
629
630 return ret;
631}
632
633static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
634 size_t count, loff_t *offp)
635{
636 struct port *port;
637 ssize_t ret;
638
639 port = filp->private_data;
640
641 /* Port is hot-unplugged. */
642 if (!port->guest_connected)
643 return -ENODEV;
644
645 if (!port_has_data(port)) {
646 /*
647 * If nothing's connected on the host, just return 0;
648 * this tells the userspace app that there's no host
649 * connection yet.
650 */
651 if (!port->host_connected)
652 return 0;
653 if (filp->f_flags & O_NONBLOCK)
654 return -EAGAIN;
655
656 ret = wait_event_freezable(port->waitqueue,
657 !will_read_block(port));
658 if (ret < 0)
659 return ret;
660 }
661 /* Port got hot-unplugged while we were waiting above. */
662 if (!port->guest_connected)
663 return -ENODEV;
664 /*
665 * We could've received a disconnection message while we were
666 * waiting for more data.
667 *
668 * This check is not folded into the if() statement above as we
669 * might receive some data as well as the host could get
670 * disconnected after we got woken up from our wait. So we
671 * really want to give off whatever data we have and only then
672 * check for host_connected.
673 */
674 if (!port_has_data(port) && !port->host_connected)
675 return 0;
676
677 return fill_readbuf(port, ubuf, count, true);
678}
679
680static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
681 size_t count, loff_t *offp)
682{
683 struct port *port;
684 char *buf;
685 ssize_t ret;
686 bool nonblock;
687
688 /* Userspace could be out to fool us */
689 if (!count)
690 return 0;
691
692 port = filp->private_data;
693
694 nonblock = filp->f_flags & O_NONBLOCK;
695
696 if (will_write_block(port)) {
697 if (nonblock)
698 return -EAGAIN;
699
700 ret = wait_event_freezable(port->waitqueue,
701 !will_write_block(port));
702 if (ret < 0)
703 return ret;
704 }
705 /* Port got hot-unplugged. */
706 if (!port->guest_connected)
707 return -ENODEV;
708
709 count = min((size_t)(32 * 1024), count);
710
711 buf = kmalloc(count, GFP_KERNEL);
712 if (!buf)
713 return -ENOMEM;
714
715 ret = copy_from_user(buf, ubuf, count);
716 if (ret) {
717 ret = -EFAULT;
718 goto free_buf;
719 }
720
721 /*
722 * We now ask send_buf() to not spin for generic ports -- we
723 * can re-use the same code path that non-blocking file
724 * descriptors take for blocking file descriptors since the
725 * wait is already done and we're certain the write will go
726 * through to the host.
727 */
728 nonblock = true;
729 ret = send_buf(port, buf, count, nonblock);
730
731 if (nonblock && ret > 0)
732 goto out;
733
734free_buf:
735 kfree(buf);
736out:
737 return ret;
738}
739
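/*
 * poll() support: report readiness based on queued input, output queue
 * space and the host connection state.
 */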
740static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
741{
742 struct port *port;
743 unsigned int ret;
744
745 port = filp->private_data;
746 poll_wait(filp, &port->waitqueue, wait);
747
748 if (!port->guest_connected) {
749 /* Port got unplugged */
750 return POLLHUP;
751 }
752 ret = 0;
753 if (!will_read_block(port))
754 ret |= POLLIN | POLLRDNORM;
755 if (!will_write_block(port))
756 ret |= POLLOUT;
757 if (!port->host_connected)
758 ret |= POLLHUP;
759
760 return ret;
761}
762
763static void remove_port(struct kref *kref);
764
765static int port_fops_release(struct inode *inode, struct file *filp)
766{
767 struct port *port;
768
769 port = filp->private_data;
770
771 /* Notify host of port being closed */
772 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
773
774 spin_lock_irq(&port->inbuf_lock);
775 port->guest_connected = false;
776
777 discard_port_data(port);
778
779 spin_unlock_irq(&port->inbuf_lock);
780
781 spin_lock_irq(&port->outvq_lock);
782 reclaim_consumed_buffers(port);
783 spin_unlock_irq(&port->outvq_lock);
784
785 /*
786 * Locks aren't necessary here as a port can't be opened after
787 * unplug, and if a port isn't unplugged, a kref would already
788 * exist for the port. Plus, taking ports_lock here would
789 * create a dependency on other locks taken by functions
790 * inside remove_port if we're the last holder of the port,
791 * creating many problems.
792 */
793 kref_put(&port->kref, remove_port);
794
795 return 0;
796}
797
798static int port_fops_open(struct inode *inode, struct file *filp)
799{
800 struct cdev *cdev = inode->i_cdev;
801 struct port *port;
802 int ret;
803
804 /* We get the port with a kref here */
805 port = find_port_by_devt(cdev->dev);
806 if (!port) {
807 /* Port was unplugged before we could proceed */
808 return -ENXIO;
809 }
810 filp->private_data = port;
811
812 /*
813 * Don't allow opening of console port devices -- that's done
814 * via /dev/hvc
815 */
816 if (is_console_port(port)) {
817 ret = -ENXIO;
818 goto out;
819 }
820
821 /* Allow only one process to open a particular port at a time */
822 spin_lock_irq(&port->inbuf_lock);
823 if (port->guest_connected) {
824 spin_unlock_irq(&port->inbuf_lock);
825 ret = -EMFILE;
826 goto out;
827 }
828
829 port->guest_connected = true;
830 spin_unlock_irq(&port->inbuf_lock);
831
832 spin_lock_irq(&port->outvq_lock);
833 /*
834 * There might be a chance that we missed reclaiming a few
835 * buffers in the window of the port getting previously closed
836 * and opening now.
837 */
838 reclaim_consumed_buffers(port);
839 spin_unlock_irq(&port->outvq_lock);
840
841 nonseekable_open(inode, filp);
842
843 /* Notify host of port being opened */
844 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
845
846 return 0;
847out:
848 kref_put(&port->kref, remove_port);
849 return ret;
850}
851
852static int port_fops_fasync(int fd, struct file *filp, int mode)
853{
854 struct port *port;
855
856 port = filp->private_data;
857 return fasync_helper(fd, filp, mode, &port->async_queue);
858}
859
860/*
861 * The file operations that we support: programs in the guest can open
862 * a console device, read from it, write to it, poll for data and
863 * close it. The devices are at
864 * /dev/vport<device number>p<port number>
865 */
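/*
 * For illustration (hypothetical names): a guest-side agent would
 * typically open("/dev/vport0p1", O_RDWR) and then use read(),
 * write() and poll() on the descriptor like any other char device.
 */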
866static const struct file_operations port_fops = {
867 .owner = THIS_MODULE,
868 .open = port_fops_open,
869 .read = port_fops_read,
870 .write = port_fops_write,
871 .poll = port_fops_poll,
872 .release = port_fops_release,
873 .fasync = port_fops_fasync,
874 .llseek = no_llseek,
875};
876
877/*
878 * The put_chars() callback is pretty straightforward.
879 *
880 * We turn the characters into a scatter-gather list, add it to the
881 * output queue and then kick the Host. Then we sit here waiting for
882 * it to finish: inefficient in theory, but in practice
883 * implementations will do it immediately (lguest's Launcher does).
884 */
885static int put_chars(u32 vtermno, const char *buf, int count)
886{
887 struct port *port;
888
889 if (unlikely(early_put_chars))
890 return early_put_chars(vtermno, buf, count);
891
892 port = find_port_by_vtermno(vtermno);
893 if (!port)
894 return -EPIPE;
895
896 return send_buf(port, (void *)buf, count, false);
897}
898
899/*
900 * get_chars() is the callback from the hvc_console infrastructure
901 * when an interrupt is received.
902 *
903 * We call out to fill_readbuf that gets us the required data from the
904 * buffers that are queued up.
905 */
906static int get_chars(u32 vtermno, char *buf, int count)
907{
908 struct port *port;
909
910 /* If we've not set up the port yet, we have no input to give. */
911 if (unlikely(early_put_chars))
912 return 0;
913
914 port = find_port_by_vtermno(vtermno);
915 if (!port)
916 return -EPIPE;
917
918 /* If we don't have an input queue yet, we can't get input. */
919 BUG_ON(!port->in_vq);
920
921 return fill_readbuf(port, buf, count, false);
922}
923
924static void resize_console(struct port *port)
925{
926 struct virtio_device *vdev;
927
928 /* The port could have been hot-unplugged */
929 if (!port || !is_console_port(port))
930 return;
931
932 vdev = port->portdev->vdev;
933 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
934 hvc_resize(port->cons.hvc, port->cons.ws);
935}
936
937/* We set the configuration at this point, since we now have a tty */
938static int notifier_add_vio(struct hvc_struct *hp, int data)
939{
940 struct port *port;
941
942 port = find_port_by_vtermno(hp->vtermno);
943 if (!port)
944 return -EINVAL;
945
946 hp->irq_requested = 1;
947 resize_console(port);
948
949 return 0;
950}
951
952static void notifier_del_vio(struct hvc_struct *hp, int data)
953{
954 hp->irq_requested = 0;
955}
956
957/* The operations for console ports. */
958static const struct hv_ops hv_ops = {
959 .get_chars = get_chars,
960 .put_chars = put_chars,
961 .notifier_add = notifier_add_vio,
962 .notifier_del = notifier_del_vio,
963 .notifier_hangup = notifier_del_vio,
964};
965
966/*
967 * Console drivers are initialized very early so boot messages can go
968 * out, so we do things slightly differently from the generic virtio
969 * initialization of the net and block drivers.
970 *
971 * At this stage, the console is output-only. It's too early to set
972 * up a virtqueue, so we let the drivers do some boutique early-output
973 * thing.
974 */
975int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
976{
977 early_put_chars = put_chars;
978 return hvc_instantiate(0, 0, &hv_ops);
979}
980
981static int init_port_console(struct port *port)
982{
983 int ret;
984
985 /*
986 * The Host's telling us this port is a console port. Hook it
987 * up with an hvc console.
988 *
989 * To set up and manage our virtual console, we call
990 * hvc_alloc().
991 *
992 * The first argument of hvc_alloc() is the virtual console
993 * number. The second argument is the parameter for the
994 * notification mechanism (like irq number). We currently
995 * leave this as zero, virtqueues have implicit notifications.
996 *
997 * The third argument is a "struct hv_ops" containing the
998 * put_chars() get_chars(), notifier_add() and notifier_del()
999 * pointers. The final argument is the output buffer size: we
1000 * can do any size, so we put PAGE_SIZE here.
1001 */
1002 port->cons.vtermno = pdrvdata.next_vtermno;
1003
1004 port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1005 if (IS_ERR(port->cons.hvc)) {
1006 ret = PTR_ERR(port->cons.hvc);
1007 dev_err(port->dev,
1008 "error %d allocating hvc for port\n", ret);
1009 port->cons.hvc = NULL;
1010 return ret;
1011 }
1012 spin_lock_irq(&pdrvdata_lock);
1013 pdrvdata.next_vtermno++;
1014 list_add_tail(&port->cons.list, &pdrvdata.consoles);
1015 spin_unlock_irq(&pdrvdata_lock);
1016 port->guest_connected = true;
1017
1018 /*
1019 * Start using the new console output if this is the first
1020 * console to come up.
1021 */
1022 if (early_put_chars)
1023 early_put_chars = NULL;
1024
1025 /* Notify host of port being opened */
1026 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1027
1028 return 0;
1029}
1030
1031static ssize_t show_port_name(struct device *dev,
1032 struct device_attribute *attr, char *buffer)
1033{
1034 struct port *port;
1035
1036 port = dev_get_drvdata(dev);
1037
1038 return sprintf(buffer, "%s\n", port->name);
1039}
1040
1041static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1042
1043static struct attribute *port_sysfs_entries[] = {
1044 &dev_attr_name.attr,
1045 NULL
1046};
1047
1048static struct attribute_group port_attribute_group = {
1049 .name = NULL, /* put in device directory */
1050 .attrs = port_sysfs_entries,
1051};
1052
1053static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1054 size_t count, loff_t *offp)
1055{
1056 struct port *port;
1057 char *buf;
1058 ssize_t ret, out_offset, out_count;
1059
1060 out_count = 1024;
1061 buf = kmalloc(out_count, GFP_KERNEL);
1062 if (!buf)
1063 return -ENOMEM;
1064
1065 port = filp->private_data;
1066 out_offset = 0;
1067 out_offset += snprintf(buf + out_offset, out_count,
1068 "name: %s\n", port->name ? port->name : "");
1069 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1070 "guest_connected: %d\n", port->guest_connected);
1071 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1072 "host_connected: %d\n", port->host_connected);
1073 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1074 "outvq_full: %d\n", port->outvq_full);
1075 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1076 "bytes_sent: %lu\n", port->stats.bytes_sent);
1077 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1078 "bytes_received: %lu\n",
1079 port->stats.bytes_received);
1080 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1081 "bytes_discarded: %lu\n",
1082 port->stats.bytes_discarded);
1083 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1084 "is_console: %s\n",
1085 is_console_port(port) ? "yes" : "no");
1086 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1087 "console_vtermno: %u\n", port->cons.vtermno);
1088
1089 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1090 kfree(buf);
1091 return ret;
1092}
1093
1094static const struct file_operations port_debugfs_ops = {
1095 .owner = THIS_MODULE,
1096 .open = simple_open,
1097 .read = debugfs_read,
1098};
1099
1100static void set_console_size(struct port *port, u16 rows, u16 cols)
1101{
1102 if (!port || !is_console_port(port))
1103 return;
1104
1105 port->cons.ws.ws_row = rows;
1106 port->cons.ws.ws_col = cols;
1107}
1108
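/*
 * Fill a virtqueue with PAGE_SIZE receive buffers until it is full or
 * allocation fails; returns the number of buffers queued.
 */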
1109static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1110{
1111 struct port_buffer *buf;
1112 unsigned int nr_added_bufs;
1113 int ret;
1114
1115 nr_added_bufs = 0;
1116 do {
1117 buf = alloc_buf(PAGE_SIZE);
1118 if (!buf)
1119 break;
1120
1121 spin_lock_irq(lock);
1122 ret = add_inbuf(vq, buf);
1123 if (ret < 0) {
1124 spin_unlock_irq(lock);
1125 free_buf(buf);
1126 break;
1127 }
1128 nr_added_bufs++;
1129 spin_unlock_irq(lock);
1130 } while (ret > 0);
1131
1132 return nr_added_bufs;
1133}
1134
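/* Raise SIGIO on any process that requested async notification on this port. */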
1135static void send_sigio_to_port(struct port *port)
1136{
1137 if (port->async_queue && port->guest_connected)
1138 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1139}
1140
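/*
 * Instantiate a new port: set up its char device, prefill its in_vq
 * with buffers, add it to the portdev's list and tell the host the
 * port is ready.
 */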
1141static int add_port(struct ports_device *portdev, u32 id)
1142{
1143 char debugfs_name[16];
1144 struct port *port;
1145 struct port_buffer *buf;
1146 dev_t devt;
1147 unsigned int nr_added_bufs;
1148 int err;
1149
1150 port = kmalloc(sizeof(*port), GFP_KERNEL);
1151 if (!port) {
1152 err = -ENOMEM;
1153 goto fail;
1154 }
1155 kref_init(&port->kref);
1156
1157 port->portdev = portdev;
1158 port->id = id;
1159
1160 port->name = NULL;
1161 port->inbuf = NULL;
1162 port->cons.hvc = NULL;
1163 port->async_queue = NULL;
1164
1165 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1166
1167 port->host_connected = port->guest_connected = false;
1168 port->stats = (struct port_stats) { 0 };
1169
1170 port->outvq_full = false;
1171
1172 port->in_vq = portdev->in_vqs[port->id];
1173 port->out_vq = portdev->out_vqs[port->id];
1174
1175 port->cdev = cdev_alloc();
1176 if (!port->cdev) {
1177 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1178 err = -ENOMEM;
1179 goto free_port;
1180 }
1181 port->cdev->ops = &port_fops;
1182
1183 devt = MKDEV(portdev->chr_major, id);
1184 err = cdev_add(port->cdev, devt, 1);
1185 if (err < 0) {
1186 dev_err(&port->portdev->vdev->dev,
1187 "Error %d adding cdev for port %u\n", err, id);
1188 goto free_cdev;
1189 }
1190 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1191 devt, port, "vport%up%u",
1192 port->portdev->drv_index, id);
1193 if (IS_ERR(port->dev)) {
1194 err = PTR_ERR(port->dev);
1195 dev_err(&port->portdev->vdev->dev,
1196 "Error %d creating device for port %u\n",
1197 err, id);
1198 goto free_cdev;
1199 }
1200
1201 spin_lock_init(&port->inbuf_lock);
1202 spin_lock_init(&port->outvq_lock);
1203 init_waitqueue_head(&port->waitqueue);
1204
1205 /* Fill the in_vq with buffers so the host can send us data. */
1206 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1207 if (!nr_added_bufs) {
1208 dev_err(port->dev, "Error allocating inbufs\n");
1209 err = -ENOMEM;
1210 goto free_device;
1211 }
1212
1213 /*
1214 * If we're not using multiport support, this has to be a console port
1215 */
1216 if (!use_multiport(port->portdev)) {
1217 err = init_port_console(port);
1218 if (err)
1219 goto free_inbufs;
1220 }
1221
1222 spin_lock_irq(&portdev->ports_lock);
1223 list_add_tail(&port->list, &port->portdev->ports);
1224 spin_unlock_irq(&portdev->ports_lock);
1225
1226 /*
1227 * Tell the Host we're set so that it can send us various
1228 * configuration parameters for this port (eg, port name,
1229 * caching, whether this is a console port, etc.)
1230 */
1231 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1232
1233 if (pdrvdata.debugfs_dir) {
1234 /*
1235 * Finally, create the debugfs file that we can use to
1236 * inspect a port's state at any time
1237 */
1238 snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
1239 port->portdev->drv_index, id);
1240 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1241 pdrvdata.debugfs_dir,
1242 port,
1243 &port_debugfs_ops);
1244 }
1245 return 0;
1246
1247free_inbufs:
1248 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1249 free_buf(buf);
1250free_device:
1251 device_destroy(pdrvdata.class, port->dev->devt);
1252free_cdev:
1253 cdev_del(port->cdev);
1254free_port:
1255 kfree(port);
1256fail:
1257 /* The host might want to notify management sw about port add failure */
1258 __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
1259 return err;
1260}
1261
1262/* No users remain, remove all port-specific data. */
1263static void remove_port(struct kref *kref)
1264{
1265 struct port *port;
1266
1267 port = container_of(kref, struct port, kref);
1268
1269 kfree(port);
1270}
1271
1272static void remove_port_data(struct port *port)
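/* Drop any data still sitting in this port's vqs and free the buffers. */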
1273{
1274 struct port_buffer *buf;
1275
1276 /* Remove unused data this port might have received. */
1277 discard_port_data(port);
1278
1279 reclaim_consumed_buffers(port);
1280
1281 /* Remove buffers we queued up for the Host to send us data in. */
1282 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1283 free_buf(buf);
1284}
1285
1286/*
1287 * Port got unplugged. Remove port from portdev's list and drop the
1288 * kref reference. If no userspace has this port opened, it will
1289 * result in the immediate removal of the port.
1290 */
1291static void unplug_port(struct port *port)
1292{
1293 spin_lock_irq(&port->portdev->ports_lock);
1294 list_del(&port->list);
1295 spin_unlock_irq(&port->portdev->ports_lock);
1296
1297 if (port->guest_connected) {
1298 /* Let the app know the port is going down. */
1299 send_sigio_to_port(port);
1300
1301 /* Do this after sigio is actually sent */
1302 port->guest_connected = false;
1303 port->host_connected = false;
1304
1305 wake_up_interruptible(&port->waitqueue);
1306 }
1307
1308 if (is_console_port(port)) {
1309 spin_lock_irq(&pdrvdata_lock);
1310 list_del(&port->cons.list);
1311 spin_unlock_irq(&pdrvdata_lock);
1312 hvc_remove(port->cons.hvc);
1313 }
1314
1315 remove_port_data(port);
1316
1317 /*
1318 * We should just assume the device itself has gone off --
1319 * else a close on an open port later will try to send out a
1320 * control message.
1321 */
1322 port->portdev = NULL;
1323
1324 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1325 device_destroy(pdrvdata.class, port->dev->devt);
1326 cdev_del(port->cdev);
1327
1328 kfree(port->name);
1329
1330 debugfs_remove(port->debugfs_file);
1331
1332 /*
1333 * Locks around here are not necessary - a port can't be
1334 * opened after we removed the port struct from ports_list
1335 * above.
1336 */
1337 kref_put(&port->kref, remove_port);
1338}
1339
1340/* Any private messages that the Host and Guest want to share */
1341static void handle_control_message(struct ports_device *portdev,
1342 struct port_buffer *buf)
1343{
1344 struct virtio_console_control *cpkt;
1345 struct port *port;
1346 size_t name_size;
1347 int err;
1348
1349 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1350
1351 port = find_port_by_id(portdev, cpkt->id);
1352 if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
1353 /* No valid header at start of buffer. Drop it. */
1354 dev_dbg(&portdev->vdev->dev,
1355 "Invalid index %u in control packet\n", cpkt->id);
1356 return;
1357 }
1358
1359 switch (cpkt->event) {
1360 case VIRTIO_CONSOLE_PORT_ADD:
1361 if (port) {
1362 dev_dbg(&portdev->vdev->dev,
1363 "Port %u already added\n", port->id);
1364 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1365 break;
1366 }
1367 if (cpkt->id >= portdev->config.max_nr_ports) {
1368 dev_warn(&portdev->vdev->dev,
1369 "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1370 cpkt->id, portdev->config.max_nr_ports - 1);
1371 break;
1372 }
1373 add_port(portdev, cpkt->id);
1374 break;
1375 case VIRTIO_CONSOLE_PORT_REMOVE:
1376 unplug_port(port);
1377 break;
1378 case VIRTIO_CONSOLE_CONSOLE_PORT:
1379 if (!cpkt->value)
1380 break;
1381 if (is_console_port(port))
1382 break;
1383
1384 init_port_console(port);
1385 complete(&early_console_added);
1386 /*
1387 * Could remove the port here in case init fails - but
1388 * have to notify the host first.
1389 */
1390 break;
1391 case VIRTIO_CONSOLE_RESIZE: {
1392 struct {
1393 __u16 rows;
1394 __u16 cols;
1395 } size;
1396
1397 if (!is_console_port(port))
1398 break;
1399
1400 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1401 sizeof(size));
1402 set_console_size(port, size.rows, size.cols);
1403
1404 port->cons.hvc->irq_requested = 1;
1405 resize_console(port);
1406 break;
1407 }
1408 case VIRTIO_CONSOLE_PORT_OPEN:
1409 port->host_connected = cpkt->value;
1410 wake_up_interruptible(&port->waitqueue);
1411 /*
1412 * If the host port got closed and the host had any
1413 * unconsumed buffers, we'll be able to reclaim them
1414 * now.
1415 */
1416 spin_lock_irq(&port->outvq_lock);
1417 reclaim_consumed_buffers(port);
1418 spin_unlock_irq(&port->outvq_lock);
1419
1420 /*
1421 * If the guest is connected, it'll be interested in
1422 * knowing the host connection state changed.
1423 */
1424 send_sigio_to_port(port);
1425 break;
1426 case VIRTIO_CONSOLE_PORT_NAME:
1427 /*
1428 * If we woke up after hibernation, we can get this
1429 * again. Skip it in that case.
1430 */
1431 if (port->name)
1432 break;
1433
1434 /*
1435 * Skip the size of the header and the cpkt to get the size
1436 * of the name that was sent
1437 */
1438 name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1439
1440 port->name = kmalloc(name_size, GFP_KERNEL);
1441 if (!port->name) {
1442 dev_err(port->dev,
1443 "Not enough space to store port name\n");
1444 break;
1445 }
1446 strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1447 name_size - 1);
1448 port->name[name_size - 1] = 0;
1449
1450 /*
1451 * Since we only have one sysfs attribute, 'name',
1452 * create it only if we have a name for the port.
1453 */
1454 err = sysfs_create_group(&port->dev->kobj,
1455 &port_attribute_group);
1456 if (err) {
1457 dev_err(port->dev,
1458 "Error %d creating sysfs device attributes\n",
1459 err);
1460 } else {
1461 /*
1462 * Generate a udev event so that appropriate
1463 * symlinks can be created based on udev
1464 * rules.
1465 */
1466 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1467 }
1468 break;
1469 }
1470}
1471
1472static void control_work_handler(struct work_struct *work)
1473{
1474 struct ports_device *portdev;
1475 struct virtqueue *vq;
1476 struct port_buffer *buf;
1477 unsigned int len;
1478
1479 portdev = container_of(work, struct ports_device, control_work);
1480 vq = portdev->c_ivq;
1481
1482 spin_lock(&portdev->c_ivq_lock);
1483 while ((buf = virtqueue_get_buf(vq, &len))) {
1484 spin_unlock(&portdev->c_ivq_lock);
1485
1486 buf->len = len;
1487 buf->offset = 0;
1488
1489 handle_control_message(portdev, buf);
1490
1491 spin_lock(&portdev->c_ivq_lock);
1492 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1493 dev_warn(&portdev->vdev->dev,
1494 "Error adding buffer to queue\n");
1495 free_buf(buf);
1496 }
1497 }
1498 spin_unlock(&portdev->c_ivq_lock);
1499}
1500
1501static void out_intr(struct virtqueue *vq)
1502{
1503 struct port *port;
1504
1505 port = find_port_by_vq(vq->vdev->priv, vq);
1506 if (!port)
1507 return;
1508
1509 wake_up_interruptible(&port->waitqueue);
1510}
1511
1512static void in_intr(struct virtqueue *vq)
1513{
1514 struct port *port;
1515 unsigned long flags;
1516
1517 port = find_port_by_vq(vq->vdev->priv, vq);
1518 if (!port)
1519 return;
1520
1521 spin_lock_irqsave(&port->inbuf_lock, flags);
1522 port->inbuf = get_inbuf(port);
1523
1524 /*
1525 * Don't queue up data when port is closed. This condition
1526 * can be reached when a console port is not yet connected (no
1527 * tty is spawned) and the host sends out data to console
1528 * ports. For generic serial ports, the host won't
1529 * (shouldn't) send data till the guest is connected.
1530 */
1531 if (!port->guest_connected)
1532 discard_port_data(port);
1533
1534 spin_unlock_irqrestore(&port->inbuf_lock, flags);
1535
1536 wake_up_interruptible(&port->waitqueue);
1537
1538 /* Send a SIGIO indicating new data in case the process asked for it */
1539 send_sigio_to_port(port);
1540
1541 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1542 hvc_kick();
1543}
1544
1545static void control_intr(struct virtqueue *vq)
1546{
1547 struct ports_device *portdev;
1548
1549 portdev = vq->vdev->priv;
1550 schedule_work(&portdev->control_work);
1551}
1552
1553static void config_intr(struct virtio_device *vdev)
1554{
1555 struct ports_device *portdev;
1556
1557 portdev = vdev->priv;
1558
1559 if (!use_multiport(portdev))
1560 schedule_work(&portdev->config_work);
1561}
1562
1563static void config_work_handler(struct work_struct *work)
1564{
1565 struct ports_device *portdev;
1566
1567 portdev = container_of(work, struct ports_device, config_work);
1568 if (!use_multiport(portdev)) {
1569 struct virtio_device *vdev;
1570 struct port *port;
1571 u16 rows, cols;
1572
1573 vdev = portdev->vdev;
1574 vdev->config->get(vdev,
1575 offsetof(struct virtio_console_config, cols),
1576 &cols, sizeof(u16));
1577 vdev->config->get(vdev,
1578 offsetof(struct virtio_console_config, rows),
1579 &rows, sizeof(u16));
1580
1581 port = find_port_by_id(portdev, 0);
1582 set_console_size(port, rows, cols);
1583
1584 /*
1585 * We'll use this way of resizing only for legacy
1586 * support. For newer userspace
1587 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
1588 * to indicate console size changes so that it can be
1589 * done per-port.
1590 */
1591 resize_console(port);
1592 }
1593}
1594
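/*
 * Allocate and find all virtqueues: an in/out pair per port, plus a
 * control in/out pair when the multiport feature was negotiated.
 */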
1595static int init_vqs(struct ports_device *portdev)
1596{
1597 vq_callback_t **io_callbacks;
1598 char **io_names;
1599 struct virtqueue **vqs;
1600 u32 i, j, nr_ports, nr_queues;
1601 int err;
1602
1603 nr_ports = portdev->config.max_nr_ports;
1604 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
1605
1606 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
1607 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
1608 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
1609 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1610 GFP_KERNEL);
1611 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1612 GFP_KERNEL);
1613 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1614 !portdev->out_vqs) {
1615 err = -ENOMEM;
1616 goto free;
1617 }
1618
1619 /*
1620 * For backward compat (newer host but older guest), the host
1621 * spawns a console port first and also inits the vqs for port
1622 * 0 before others.
1623 */
1624 j = 0;
1625 io_callbacks[j] = in_intr;
1626 io_callbacks[j + 1] = out_intr;
1627 io_names[j] = "input";
1628 io_names[j + 1] = "output";
1629 j += 2;
1630
1631 if (use_multiport(portdev)) {
1632 io_callbacks[j] = control_intr;
1633 io_callbacks[j + 1] = NULL;
1634 io_names[j] = "control-i";
1635 io_names[j + 1] = "control-o";
1636
1637 for (i = 1; i < nr_ports; i++) {
1638 j += 2;
1639 io_callbacks[j] = in_intr;
1640 io_callbacks[j + 1] = out_intr;
1641 io_names[j] = "input";
1642 io_names[j + 1] = "output";
1643 }
1644 }
1645 /* Find the queues. */
1646 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1647 io_callbacks,
1648 (const char **)io_names);
1649 if (err)
1650 goto free;
1651
1652 j = 0;
1653 portdev->in_vqs[0] = vqs[0];
1654 portdev->out_vqs[0] = vqs[1];
1655 j += 2;
1656 if (use_multiport(portdev)) {
1657 portdev->c_ivq = vqs[j];
1658 portdev->c_ovq = vqs[j + 1];
1659
1660 for (i = 1; i < nr_ports; i++) {
1661 j += 2;
1662 portdev->in_vqs[i] = vqs[j];
1663 portdev->out_vqs[i] = vqs[j + 1];
1664 }
1665 }
1666 kfree(io_names);
1667 kfree(io_callbacks);
1668 kfree(vqs);
1669
1670 return 0;
1671
1672free:
1673 kfree(portdev->out_vqs);
1674 kfree(portdev->in_vqs);
1675 kfree(io_names);
1676 kfree(io_callbacks);
1677 kfree(vqs);
1678
1679 return err;
1680}
1681
1682static const struct file_operations portdev_fops = {
1683 .owner = THIS_MODULE,
1684};
1685
1686static void remove_vqs(struct ports_device *portdev)
1687{
1688 portdev->vdev->config->del_vqs(portdev->vdev);
1689 kfree(portdev->in_vqs);
1690 kfree(portdev->out_vqs);
1691}
1692
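/* Drain and free all buffers still on the control receive queue. */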
1693static void remove_controlq_data(struct ports_device *portdev)
1694{
1695 struct port_buffer *buf;
1696 unsigned int len;
1697
1698 if (!use_multiport(portdev))
1699 return;
1700
1701 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1702 free_buf(buf);
1703
1704 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1705 free_buf(buf);
1706}
1707
1708/*
1709 * Once we're further in boot, we get probed like any other virtio
1710 * device.
1711 *
1712 * If the host also supports multiple console ports, we check the
1713 * config space to see how many ports the host has spawned. We
1714 * initialize each port found.
1715 */
1716static int __devinit virtcons_probe(struct virtio_device *vdev)
1717{
1718 struct ports_device *portdev;
1719 int err;
1720 bool multiport;
1721 bool early = early_put_chars != NULL;
1722
1723 /* Make sure early_put_chars is read now, before the barrier */
1724 barrier();
1725
1726 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1727 if (!portdev) {
1728 err = -ENOMEM;
1729 goto fail;
1730 }
1731
1732 /* Attach this portdev to this virtio_device, and vice-versa. */
1733 portdev->vdev = vdev;
1734 vdev->priv = portdev;
1735
1736 spin_lock_irq(&pdrvdata_lock);
1737 portdev->drv_index = pdrvdata.index++;
1738 spin_unlock_irq(&pdrvdata_lock);
1739
1740 portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1741 &portdev_fops);
1742 if (portdev->chr_major < 0) {
1743 dev_err(&vdev->dev,
1744 "Error %d registering chrdev for device %u\n",
1745 portdev->chr_major, portdev->drv_index);
1746 err = portdev->chr_major;
1747 goto free;
1748 }
1749
1750 multiport = false;
1751 portdev->config.max_nr_ports = 1;
1752 if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
1753 offsetof(struct virtio_console_config,
1754 max_nr_ports),
1755 &portdev->config.max_nr_ports) == 0)
1756 multiport = true;
1757
1758 err = init_vqs(portdev);
1759 if (err < 0) {
1760 dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
1761 goto free_chrdev;
1762 }
1763
1764 spin_lock_init(&portdev->ports_lock);
1765 INIT_LIST_HEAD(&portdev->ports);
1766
1767 INIT_WORK(&portdev->config_work, &config_work_handler);
1768 INIT_WORK(&portdev->control_work, &control_work_handler);
1769
1770 if (multiport) {
1771 unsigned int nr_added_bufs;
1772
1773 spin_lock_init(&portdev->c_ivq_lock);
1774 spin_lock_init(&portdev->c_ovq_lock);
1775
1776 nr_added_bufs = fill_queue(portdev->c_ivq,
1777 &portdev->c_ivq_lock);
1778 if (!nr_added_bufs) {
1779 dev_err(&vdev->dev,
1780 "Error allocating buffers for control queue\n");
1781 err = -ENOMEM;
1782 goto free_vqs;
1783 }
1784 } else {
1785 /*
1786 * For backward compatibility: Create a console port
1787 * if we're running on older host.
1788 */
1789 add_port(portdev, 0);
1790 }
1791
1792 spin_lock_irq(&pdrvdata_lock);
1793 list_add_tail(&portdev->list, &pdrvdata.portdevs);
1794 spin_unlock_irq(&pdrvdata_lock);
1795
1796 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1797 VIRTIO_CONSOLE_DEVICE_READY, 1);
1798
1799 /*
1800 * If there was an early virtio console, assume that there are no
1801 * other consoles. We need to wait until the hvc_alloc matches the
1802 * hvc_instantiate, otherwise tty_open will complain, resulting in
1803 * a "Warning: unable to open an initial console" boot failure.
1804 * Without multiport this is done in add_port above. With multiport
1805 * this might take some host<->guest communication - thus we have to
1806 * wait.
1807 */
1808 if (multiport && early)
1809 wait_for_completion(&early_console_added);
1810
1811 return 0;
1812
1813free_vqs:
1814 /* The host might want to notify mgmt sw about device add failure */
1815 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1816 VIRTIO_CONSOLE_DEVICE_READY, 0);
1817 remove_vqs(portdev);
1818free_chrdev:
1819 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1820free:
1821 kfree(portdev);
1822fail:
1823 return err;
1824}
1825
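/* The device is going away: unplug all ports, then free the vqs and the portdev. */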
1826static void virtcons_remove(struct virtio_device *vdev)
1827{
1828 struct ports_device *portdev;
1829 struct port *port, *port2;
1830
1831 portdev = vdev->priv;
1832
1833 spin_lock_irq(&pdrvdata_lock);
1834 list_del(&portdev->list);
1835 spin_unlock_irq(&pdrvdata_lock);
1836
1837 /* Disable interrupts for vqs */
1838 vdev->config->reset(vdev);
1839 /* Finish up work that's lined up */
1840 if (use_multiport(portdev))
1841 cancel_work_sync(&portdev->control_work);
1842 else
1843 cancel_work_sync(&portdev->config_work);
1844
1845 list_for_each_entry_safe(port, port2, &portdev->ports, list)
1846 unplug_port(port);
1847
1848 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1849
1850 /*
1851 * When yanking out a device, we immediately lose the
1852 * (device-side) queues. So there's no point in keeping the
1853 * guest side around till we drop our final reference. This
1854 * also means that any ports which are in an open state will
1855 * have to just stop using the port, as the vqs are going
1856 * away.
1857 */
1858 remove_controlq_data(portdev);
1859 remove_vqs(portdev);
1860 kfree(portdev);
1861}
1862
1863static struct virtio_device_id id_table[] = {
1864 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1865 { 0 },
1866};
1867
1868static unsigned int features[] = {
1869 VIRTIO_CONSOLE_F_SIZE,
1870 VIRTIO_CONSOLE_F_MULTIPORT,
1871};
1872
1873#ifdef CONFIG_PM
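/* PM freeze: quiesce the device and release all vq buffers before suspend/hibernation. */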
1874static int virtcons_freeze(struct virtio_device *vdev)
1875{
1876 struct ports_device *portdev;
1877 struct port *port;
1878
1879 portdev = vdev->priv;
1880
1881 vdev->config->reset(vdev);
1882
1883 virtqueue_disable_cb(portdev->c_ivq);
1884 cancel_work_sync(&portdev->control_work);
1885 cancel_work_sync(&portdev->config_work);
1886 /*
1887 * Once more: if control_work_handler() was running, it would
1888 * enable the cb as the last step.
1889 */
1890 virtqueue_disable_cb(portdev->c_ivq);
1891 remove_controlq_data(portdev);
1892
1893 list_for_each_entry(port, &portdev->ports, list) {
1894 virtqueue_disable_cb(port->in_vq);
1895 virtqueue_disable_cb(port->out_vq);
1896 /*
1897 * We'll ask the host later if the new invocation has
1898 * the port opened or closed.
1899 */
1900 port->host_connected = false;
1901 remove_port_data(port);
1902 }
1903 remove_vqs(portdev);
1904
1905 return 0;
1906}
1907
1908static int virtcons_restore(struct virtio_device *vdev)
1909{
1910 struct ports_device *portdev;
1911 struct port *port;
1912 int ret;
1913
1914 portdev = vdev->priv;
1915
1916 ret = init_vqs(portdev);
1917 if (ret)
1918 return ret;
1919
1920 if (use_multiport(portdev))
1921 fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
1922
1923 list_for_each_entry(port, &portdev->ports, list) {
1924 port->in_vq = portdev->in_vqs[port->id];
1925 port->out_vq = portdev->out_vqs[port->id];
1926
1927 fill_queue(port->in_vq, &port->inbuf_lock);
1928
1929 /* Get port open/close status on the host */
1930 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1931
1932 /*
1933 * If a port was open at the time of suspending, we
1934 * have to let the host know that it's still open.
1935 */
1936 if (port->guest_connected)
1937 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1938 }
1939 return 0;
1940}
1941#endif
1942
1943static struct virtio_driver virtio_console = {
1944 .feature_table = features,
1945 .feature_table_size = ARRAY_SIZE(features),
1946 .driver.name = KBUILD_MODNAME,
1947 .driver.owner = THIS_MODULE,
1948 .id_table = id_table,
1949 .probe = virtcons_probe,
1950 .remove = virtcons_remove,
1951 .config_changed = config_intr,
1952#ifdef CONFIG_PM
1953 .freeze = virtcons_freeze,
1954 .restore = virtcons_restore,
1955#endif
1956};
1957
1958static int __init init(void)
1959{
1960 int err;
1961
1962 pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
1963 if (IS_ERR(pdrvdata.class)) {
1964 err = PTR_ERR(pdrvdata.class);
1965 pr_err("Error %d creating virtio-ports class\n", err);
1966 return err;
1967 }
1968
1969 pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
1970 if (!pdrvdata.debugfs_dir) {
1971 pr_warning("Error creating debugfs dir for virtio-ports\n");
1973 }
1974 INIT_LIST_HEAD(&pdrvdata.consoles);
1975 INIT_LIST_HEAD(&pdrvdata.portdevs);
1976
1977 return register_virtio_driver(&virtio_console);
1978}
1979
1980static void __exit fini(void)
1981{
1982 unregister_virtio_driver(&virtio_console);
1983
1984 class_destroy(pdrvdata.class);
1985 if (pdrvdata.debugfs_dir)
1986 debugfs_remove_recursive(pdrvdata.debugfs_dir);
1987}
1988module_init(init);
1989module_exit(fini);
1990
1991MODULE_DEVICE_TABLE(virtio, id_table);
1992MODULE_DESCRIPTION("Virtio console driver");
1993MODULE_LICENSE("GPL");