/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID 2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

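/*
 * Lifetime of an instance: vhost_vsock_dev_open() allocates it, the
 * VHOST_VSOCK_SET_GUEST_CID ioctl inserts it into vhost_vsock_hash, and
 * vhost_vsock_dev_release() unhashes it and waits out RCU readers with
 * synchronize_rcu() before freeing.
 */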
struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list; /* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_lock or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

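/*
 * Host->guest delivery path: vhost_transport_send_pkt() appends packets to
 * send_pkt_list and queues send_pkt_work; the worker (or the guest's RX kick
 * handler) then calls vhost_transport_do_send_pkt() below to drain the list
 * into the buffers the guest posted on its RX virtqueue.
 */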
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		/* Kick the tx handler only if the reply count just dropped
		 * back below the virtqueue size, i.e. tx processing may have
		 * been throttled by vhost_vsock_more_replies() and can now
		 * resume.
		 */
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

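/* For reference, the wire format parsed below is struct virtio_vsock_hdr
 * from <linux/virtio_vsock.h> (all fields little-endian): src_cid and
 * dst_cid (__le64), src_port and dst_port (__le32), len (__le32, payload
 * bytes excluding the header), type (__le16), op (__le16), flags, buf_alloc
 * and fwd_cnt (__le32). The header is followed immediately by len bytes of
 * payload in the same descriptor chain.
 */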
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

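/*
 * Virtqueue names follow the guest's perspective: VSOCK_VQ_TX carries
 * guest->host packets and is handled here, while VSOCK_VQ_RX carries
 * host->guest packets and is filled by vhost_transport_do_send_pkt().
 */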
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

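/*
 * Per vhost convention, a non-NULL vq->private_data marks a virtqueue as
 * enabled; here it simply points back at the owning vhost_vsock. The kick
 * handlers above bail out early while it is NULL.
 */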
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient. Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

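/*
 * Valid guest CIDs are 3..U32_MAX-1: CIDs up to VMADDR_CID_HOST (2) are
 * reserved, U32_MAX is VMADDR_CID_ANY, and 64-bit CIDs are not yet
 * supported.
 */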
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

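/*
 * Minimal userspace setup sketch for the ioctl sequence handled above
 * (error handling omitted; QEMU's vhost-vsock device performs the
 * equivalent sequence, including the VHOST_SET_OWNER, memory-table and
 * vring ioctls that fall through to vhost_dev_ioctl()/vhost_vring_ioctl()):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;			// example guest CID, >= 3
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	// ... VHOST_SET_OWNER, VHOST_SET_MEM_TABLE, vring setup ...
 *	int start = 1;
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 */
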
#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size = virtio_transport_set_buffer_size,
		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
		.get_buffer_size = virtio_transport_get_buffer_size,
		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

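/*
 * vsock_core_init() below registers vhost_transport with the AF_VSOCK core;
 * in this kernel generation only one transport can be active at a time, so
 * registration fails if another transport (e.g. vmci) is already loaded.
 */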
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");