/*
 * Copyright (c) 2014-2015 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23#include <dev/virtio.h>
24#include <dev/virtio/virtio_ring.h>
25
26#include <debug.h>
27#include <assert.h>
28#include <trace.h>
29#include <compiler.h>
30#include <list.h>
31#include <err.h>
32#include <stdlib.h>
33#include <string.h>
34#include <pow2.h>
35#include <lk/init.h>
36#include <kernel/thread.h>
37#include <kernel/vm.h>
38#include <platform/interrupts.h>
39
40#include "virtio_priv.h"
41
42#if WITH_DEV_VIRTIO_BLOCK
43#include <dev/virtio/block.h>
44#endif
45#if WITH_DEV_VIRTIO_NET
46#include <dev/virtio/net.h>
47#endif
48#if WITH_DEV_VIRTIO_GPU
49#include <dev/virtio/gpu.h>
50#endif
51
52#define LOCAL_TRACE 0
53
54static struct virtio_device *devices;
55
56static void dump_mmio_config(const volatile struct virtio_mmio_config *mmio)
57{
58 printf("mmio at %p\n", mmio);
59 printf("\tmagic 0x%x\n", mmio->magic);
60 printf("\tversion 0x%x\n", mmio->version);
61 printf("\tdevice_id 0x%x\n", mmio->device_id);
62 printf("\tvendor_id 0x%x\n", mmio->vendor_id);
63 printf("\thost_features 0x%x\n", mmio->host_features);
64 printf("\tguest_page_size %u\n", mmio->guest_page_size);
65 printf("\tqnum %u\n", mmio->queue_num);
66 printf("\tqnum_max %u\n", mmio->queue_num_max);
67 printf("\tqnum_align %u\n", mmio->queue_align);
68 printf("\tqnum_pfn %u\n", mmio->queue_pfn);
69 printf("\tstatus 0x%x\n", mmio->status);
70}
71
72void virtio_dump_desc(const struct vring_desc *desc)
73{
74 printf("vring descriptor %p\n", desc);
75 printf("\taddr 0x%llx\n", desc->addr);
76 printf("\tlen 0x%x\n", desc->len);
77 printf("\tflags 0x%hhx\n", desc->flags);
78 printf("\tnext 0x%hhx\n", desc->next);
79}
80
/*
 * Top-level interrupt handler for one virtio-mmio device.
 *
 * arg is the struct virtio_device registered in virtio_mmio_detect().
 * Reads the device's interrupt status register, acks the bits it
 * handles, and dispatches:
 *   bit 0 - used-ring updates: walks every active ring and hands each
 *           newly completed used element to the driver's irq callback.
 *   bit 1 - config change: forwards to the optional config callback.
 *
 * Returns INT_RESCHEDULE if any callback requested a reschedule,
 * INT_NO_RESCHEDULE otherwise.
 */
static enum handler_return virtio_mmio_irq(void *arg)
{
    struct virtio_device *dev = (struct virtio_device *)arg;
    LTRACEF("dev %p, index %u\n", dev, dev->index);

    uint32_t irq_status = dev->mmio_config->interrupt_status;
    LTRACEF("status 0x%x\n", irq_status);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (irq_status & 0x1) { /* used ring update */
        // XXX is this safe?
        /* ack before processing; completions that land after this point
         * will raise a fresh interrupt */
        dev->mmio_config->interrupt_ack = 0x1;

        /* cycle through all the active rings */
        for (uint r = 0; r < MAX_VIRTIO_RINGS; r++) {
            if ((dev->active_rings_bitmap & (1<<r)) == 0)
                continue;

            struct vring *ring = &dev->ring[r];
            LTRACEF("ring %u: used flags 0x%hhx idx 0x%hhx last_used %u\n", r, ring->used->flags, ring->used->idx, ring->last_used);

            /* snapshot the device's used index; entries in
             * [last_used, cur_idx) are completions we haven't consumed */
            uint cur_idx = ring->used->idx;
            for (uint i = ring->last_used; i != (cur_idx & ring->num_mask); i = (i + 1) & ring->num_mask) {
                LTRACEF("looking at idx %u\n", i);

                // process chain
                struct vring_used_elem *used_elem = &ring->used->ring[i];
                LTRACEF("id %u, len %u\n", used_elem->id, used_elem->len);

                /* a driver must have attached before its ring can complete work */
                DEBUG_ASSERT(dev->irq_driver_callback);
                ret |= dev->irq_driver_callback(dev, r, used_elem);

                ring->last_used = (ring->last_used + 1) & ring->num_mask;
            }
        }
    }
    if (irq_status & 0x2) { /* config change */
        dev->mmio_config->interrupt_ack = 0x2;

        if (dev->config_change_callback) {
            ret |= dev->config_change_callback(dev);
        }
    }

    LTRACEF("exiting irq\n");

    return ret;
}
129
/*
 * Probe 'count' legacy virtio-mmio register windows starting at 'ptr',
 * spaced 0x200 bytes apart, and attach drivers for the device types
 * compiled in (block, net, gpu).
 *
 * irqs[] supplies one interrupt number per slot. A (masked) interrupt
 * handler is registered for every slot up front; it is only unmasked
 * once a driver has attached and installed an irq callback.
 *
 * Returns the number of devices successfully initialized, or
 * ERR_NO_MEMORY if the device table cannot be allocated.
 *
 * May only be called once (asserted): the device table is allocated
 * here and intentionally never freed, since the registered interrupt
 * handlers keep pointers into it.
 */
int virtio_mmio_detect(void *ptr, uint count, const uint irqs[])
{
    LTRACEF("ptr %p, count %u\n", ptr, count);

    DEBUG_ASSERT(ptr);
    DEBUG_ASSERT(irqs);
    DEBUG_ASSERT(!devices);

    /* allocate an array big enough to hold a list of devices */
    devices = calloc(count, sizeof(struct virtio_device));
    if (!devices)
        return ERR_NO_MEMORY;

    int found = 0;
    for (uint i = 0; i < count; i++) {
        /* each legacy virtio-mmio window is 0x200 bytes */
        volatile struct virtio_mmio_config *mmio = (struct virtio_mmio_config *)((uint8_t *)ptr + i * 0x200);
        struct virtio_device *dev = &devices[i];

        dev->index = i;
        dev->irq = irqs[i];

        /* register the handler masked; unmask only after a driver attaches */
        mask_interrupt(irqs[i]);
        register_int_handler(irqs[i], &virtio_mmio_irq, (void *)dev);

        LTRACEF("looking at magic 0x%x version 0x%x did 0x%x vid 0x%x\n",
                mmio->magic, mmio->version, mmio->device_id, mmio->vendor_id);

        /* skip slots that don't carry the virtio-mmio magic ("virt") */
        if (mmio->magic != VIRTIO_MMIO_MAGIC) {
            continue;
        }

#if LOCAL_TRACE
        if (mmio->device_id != 0) {
            dump_mmio_config(mmio);
        }
#endif

#if WITH_DEV_VIRTIO_BLOCK
        if (mmio->device_id == 2) { // block device
            LTRACEF("found block device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_block_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                // XXX quick test code, remove
#if 0
                uint8_t buf[512];
                memset(buf, 0x99, sizeof(buf));
                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
                hexdump8_ex(buf, sizeof(buf), 0);

                buf[0]++;
                virtio_block_read_write(dev, buf, 0, sizeof(buf), true);

                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
                hexdump8_ex(buf, sizeof(buf), 0);
#endif
            }

        }
#endif // WITH_DEV_VIRTIO_BLOCK
#if WITH_DEV_VIRTIO_NET
        if (mmio->device_id == 1) { // network device
            LTRACEF("found net device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_net_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);
            }
        }
#endif // WITH_DEV_VIRTIO_NET
#if WITH_DEV_VIRTIO_GPU
        if (mmio->device_id == 0x10) { // virtio-gpu
            LTRACEF("found gpu device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_gpu_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                virtio_gpu_start(dev);
            }
        }
#endif // WITH_DEV_VIRTIO_GPU

        if (dev->valid)
            found++;
    }

    return found;
}
242
243void virtio_free_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
244{
245 LTRACEF("dev %p ring %u index %u free_count %u\n", dev, ring_index, desc_index, dev->ring[ring_index].free_count);
246 dev->ring[ring_index].desc[desc_index].next = dev->ring[ring_index].free_list;
247 dev->ring[ring_index].free_list = desc_index;
248 dev->ring[ring_index].free_count++;
249}
250
251uint16_t virtio_alloc_desc(struct virtio_device *dev, uint ring_index)
252{
253 if (dev->ring[ring_index].free_count == 0)
254 return 0xffff;
255
256 DEBUG_ASSERT(dev->ring[ring_index].free_list != 0xffff);
257
258 uint16_t i = dev->ring[ring_index].free_list;
259 struct vring_desc *desc = &dev->ring[ring_index].desc[i];
260 dev->ring[ring_index].free_list = desc->next;
261
262 dev->ring[ring_index].free_count--;
263
264 return i;
265}
266
267struct vring_desc *virtio_alloc_desc_chain(struct virtio_device *dev, uint ring_index, size_t count, uint16_t *start_index)
268{
269 if (dev->ring[ring_index].free_count < count)
270 return NULL;
271
272 /* start popping entries off the chain */
273 struct vring_desc *last = 0;
274 uint16_t last_index = 0;
275 while (count > 0) {
276 uint16_t i = dev->ring[ring_index].free_list;
277 struct vring_desc *desc = &dev->ring[ring_index].desc[i];
278
279 dev->ring[ring_index].free_list = desc->next;
280 dev->ring[ring_index].free_count--;
281
282 if (last) {
283 desc->flags = VRING_DESC_F_NEXT;
284 desc->next = last_index;
285 } else {
286 // first one
287 desc->flags = 0;
288 desc->next = 0;
289 }
290 last = desc;
291 last_index = i;
292 count--;
293 }
294
295 if (start_index)
296 *start_index = last_index;
297
298 return last;
299}
300
/*
 * Publish a previously filled descriptor chain (head index 'desc_index')
 * on ring 'ring_index's available ring.
 *
 * Does not notify the device; call virtio_kick() after submitting one or
 * more chains.
 */
void virtio_submit_chain(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
{
    LTRACEF("dev %p, ring %u, desc %u\n", dev, ring_index, desc_index);

    /* add the chain to the available list */
    struct vring_avail *avail = dev->ring[ring_index].avail;

    avail->ring[avail->idx & dev->ring[ring_index].num_mask] = desc_index;
    /* barrier: the slot write must be visible to the device before the
     * index increment that publishes it */
    DSB;
    avail->idx++;

#if LOCAL_TRACE
    hexdump(avail, 16);
#endif
}
316
/*
 * Notify the device that ring 'ring_index' has new available
 * descriptors, by writing the queue index to the queue_notify register.
 *
 * NOTE(review): the DSB here sits *after* the queue_notify write; the
 * ordering-critical write (avail->idx) is already fenced inside
 * virtio_submit_chain(). Confirm whether this trailing barrier is
 * intentional (e.g. to drain the MMIO write) before relying on it.
 */
void virtio_kick(struct virtio_device *dev, uint ring_index)
{
    LTRACEF("dev %p, ring %u\n", dev, ring_index);

    dev->mmio_config->queue_notify = ring_index;
    DSB;
}
324
325status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
326{
327 LTRACEF("dev %p, index %u, len %u\n", dev, index, len);
328
329 DEBUG_ASSERT(dev);
330 DEBUG_ASSERT(len > 0 && ispow2(len));
331 DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);
332
333 if (len == 0 || !ispow2(len))
334 return ERR_INVALID_ARGS;
335
336 struct vring *ring = &dev->ring[index];
337
338 /* allocate a ring */
339 size_t size = vring_size(len, PAGE_SIZE);
340 LTRACEF("need %zu bytes\n", size);
341
342#if WITH_KERNEL_VM
343 void *vptr;
344 status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
345 if (err < 0)
346 return ERR_NO_MEMORY;
347
348 LTRACEF("allocated virtio_ring at va %p\n", vptr);
349
350 /* compute the physical address */
351 paddr_t pa;
352 err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
353 if (err < 0) {
354 return ERR_NO_MEMORY;
355 }
356
357 LTRACEF("virtio_ring at pa 0x%lx\n", pa);
358#else
359 void *vptr = memalign(PAGE_SIZE, size);
360 if (!vptr)
361 return ERR_NO_MEMORY;
362
363 LTRACEF("ptr %p\n", vptr);
364 memset(vptr, 0, size);
365
366 /* compute the physical address */
367 paddr_t pa = (paddr_t)vptr;
368#endif
369
370 /* initialize the ring */
371 vring_init(ring, len, vptr, PAGE_SIZE);
372 dev->ring[index].free_list = 0xffff;
373 dev->ring[index].free_count = 0;
374
375 /* add all the descriptors to the free list */
376 for (uint i = 0; i < len; i++) {
377 virtio_free_desc(dev, index, i);
378 }
379
380 /* register the ring with the device */
381 DEBUG_ASSERT(dev->mmio_config);
382 dev->mmio_config->guest_page_size = PAGE_SIZE;
383 dev->mmio_config->queue_sel = index;
384 dev->mmio_config->queue_num = len;
385 dev->mmio_config->queue_align = PAGE_SIZE;
386 dev->mmio_config->queue_pfn = pa / PAGE_SIZE;
387
388 /* mark the ring active */
389 dev->active_rings_bitmap |= (1 << index);
390
391 return NO_ERROR;
392}
393
394void virtio_reset_device(struct virtio_device *dev)
395{
396 dev->mmio_config->status = 0;
397}
398
399void virtio_status_acknowledge_driver(struct virtio_device *dev)
400{
401 dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
402}
403
404void virtio_status_driver_ok(struct virtio_device *dev)
405{
406 dev->mmio_config->status |= VIRTIO_STATUS_DRIVER_OK;
407}
408
/*
 * Empty LK init hook. Virtio buses are discovered explicitly by
 * platform code calling virtio_mmio_detect(); this hook only reserves
 * a slot at LK_INIT_LEVEL_THREADING.
 */
void virtio_init(uint level)
{
}

LK_INIT_HOOK(virtio, &virtio_init, LK_INIT_LEVEL_THREADING);
414