[Feature]add MT2731_MP2_MR2_SVN388 baseline version
Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/dev/virtio/block/include/dev/virtio/block.h b/src/bsp/lk/dev/virtio/block/include/dev/virtio/block.h
new file mode 100644
index 0000000..042121d
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/block/include/dev/virtio/block.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <sys/types.h>
+#include <dev/virtio.h>
+
+status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features) __NONNULL();
+
+ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write) __NONNULL();
+
diff --git a/src/bsp/lk/dev/virtio/block/rules.mk b/src/bsp/lk/dev/virtio/block/rules.mk
new file mode 100644
index 0000000..f744bcb
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/block/rules.mk
@@ -0,0 +1,13 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+ $(LOCAL_DIR)/virtio-block.c \
+
+MODULE_DEPS += \
+ dev/virtio \
+ lib/bio
+
+
+include make/module.mk
diff --git a/src/bsp/lk/dev/virtio/block/virtio-block.c b/src/bsp/lk/dev/virtio/block/virtio-block.c
new file mode 100644
index 0000000..61db486
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/block/virtio-block.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dev/virtio/block.h>
+
+#include <debug.h>
+#include <assert.h>
+#include <trace.h>
+#include <compiler.h>
+#include <list.h>
+#include <err.h>
+#include <kernel/thread.h>
+#include <kernel/event.h>
+#include <kernel/mutex.h>
+#include <kernel/vm.h>
+#include <lib/bio.h>
+
+#define LOCAL_TRACE 0
+
+struct virtio_blk_config { /* virtio-blk device config space, read via dev->config_ptr */
+ uint64_t capacity; /* device capacity in 512-byte sectors */
+ uint32_t size_max; /* max bytes per segment (valid if VIRTIO_BLK_F_SIZE_MAX) */
+ uint32_t seg_max; /* max segments per request (valid if VIRTIO_BLK_F_SEG_MAX) */
+ struct virtio_blk_geometry {
+ uint16_t cylinders;
+ uint8_t heads;
+ uint8_t sectors;
+ } geometry; /* legacy CHS geometry (valid if VIRTIO_BLK_F_GEOMETRY) */
+ uint32_t blk_size; /* preferred block size (valid if VIRTIO_BLK_F_BLK_SIZE) */
+} __PACKED;
+
+struct virtio_blk_req { /* request header, first descriptor of every block transfer */
+ uint32_t type; /* VIRTIO_BLK_T_IN / _OUT / _FLUSH */
+ uint32_t ioprio; /* request priority; this driver always uses 0 */
+ uint64_t sector; /* starting sector, always in 512-byte units */
+} __PACKED;
+
+#define VIRTIO_BLK_F_BARRIER (1<<0) /* legacy: host supports request barriers */
+#define VIRTIO_BLK_F_SIZE_MAX (1<<1) /* config size_max is valid */
+#define VIRTIO_BLK_F_SEG_MAX (1<<2) /* config seg_max is valid */
+#define VIRTIO_BLK_F_GEOMETRY (1<<4) /* config geometry is valid */
+#define VIRTIO_BLK_F_RO (1<<5) /* device is read-only */
+#define VIRTIO_BLK_F_BLK_SIZE (1<<6) /* config blk_size is valid */
+#define VIRTIO_BLK_F_SCSI (1<<7) /* legacy: SCSI packet command support */
+#define VIRTIO_BLK_F_FLUSH (1<<9) /* cache flush command supported */
+#define VIRTIO_BLK_F_TOPOLOGY (1<<10) /* topology information available */
+#define VIRTIO_BLK_F_CONFIG_WCE (1<<11) /* writeback cache toggle in config space */
+
+#define VIRTIO_BLK_T_IN 0 /* request type: read */
+#define VIRTIO_BLK_T_OUT 1 /* request type: write */
+#define VIRTIO_BLK_T_FLUSH 4 /* request type: cache flush */
+
+#define VIRTIO_BLK_S_OK 0 /* status byte: success */
+#define VIRTIO_BLK_S_IOERR 1 /* status byte: device or driver error */
+#define VIRTIO_BLK_S_UNSUPP 2 /* status byte: request not supported */
+
+static enum handler_return virtio_block_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e);
+static ssize_t virtio_bdev_read_block(struct bdev *bdev, void *buf, bnum_t block, uint count);
+static ssize_t virtio_bdev_write_block(struct bdev *bdev, const void *buf, bnum_t block, uint count);
+
+struct virtio_block_dev { /* per-device driver state, hung off virtio_device::priv */
+ struct virtio_device *dev;
+
+ mutex_t lock; /* serializes i/o: one request in flight at a time */
+ event_t io_event; /* signalled by the irq callback on completion (autounsignal) */
+
+ /* bio block device */
+ bdev_t bdev;
+
+ /* one blk_req structure for io, not crossing a page boundary */
+ struct virtio_blk_req *blk_req;
+ paddr_t blk_req_phys;
+
+ /* one uint8_t response word */
+ uint8_t blk_response; /* VIRTIO_BLK_S_* status written back by the device */
+ paddr_t blk_response_phys;
+};
+
+status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features) /* probe/attach: set up the virtqueue and register a bio device backed by this virtio-blk */
+{
+ LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);
+
+ /* allocate a new block device */
+ struct virtio_block_dev *bdev = malloc(sizeof(struct virtio_block_dev));
+ if (!bdev)
+ return ERR_NO_MEMORY;
+
+ mutex_init(&bdev->lock);
+ event_init(&bdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);
+
+ bdev->dev = dev;
+ dev->priv = bdev;
+
+ bdev->blk_req = memalign(sizeof(struct virtio_blk_req), sizeof(struct virtio_blk_req)); /* aligned to its own size so it cannot cross a page; NOTE(review): result not NULL-checked */
+#if WITH_KERNEL_VM
+ arch_mmu_query((vaddr_t)bdev->blk_req, &bdev->blk_req_phys, NULL);
+#else
+ bdev->blk_req_phys = (uint64_t)(uintptr_t)bdev->blk_req; /* fix: was 'blk_freq_phys', not a member of struct virtio_block_dev (compile error without WITH_KERNEL_VM) */
+#endif
+ LTRACEF("blk_req structure at %p (0x%lx phys)\n", bdev->blk_req, bdev->blk_req_phys);
+
+#if WITH_KERNEL_VM
+ arch_mmu_query((vaddr_t)&bdev->blk_response, &bdev->blk_response_phys, NULL);
+#else
+ bdev->blk_response_phys = (uint64_t)(uintptr_t)&bdev->blk_response;
+#endif
+
+ /* make sure the device is reset */
+ virtio_reset_device(dev);
+
+ volatile struct virtio_blk_config *config = (struct virtio_blk_config *)dev->config_ptr;
+
+ LTRACEF("capacity 0x%llx\n", config->capacity);
+ LTRACEF("size_max 0x%x\n", config->size_max);
+ LTRACEF("seg_max 0x%x\n", config->seg_max);
+ LTRACEF("blk_size 0x%x\n", config->blk_size);
+
+ /* ack and set the driver status bit */
+ virtio_status_acknowledge_driver(dev);
+
+ // XXX check features bits and ack/nak them
+
+ /* allocate a virtio ring */
+ virtio_alloc_ring(dev, 0, 256); /* queue 0, 256 descriptors */
+
+ /* set our irq handler */
+ dev->irq_driver_callback = &virtio_block_irq_driver_callback;
+
+ /* set DRIVER_OK */
+ virtio_status_driver_ok(dev);
+
+ /* construct the block device */
+ static uint8_t found_index = 0; /* monotonically numbers devices: virtio0, virtio1, ... */
+ char buf[16];
+ snprintf(buf, sizeof(buf), "virtio%u", found_index++);
+ bio_initialize_bdev(&bdev->bdev, buf,
+ config->blk_size, config->capacity,
+ 0, NULL, BIO_FLAGS_NONE);
+
+ /* override our block device hooks */
+ bdev->bdev.read_block = &virtio_bdev_read_block;
+ bdev->bdev.write_block = &virtio_bdev_write_block;
+
+ bio_register_device(&bdev->bdev);
+
+ printf("found virtio block device of size %lld\n", config->capacity * config->blk_size);
+
+ return NO_ERROR;
+}
+
+static enum handler_return virtio_block_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e) /* irq context: recycle the used descriptor chain and wake the waiting thread */
+{
+ struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;
+
+ LTRACEF("dev %p, ring %u, e %p, id %u, len %u\n", dev, ring, e, e->id, e->len);
+
+ /* parse our descriptor chain, add back to the free queue */
+ uint16_t i = e->id;
+ for (;;) {
+ int next;
+ struct vring_desc *desc = virtio_desc_index_to_desc(dev, ring, i);
+
+ //virtio_dump_desc(desc);
+
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ next = desc->next; /* remember the link before freeing this descriptor */
+ } else {
+ /* end of chain */
+ next = -1;
+ }
+
+ virtio_free_desc(dev, ring, i);
+
+ if (next < 0)
+ break;
+ i = next;
+ }
+
+ /* signal our event */
+ event_signal(&bdev->io_event, false); /* no immediate reschedule in irq context; handled via return value */
+
+ return INT_RESCHEDULE;
+}
+
+ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write) /* synchronous block i/o: build a header/data/status descriptor chain and block until the irq completes it */
+{
+ struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;
+
+ uint16_t i;
+ struct vring_desc *desc;
+ paddr_t pa;
+ vaddr_t va = (vaddr_t)buf;
+
+ LTRACEF("dev %p, buf %p, offset 0x%llx, len %zu\n", dev, buf, offset, len);
+
+ mutex_acquire(&bdev->lock); /* blk_req/blk_response are shared: one request in flight at a time */
+
+ /* set up the request */
+ bdev->blk_req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN;
+ bdev->blk_req->ioprio = 0;
+ bdev->blk_req->sector = offset / 512; /* virtio-blk sectors are always 512 bytes, regardless of blk_size */
+ LTRACEF("blk_req type %u ioprio %u sector %llu\n",
+ bdev->blk_req->type, bdev->blk_req->ioprio, bdev->blk_req->sector);
+
+ /* put together a transfer */
+ desc = virtio_alloc_desc_chain(dev, 0, 3, &i); /* NOTE(review): result not NULL-checked; assumes the ring never runs out */
+ LTRACEF("after alloc chain desc %p, i %u\n", desc, i);
+
+ // XXX not cache safe.
+ // At the moment only tested on arm qemu, which doesn't emulate cache.
+
+ /* set up the descriptor pointing to the head */
+ desc->addr = bdev->blk_req_phys;
+ desc->len = sizeof(struct virtio_blk_req);
+ desc->flags |= VRING_DESC_F_NEXT;
+
+ /* set up the descriptor pointing to the buffer */
+ desc = virtio_desc_index_to_desc(dev, 0, desc->next);
+#if WITH_KERNEL_VM
+ /* translate the first buffer */
+ arch_mmu_query(va, &pa, NULL);
+ desc->addr = (uint64_t)pa;
+ /* desc->len is filled in below */
+#else
+ desc->addr = (uint64_t)(uintptr_t)buf;
+ desc->len = len;
+#endif
+ desc->flags |= write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */
+ desc->flags |= VRING_DESC_F_NEXT;
+
+#if WITH_KERNEL_VM
+ /* see if we need to add more descriptors due to scatter gather */
+ paddr_t next_pa = PAGE_ALIGN(pa + 1); /* physical address of the next page boundary */
+ desc->len = MIN(next_pa - pa, len); /* first fragment stops at its page boundary */
+ LTRACEF("first descriptor va 0x%lx desc->addr 0x%llx desc->len %u\n", va, desc->addr, desc->len);
+ len -= desc->len; /* from here on len counts bytes not yet covered by a descriptor */
+ while (len > 0) {
+ /* amount of source buffer handled by this iteration of the loop */
+ size_t len_tohandle = MIN(len, PAGE_SIZE);
+
+ /* translate the next page in the buffer */
+ va = PAGE_ALIGN(va + 1);
+ arch_mmu_query(va, &pa, NULL);
+ LTRACEF("va now 0x%lx, pa 0x%lx, next_pa 0x%lx, remaining len %zu\n", va, pa, next_pa, len);
+
+ /* is the new translated physical address contiguous to the last one? */
+ if (next_pa == pa) {
+ LTRACEF("extending last one by %zu bytes\n", len_tohandle);
+ desc->len += len_tohandle; /* physically contiguous: grow the current descriptor instead of allocating */
+ } else {
+ uint16_t next_i = virtio_alloc_desc(dev, 0);
+ struct vring_desc *next_desc = virtio_desc_index_to_desc(dev, 0, next_i);
+ DEBUG_ASSERT(next_desc);
+
+ LTRACEF("doesn't extend, need new desc, allocated desc %i (%p)\n", next_i, next_desc);
+
+ /* fill this descriptor in and put it after the last one but before the response descriptor */
+ next_desc->addr = (uint64_t)pa;
+ next_desc->len = len_tohandle;
+ next_desc->flags = write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */
+ next_desc->flags |= VRING_DESC_F_NEXT;
+ next_desc->next = desc->next; /* splice into the chain ahead of the status descriptor */
+ desc->next = next_i;
+
+ desc = next_desc;
+ }
+ len -= len_tohandle;
+ next_pa += PAGE_SIZE;
+ }
+#endif
+
+ /* set up the descriptor pointing to the response */
+ desc = virtio_desc_index_to_desc(dev, 0, desc->next);
+ desc->addr = bdev->blk_response_phys;
+ desc->len = 1; /* single VIRTIO_BLK_S_* status byte */
+ desc->flags = VRING_DESC_F_WRITE;
+
+ /* submit the transfer */
+ virtio_submit_chain(dev, 0, i);
+
+ /* kick it off */
+ virtio_kick(dev, 0);
+
+ /* wait for the transfer to complete */
+ event_wait(&bdev->io_event);
+
+ LTRACEF("status 0x%hhx\n", bdev->blk_response); /* NOTE(review): status byte is logged but never checked against VIRTIO_BLK_S_OK */
+
+ mutex_release(&bdev->lock);
+
+ return len; /* VM path: len was consumed to 0 above, so 0 == success (callers compare == 0); NOTE(review): non-VM path returns the original len — verify */
+}
+
+static ssize_t virtio_bdev_read_block(struct bdev *bdev, void *buf, bnum_t block, uint count) /* bio read_block hook: convert block/count to byte offset/length */
+{
+ struct virtio_block_dev *dev = containerof(bdev, struct virtio_block_dev, bdev);
+
+ LTRACEF("dev %p, buf %p, block 0x%x, count %u\n", bdev, buf, block, count);
+
+ if (virtio_block_read_write(dev->dev, buf, (off_t)block * dev->bdev.block_size,
+ count * dev->bdev.block_size, false) == 0) { /* 0 == success */
+ return count * dev->bdev.block_size; /* bytes read */
+ } else {
+ return ERR_IO;
+ }
+}
+
+static ssize_t virtio_bdev_write_block(struct bdev *bdev, const void *buf, bnum_t block, uint count) /* bio write_block hook: convert block/count to byte offset/length */
+{
+ struct virtio_block_dev *dev = containerof(bdev, struct virtio_block_dev, bdev);
+
+ LTRACEF("dev %p, buf %p, block 0x%x, count %u\n", bdev, buf, block, count);
+
+ if (virtio_block_read_write(dev->dev, (void *)buf, (off_t)block * dev->bdev.block_size, /* const cast: the transfer path takes void* but only reads on writes */
+ count * dev->bdev.block_size, true) == 0) { /* 0 == success */
+ return count * dev->bdev.block_size; /* bytes written */
+ } else {
+ return ERR_IO;
+ }
+}
+
diff --git a/src/bsp/lk/dev/virtio/gpu/include/dev/virtio/gpu.h b/src/bsp/lk/dev/virtio/gpu/include/dev/virtio/gpu.h
new file mode 100644
index 0000000..2300697
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/gpu/include/dev/virtio/gpu.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <sys/types.h>
+#include <dev/virtio.h>
+
+status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features) __NONNULL();
+
+status_t virtio_gpu_start(struct virtio_device *dev) __NONNULL();
+
diff --git a/src/bsp/lk/dev/virtio/gpu/rules.mk b/src/bsp/lk/dev/virtio/gpu/rules.mk
new file mode 100644
index 0000000..8210208
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/gpu/rules.mk
@@ -0,0 +1,12 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+ $(LOCAL_DIR)/virtio-gpu.c \
+
+MODULE_DEPS += \
+ dev/virtio \
+ lib/gfx
+
+include make/module.mk
diff --git a/src/bsp/lk/dev/virtio/gpu/virtio-gpu.c b/src/bsp/lk/dev/virtio/gpu/virtio-gpu.c
new file mode 100644
index 0000000..d9b24c7
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/gpu/virtio-gpu.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2014-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dev/virtio/gpu.h>
+
+#include <debug.h>
+#include <assert.h>
+#include <trace.h>
+#include <compiler.h>
+#include <list.h>
+#include <err.h>
+#include <string.h>
+#include <kernel/thread.h>
+#include <kernel/event.h>
+#include <kernel/mutex.h>
+#include <kernel/vm.h>
+#include <lib/gfx.h>
+#include <dev/display.h>
+
+#include "virtio_gpu.h"
+
+#define LOCAL_TRACE 0
+
+static enum handler_return virtio_gpu_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e);
+static enum handler_return virtio_gpu_config_change_callback(struct virtio_device *dev);
+static int virtio_gpu_flush_thread(void *arg);
+
+struct virtio_gpu_dev { /* per-device driver state, hung off virtio_device::priv */
+ struct virtio_device *dev;
+
+ mutex_t lock; /* serializes control-queue commands: one in flight at a time */
+ event_t io_event; /* signalled by the irq callback when a command completes */
+
+ void *gpu_request; /* single shared buffer holding command + response back to back */
+ paddr_t gpu_request_phys;
+
+ /* a saved copy of the display */
+ struct virtio_gpu_display_one pmode;
+ int pmode_id; /* scanout index of the saved pmode; -1 until one is found */
+
+ /* resource id that is set as scanout */
+ uint32_t display_resource_id;
+
+ /* next resource id */
+ uint32_t next_resource_id; /* monotonically increasing allocator, starts at 1 */
+
+ event_t flush_event; /* kicks the flusher thread to push the fb to the host */
+
+ /* framebuffer */
+ void *fb;
+};
+
+static struct virtio_gpu_dev *the_gdev; /* the single gpu device exposed via display_get_info() */
+
+static status_t send_command_response(struct virtio_gpu_dev *gdev, const void *cmd, size_t cmd_len, void **_res, size_t res_len) /* synchronous control-queue round trip; caller must hold gdev->lock (shared request buffer) */
+{
+ DEBUG_ASSERT(gdev);
+ DEBUG_ASSERT(cmd);
+ DEBUG_ASSERT(_res);
+ DEBUG_ASSERT(cmd_len + res_len < PAGE_SIZE); /* command and response share the single gpu_request buffer */
+
+ LTRACEF("gdev %p, cmd %p, cmd_len %zu, res %p, res_len %zu\n", gdev, cmd, cmd_len, _res, res_len);
+
+ uint16_t i;
+ struct vring_desc *desc = virtio_alloc_desc_chain(gdev->dev, 0, 2, &i); /* 2 descriptors: command out, response in */
+ DEBUG_ASSERT(desc);
+
+ memcpy(gdev->gpu_request, cmd, cmd_len);
+
+ desc->addr = gdev->gpu_request_phys;
+ desc->len = cmd_len;
+ desc->flags |= VRING_DESC_F_NEXT;
+
+ /* set the second descriptor to the response with the write bit set */
+ desc = virtio_desc_index_to_desc(gdev->dev, 0, desc->next);
+ DEBUG_ASSERT(desc);
+
+ void *res = (void *)((uint8_t *)gdev->gpu_request + cmd_len); /* response lives immediately after the command */
+ *_res = res; /* caller reads the device-written response through this pointer */
+ paddr_t res_phys = gdev->gpu_request_phys + cmd_len;
+ memset(res, 0, res_len);
+
+ desc->addr = res_phys;
+ desc->len = res_len;
+ desc->flags = VRING_DESC_F_WRITE; /* device writes, driver reads */
+
+ /* submit the transfer */
+ virtio_submit_chain(gdev->dev, 0, i);
+
+ /* kick it off */
+ virtio_kick(gdev->dev, 0);
+
+ /* wait for result */
+ event_wait(&gdev->io_event);
+
+ return NO_ERROR;
+}
+
+static status_t get_display_info(struct virtio_gpu_dev *gdev) /* query scanouts; saves the first enabled pmode into gdev->pmode/pmode_id */
+{
+ status_t err;
+
+ LTRACEF("gdev %p\n", gdev);
+
+ DEBUG_ASSERT(gdev);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the get display info message */
+ struct virtio_gpu_ctrl_hdr req;
+ memset(&req, 0, sizeof(req));
+ req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
+
+ /* send the message and get a response */
+ struct virtio_gpu_resp_display_info *info;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&info, sizeof(*info));
+ DEBUG_ASSERT(err == NO_ERROR);
+ if (err < NO_ERROR) {
+ mutex_release(&gdev->lock);
+ return ERR_NOT_FOUND;
+ }
+
+ /* we got response */
+ if (info->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) { /* device rejected or misunderstood the command */
+ mutex_release(&gdev->lock);
+ return ERR_NOT_FOUND;
+ }
+
+ LTRACEF("response:\n");
+ for (uint i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
+ if (info->pmodes[i].enabled) {
+ LTRACEF("%u: x %u y %u w %u h %u flags 0x%x\n", i,
+ info->pmodes[i].r.x, info->pmodes[i].r.y, info->pmodes[i].r.width, info->pmodes[i].r.height,
+ info->pmodes[i].flags);
+ if (gdev->pmode_id < 0) {
+ /* save the first valid pmode we see */
+ memcpy(&gdev->pmode, &info->pmodes[i], sizeof(gdev->pmode));
+ gdev->pmode_id = i;
+ }
+ }
+ }
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return NO_ERROR;
+}
+
+static status_t allocate_2d_resource(struct virtio_gpu_dev *gdev, uint32_t *resource_id, uint32_t width, uint32_t height) /* create a host-side 2d resource; returns its id via *resource_id */
+{
+ status_t err;
+
+ LTRACEF("gdev %p\n", gdev);
+
+ DEBUG_ASSERT(gdev);
+ DEBUG_ASSERT(resource_id);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the request */
+ struct virtio_gpu_resource_create_2d req;
+ memset(&req, 0, sizeof(req));
+
+ req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
+ req.resource_id = gdev->next_resource_id++; /* simple monotonically increasing id allocator */
+ *resource_id = req.resource_id; /* reported to the caller even if the command later fails */
+ req.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; /* matches the 4-byte/pixel framebuffer */
+ req.width = width;
+ req.height = height;
+
+ /* send the command and get a response */
+ struct virtio_gpu_ctrl_hdr *res;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
+ DEBUG_ASSERT(err == NO_ERROR);
+
+ /* see if we got a valid response */
+ LTRACEF("response type 0x%x\n", res->type);
+ err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return err;
+}
+
+static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id, void *ptr, size_t buf_len) /* attach guest memory at ptr as the resource's backing store (single entry) */
+{
+ status_t err;
+
+ LTRACEF("gdev %p, resource_id %u, ptr %p, buf_len %zu\n", gdev, resource_id, ptr, buf_len);
+
+ DEBUG_ASSERT(gdev);
+ DEBUG_ASSERT(ptr);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the request */
+ struct {
+ struct virtio_gpu_resource_attach_backing req;
+ struct virtio_gpu_mem_entry mem;
+ } req; /* command followed inline by its one scatter entry */
+ memset(&req, 0, sizeof(req));
+
+ req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
+ req.req.resource_id = resource_id;
+ req.req.nr_entries = 1; /* assumes ptr is physically contiguous (pmm_alloc_kpages) */
+
+ paddr_t pa;
+ arch_mmu_query((vaddr_t)ptr, &pa, NULL); /* translate only the first page; see nr_entries above */
+ req.mem.addr = pa;
+ req.mem.length = buf_len;
+
+ /* send the command and get a response */
+ struct virtio_gpu_ctrl_hdr *res;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
+ DEBUG_ASSERT(err == NO_ERROR);
+
+ /* see if we got a valid response */
+ LTRACEF("response type 0x%x\n", res->type);
+ err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return err;
+}
+
+static status_t set_scanout(struct virtio_gpu_dev *gdev, uint32_t scanout_id, uint32_t resource_id, uint32_t width, uint32_t height) /* bind the resource to a scanout covering the full width x height rect */
+{
+ status_t err;
+
+ LTRACEF("gdev %p, scanout_id %u, resource_id %u, width %u, height %u\n", gdev, scanout_id, resource_id, width, height);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the request */
+ struct virtio_gpu_set_scanout req;
+ memset(&req, 0, sizeof(req));
+
+ req.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
+ req.r.x = req.r.y = 0; /* always the whole surface */
+ req.r.width = width;
+ req.r.height = height;
+ req.scanout_id = scanout_id;
+ req.resource_id = resource_id;
+
+ /* send the command and get a response */
+ struct virtio_gpu_ctrl_hdr *res;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
+ DEBUG_ASSERT(err == NO_ERROR);
+
+ /* see if we got a valid response */
+ LTRACEF("response type 0x%x\n", res->type);
+ err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return err;
+}
+
+static status_t flush_resource(struct virtio_gpu_dev *gdev, uint32_t resource_id, uint32_t width, uint32_t height) /* tell the host to repaint the scanout from the resource (full rect) */
+{
+ status_t err;
+
+ LTRACEF("gdev %p, resource_id %u, width %u, height %u\n", gdev, resource_id, width, height);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the request */
+ struct virtio_gpu_resource_flush req;
+ memset(&req, 0, sizeof(req));
+
+ req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
+ req.r.x = req.r.y = 0; /* always the whole surface */
+ req.r.width = width;
+ req.r.height = height;
+ req.resource_id = resource_id;
+
+ /* send the command and get a response */
+ struct virtio_gpu_ctrl_hdr *res;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
+ DEBUG_ASSERT(err == NO_ERROR);
+
+ /* see if we got a valid response */
+ LTRACEF("response type 0x%x\n", res->type);
+ err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return err;
+}
+
+static status_t transfer_to_host_2d(struct virtio_gpu_dev *gdev, uint32_t resource_id, uint32_t width, uint32_t height) /* copy the guest backing store into the host resource (full rect) */
+{
+ status_t err;
+
+ LTRACEF("gdev %p, resource_id %u, width %u, height %u\n", gdev, resource_id, width, height);
+
+ /* grab a lock to keep this single message at a time */
+ mutex_acquire(&gdev->lock);
+
+ /* construct the request */
+ struct virtio_gpu_transfer_to_host_2d req;
+ memset(&req, 0, sizeof(req));
+
+ req.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
+ req.r.x = req.r.y = 0; /* always the whole surface */
+ req.r.width = width;
+ req.r.height = height;
+ req.offset = 0; /* from the start of the backing store */
+ req.resource_id = resource_id;
+
+ /* send the command and get a response */
+ struct virtio_gpu_ctrl_hdr *res;
+ err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
+ DEBUG_ASSERT(err == NO_ERROR);
+
+ /* see if we got a valid response */
+ LTRACEF("response type 0x%x\n", res->type);
+ err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
+
+ /* release the lock */
+ mutex_release(&gdev->lock);
+
+ return err;
+}
+
+status_t virtio_gpu_start(struct virtio_device *dev) /* second-stage init: query display, allocate/attach framebuffer, set scanout, spawn flusher */
+{
+ status_t err;
+
+ LTRACEF("dev %p\n", dev);
+
+ struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;
+
+ /* get the display info and see if we find a valid pmode */
+ err = get_display_info(gdev);
+ if (err < 0) {
+ LTRACEF("failed to get display info\n");
+ return err;
+ }
+
+ if (gdev->pmode_id < 0) {
+ LTRACEF("we failed to find a pmode, exiting\n");
+ return ERR_NOT_FOUND;
+ }
+
+ /* allocate a resource */
+ err = allocate_2d_resource(gdev, &gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
+ if (err < 0) {
+ LTRACEF("failed to allocate 2d resource\n");
+ return err;
+ }
+
+ /* attach a backing store to the resource */
+ size_t len = gdev->pmode.r.width * gdev->pmode.r.height * 4; /* 4 bytes/pixel (B8G8R8X8) */
+ gdev->fb = pmm_alloc_kpages(ROUNDUP(len, PAGE_SIZE) / PAGE_SIZE, NULL); /* physically contiguous, required by the single-entry attach_backing */
+ if (!gdev->fb) {
+ TRACEF("failed to allocate framebuffer, wanted 0x%zx bytes\n", len);
+ return ERR_NO_MEMORY; /* NOTE(review): the 2d resource created above is not released on this or later error paths */
+ }
+
+ printf("virtio-gpu: framebuffer at %p, 0x%zx bytes\n", gdev->fb, len);
+
+ err = attach_backing(gdev, gdev->display_resource_id, gdev->fb, len);
+ if (err < 0) {
+ LTRACEF("failed to attach backing store\n");
+ return err;
+ }
+
+ /* attach this resource as a scanout */
+ err = set_scanout(gdev, gdev->pmode_id, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
+ if (err < 0) {
+ LTRACEF("failed to set scanout\n");
+ return err;
+ }
+
+ /* create the flush thread */
+ thread_t *t;
+ t = thread_create("virtio gpu flusher", &virtio_gpu_flush_thread, (void *)gdev, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
+ thread_detach_and_resume(t);
+
+ /* kick it once */
+ event_signal(&gdev->flush_event, true); /* push the (blank) framebuffer to the host immediately */
+
+ LTRACE_EXIT;
+
+ return NO_ERROR;
+}
+
+
+static void dump_gpu_config(const volatile struct virtio_gpu_config *config) /* debug helper: trace the gpu config space (no-op unless LOCAL_TRACE) */
+{
+ LTRACEF("events_read 0x%x\n", config->events_read);
+ LTRACEF("events_clear 0x%x\n", config->events_clear);
+ LTRACEF("num_scanouts 0x%x\n", config->num_scanouts);
+ LTRACEF("reserved 0x%x\n", config->reserved);
+}
+
+status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features) /* probe/attach: allocate driver state and bring the device to DRIVER_OK; display setup happens later in virtio_gpu_start() */
+{
+ LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);
+
+ /* allocate a new gpu device */
+ struct virtio_gpu_dev *gdev = malloc(sizeof(struct virtio_gpu_dev)); /* NOTE(review): not zeroed — fb and display_resource_id stay uninitialized until virtio_gpu_start() runs */
+ if (!gdev)
+ return ERR_NO_MEMORY;
+
+ mutex_init(&gdev->lock);
+ event_init(&gdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);
+ event_init(&gdev->flush_event, false, EVENT_FLAG_AUTOUNSIGNAL);
+
+ gdev->dev = dev;
+ dev->priv = gdev;
+
+ gdev->pmode_id = -1; /* no pmode found yet */
+ gdev->next_resource_id = 1; /* resource id 0 is never handed out */
+
+ /* allocate memory for a gpu request */
+#if WITH_KERNEL_VM
+ gdev->gpu_request = pmm_alloc_kpage(); /* NOTE(review): result not NULL-checked */
+ gdev->gpu_request_phys = kvaddr_to_paddr(gdev->gpu_request);
+#else
+ gdev->gpu_request = malloc(sizeof(struct virtio_gpu_resp_display_info)); // XXX get size better
+ gdev->gpu_request_phys = (paddr_t)gdev->gpu_request;
+#endif
+
+ /* make sure the device is reset */
+ virtio_reset_device(dev);
+
+ volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
+ dump_gpu_config(config);
+
+ /* ack and set the driver status bit */
+ virtio_status_acknowledge_driver(dev);
+
+ // XXX check features bits and ack/nak them
+
+ /* allocate a virtio ring */
+ virtio_alloc_ring(dev, 0, 16); /* control queue, 16 descriptors */
+
+ /* set our irq handler */
+ dev->irq_driver_callback = &virtio_gpu_irq_driver_callback;
+ dev->config_change_callback = &virtio_gpu_config_change_callback;
+
+ /* set DRIVER_OK */
+ virtio_status_driver_ok(dev);
+
+ /* save the main device we've found */
+ the_gdev = gdev; /* only one gpu device is tracked; a second probe overwrites the first */
+
+ printf("found virtio gpu device\n");
+
+ return NO_ERROR;
+}
+
+static enum handler_return virtio_gpu_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e) /* irq context: recycle the used descriptor chain and wake the command waiter */
+{
+ struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;
+
+ LTRACEF("dev %p, ring %u, e %p, id %u, len %u\n", dev, ring, e, e->id, e->len);
+
+ /* parse our descriptor chain, add back to the free queue */
+ uint16_t i = e->id;
+ for (;;) {
+ int next;
+ struct vring_desc *desc = virtio_desc_index_to_desc(dev, ring, i);
+
+ //virtio_dump_desc(desc);
+
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ next = desc->next; /* remember the link before freeing this descriptor */
+ } else {
+ /* end of chain */
+ next = -1;
+ }
+
+ virtio_free_desc(dev, ring, i);
+
+ if (next < 0)
+ break;
+ i = next;
+ }
+
+ /* signal our event */
+ event_signal(&gdev->io_event, false); /* wake the thread blocked in send_command_response() */
+
+ return INT_RESCHEDULE;
+}
+
+static enum handler_return virtio_gpu_config_change_callback(struct virtio_device *dev) /* config-change irq: currently only traces the new config space */
+{
+ struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;
+
+ LTRACEF("gdev %p\n", gdev);
+
+ volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
+ dump_gpu_config(config);
+
+ return INT_RESCHEDULE;
+}
+
+static int virtio_gpu_flush_thread(void *arg) /* worker: on each flush_event, push the framebuffer to the host and repaint; never exits */
+{
+ struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)arg;
+ status_t err;
+
+ for (;;) {
+ event_wait(&gdev->flush_event); /* autounsignal: coalesces bursts of flush requests */
+
+ /* transfer to host 2d */
+ err = transfer_to_host_2d(gdev, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
+ if (err < 0) {
+ LTRACEF("failed to flush resource\n"); /* best effort: skip this frame and wait for the next kick */
+ continue;
+ }
+
+ /* resource flush */
+ err = flush_resource(gdev, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
+ if (err < 0) {
+ LTRACEF("failed to flush resource\n");
+ continue;
+ }
+ }
+
+ return 0; /* unreachable; satisfies the thread entry signature */
+}
+
+void virtio_gpu_gfx_flush(uint starty, uint endy) /* gfx flush hook: wake the flusher thread; starty/endy ignored, the whole surface is pushed */
+{
+ if (the_gdev) event_signal(&the_gdev->flush_event, !arch_ints_disabled()); /* guard against a call before a gpu device is probed; only reschedule when not in irq context */
+}
+
+status_t display_get_info(struct display_info *info) /* dev/display hook: describe the virtio-gpu framebuffer */
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!the_gdev)
+ return ERR_NOT_FOUND; /* no gpu device has been probed yet */
+
+ info->framebuffer = the_gdev->fb;
+ info->format = GFX_FORMAT_RGB_x888; /* matches B8G8R8X8 resource format, 4 bytes/pixel */
+ info->width = the_gdev->pmode.r.width;
+ info->height = the_gdev->pmode.r.height;
+ info->stride = info->width; /* in pixels: fb was allocated width*height*4 with no row padding */
+ info->flush = virtio_gpu_gfx_flush;
+
+ return NO_ERROR;
+}
+
+
diff --git a/src/bsp/lk/dev/virtio/gpu/virtio_gpu.h b/src/bsp/lk/dev/virtio/gpu/virtio_gpu.h
new file mode 100644
index 0000000..19ed6d2
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/gpu/virtio_gpu.h
@@ -0,0 +1,208 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* taken from qemu sources */
+
+#ifndef VIRTIO_GPU_HW_H
+#define VIRTIO_GPU_HW_H
+
+#include <stdint.h>
+
+enum virtio_gpu_ctrl_type {
+ VIRTIO_GPU_UNDEFINED = 0,
+
+ /* 2d commands */
+ VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
+ VIRTIO_GPU_CMD_RESOURCE_UNREF,
+ VIRTIO_GPU_CMD_SET_SCANOUT,
+ VIRTIO_GPU_CMD_RESOURCE_FLUSH,
+ VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
+ VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
+ VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+
+ /* cursor commands */
+ VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
+ VIRTIO_GPU_CMD_MOVE_CURSOR,
+
+ /* success responses */
+ VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
+ VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+
+ /* error responses */
+ VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
+ VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+ VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+
+/* common header prefixing every command and response on the control queue */
+struct virtio_gpu_ctrl_hdr {
+    uint32_t type;     /* one of enum virtio_gpu_ctrl_type */
+    uint32_t flags;    /* VIRTIO_GPU_FLAG_* bits */
+    uint64_t fence_id; /* meaningful when VIRTIO_GPU_FLAG_FENCE is set */
+    uint32_t ctx_id;
+    uint32_t padding;
+};
+
+/* data passed in the cursor vq */
+
+struct virtio_gpu_cursor_pos {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
+struct virtio_gpu_update_cursor {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_cursor_pos pos; /* update & move */
+ uint32_t resource_id; /* update only */
+ uint32_t hot_x; /* update only */
+ uint32_t hot_y; /* update only */
+ uint32_t padding;
+};
+
+/* data passed in the control vq, 2d related */
+
+struct virtio_gpu_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
+struct virtio_gpu_resource_unref {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
+struct virtio_gpu_resource_create_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT */
+struct virtio_gpu_set_scanout {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ uint32_t scanout_id;
+ uint32_t resource_id;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
+struct virtio_gpu_resource_flush {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
+struct virtio_gpu_transfer_to_host_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ uint64_t offset;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+struct virtio_gpu_mem_entry {
+ uint64_t addr;
+ uint32_t length;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
+struct virtio_gpu_resource_attach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t nr_entries;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
+struct virtio_gpu_resource_detach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
+#define VIRTIO_GPU_MAX_SCANOUTS 16
+struct virtio_gpu_resp_display_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_display_one {
+ struct virtio_gpu_rect r;
+ uint32_t enabled;
+ uint32_t flags;
+ } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
+
+/* device config space: pending event bits plus the number of scanouts */
+struct virtio_gpu_config {
+    uint32_t events_read;  /* VIRTIO_GPU_EVENT_* bits pending, read by driver */
+    uint32_t events_clear; /* write bits here to acknowledge events */
+    uint32_t num_scanouts;
+    uint32_t reserved;
+};
+
+/* simple formats for fbcon/X use */
+enum virtio_gpu_formats {
+ VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
+ VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
+ VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
+ VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
+
+ VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
+ VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
+
+ VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
+ VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
+};
+
+#endif
diff --git a/src/bsp/lk/dev/virtio/include/dev/virtio.h b/src/bsp/lk/dev/virtio/include/dev/virtio.h
new file mode 100644
index 0000000..3403602
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/include/dev/virtio.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <assert.h>
+#include <list.h>
+#include <sys/types.h>
+#include <dev/virtio/virtio_ring.h>
+
+/* detect a virtio mmio hardware block
+ * returns number of devices found */
+int virtio_mmio_detect(void *ptr, uint count, const uint irqs[]);
+
+#define MAX_VIRTIO_RINGS 4
+
+struct virtio_mmio_config;
+
+/* per-device state kept by the virtio mmio bus layer */
+struct virtio_device {
+    bool valid;  /* set when this slot holds a detected device */
+
+    uint index;  /* which mmio slot this device occupies */
+    uint irq;    /* interrupt line assigned to this device */
+
+    volatile struct virtio_mmio_config *mmio_config; /* live mmio register window */
+    void *config_ptr;  /* device-specific config space within the window */
+
+    void *priv; /* a place for the driver to put private data */
+
+    /* driver hooks invoked from the bus irq handler */
+    enum handler_return (*irq_driver_callback)(struct virtio_device *dev, uint ring, const struct vring_used_elem *e);
+    enum handler_return (*config_change_callback)(struct virtio_device *dev);
+
+    /* virtio rings; NOTE(review): presumably bit N of active_rings_bitmap
+     * marks ring[N] as allocated -- confirm against virtio.c */
+    uint32_t active_rings_bitmap;
+    struct vring ring[MAX_VIRTIO_RINGS];
+};
+
+void virtio_reset_device(struct virtio_device *dev);
+void virtio_status_acknowledge_driver(struct virtio_device *dev);
+void virtio_status_driver_ok(struct virtio_device *dev);
+
+/* api used by devices to interact with the virtio bus */
+status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len) __NONNULL();
+
+/* add a descriptor at index desc_index to the free list on ring_index */
+void virtio_free_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index);
+
+/* allocate a descriptor off the free list, 0xffff is error */
+uint16_t virtio_alloc_desc(struct virtio_device *dev, uint ring_index);
+
+/* allocate a chain of count descriptors off the free list; returns the first
+ * descriptor and fills *start_index with its index, or NULL when exhausted */
+struct vring_desc *virtio_alloc_desc_chain(struct virtio_device *dev, uint ring_index, size_t count, uint16_t *start_index);
+
+/* translate a descriptor index on a ring into a pointer to the descriptor */
+static inline struct vring_desc *virtio_desc_index_to_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
+{
+    DEBUG_ASSERT(desc_index != 0xffff);
+    return &dev->ring[ring_index].desc[desc_index];
+}
+
+/* debug dump of a single descriptor */
+void virtio_dump_desc(const struct vring_desc *desc);
+
+/* submit a chain to the avail list */
+void virtio_submit_chain(struct virtio_device *dev, uint ring_index, uint16_t desc_index);
+
+/* notify the device that the avail ring on ring_index has new entries
+ * (fixed parameter-name typo: was "ring_idnex") */
+void virtio_kick(struct virtio_device *dev, uint ring_index);
+
+
diff --git a/src/bsp/lk/dev/virtio/include/dev/virtio/virtio_ring.h b/src/bsp/lk/dev/virtio/include/dev/virtio/virtio_ring.h
new file mode 100644
index 0000000..bf68bf9
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/include/dev/virtio/virtio_ring.h
@@ -0,0 +1,176 @@
+/* taken from linux source 3.15 at include/uapi/linux/virtio_ring.h */
+#ifndef _UAPI_LINUX_VIRTIO_RING_H
+#define _UAPI_LINUX_VIRTIO_RING_H
+/* An interface for efficient virtio implementation, currently for use by KVM
+ * and lguest, but hopefully others soon. Do NOT change this since it will
+ * break existing servers and clients.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright Rusty Russell IBM Corporation 2007. */
+#include <stdint.h>
+#include <pow2.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+struct vring_desc {
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+};
+
+/* u32 is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was used (written to) */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[];
+};
+
+/* lk's per-ring bookkeeping layered over the standard vring memory layout */
+struct vring {
+    uint32_t num;      /* number of descriptors in the ring */
+    uint32_t num_mask; /* num is a power of 2; mask for cheap wrap-around */
+
+    uint16_t free_list; /* head of a free list of descriptors per ring. 0xffff is NULL */
+    uint16_t free_count; /* number of descriptors currently on the free list */
+
+    uint16_t last_used; /* NOTE(review): presumably the last used->idx consumed -- confirm in virtio.c */
+
+    /* pointers into the shared ring memory, set up by vring_init() */
+    struct vring_desc *desc;
+
+    struct vring_avail *avail;
+
+    struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which looks
+ * like this. We assume num is a power of 2.
+ *
+ * struct vring
+ * {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * uint16_t avail_flags;
+ * uint16_t avail_idx;
+ * uint16_t available[num];
+ * uint16_t used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * uint16_t used_flags;
+ * uint16_t used_idx;
+ * struct vring_used_elem used[num];
+ * uint16_t avail_event_idx;
+ * };
+ */
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+/* carve the standard vring layout out of a contiguous chunk of memory p:
+ * desc[num], then the avail ring plus its trailing used_event uint16_t,
+ * then -- aligned up to align -- the used ring (see layout comment above).
+ * num must be a power of 2. */
+static inline void vring_init(struct vring *vr, unsigned int num, void *p,
+                              unsigned long align)
+{
+    vr->num = num;
+    vr->num_mask = (1 << log2_uint(num)) - 1;
+    vr->free_list = 0xffff; /* empty free list */
+    vr->free_count = 0;
+    vr->last_used = 0;
+    vr->desc = p;
+    vr->avail = p + num*sizeof(struct vring_desc);
+    /* skip avail->ring[num] plus the used_event slot, then round up to align */
+    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(uint16_t)
+                         + align-1) & ~(align - 1));
+}
+
+/* total bytes required for a ring of num entries at the given alignment;
+ * must mirror the layout computed by vring_init() */
+static inline unsigned vring_size(unsigned int num, unsigned long align)
+{
+    /* (3 + num) uint16_ts: avail flags, idx, used_event plus num ring slots;
+     * trailing 3 uint16_ts: used flags, idx, avail_event */
+    return ((sizeof(struct vring_desc) * num + sizeof(uint16_t) * (3 + num)
+             + align - 1) & ~(align - 1))
+           + sizeof(uint16_t) * 3 + sizeof(struct vring_used_elem) * num;
+}
+
+/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
+/* Assuming a given event_idx value from the other side, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event? (all arithmetic is mod 2^16) */
+static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+    /* Note: Xen has similar logic for notification hold-off
+     * in include/xen/interface/io/ring.h with req_event and req_prod
+     * corresponding to event_idx + 1 and new_idx respectively.
+     * Note also that req_event and req_prod in Xen start at 1,
+     * event indexes in virtio start at 0. */
+    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _UAPI_LINUX_VIRTIO_RING_H */
+
diff --git a/src/bsp/lk/dev/virtio/net/include/dev/virtio/net.h b/src/bsp/lk/dev/virtio/net/include/dev/virtio/net.h
new file mode 100644
index 0000000..48ac6a0
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/net/include/dev/virtio/net.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <sys/types.h>
+#include <dev/virtio.h>
+
+/* initialize a virtio-net device discovered on the bus; called once per device */
+status_t virtio_net_init(struct virtio_device *dev, uint32_t host_features) __NONNULL();
+/* bring the interface up: start the rx worker and post receive buffers */
+status_t virtio_net_start(void);
+
+/* return the count of virtio interfaces found */
+int virtio_net_found(void);
+
+/* copy the device mac address into mac_addr; ERR_NOT_FOUND if no device */
+status_t virtio_net_get_mac_addr(uint8_t mac_addr[6]);
+
+struct pktbuf;
+/* transmit a fully formed frame; the nic takes ownership of p on success */
+extern status_t virtio_net_send_minip_pkt(struct pktbuf *p);
+
diff --git a/src/bsp/lk/dev/virtio/net/rules.mk b/src/bsp/lk/dev/virtio/net/rules.mk
new file mode 100644
index 0000000..2f782d2
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/net/rules.mk
@@ -0,0 +1,12 @@
+# module glue for the virtio-net driver: single source file, depends on the
+# virtio bus layer and the minip network stack it delivers frames to
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/virtio-net.c
+
+MODULE_DEPS += \
+	dev/virtio \
+	lib/minip
+
+include make/module.mk
diff --git a/src/bsp/lk/dev/virtio/net/virtio-net.c b/src/bsp/lk/dev/virtio/net/virtio-net.c
new file mode 100644
index 0000000..2d0102d
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/net/virtio-net.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dev/virtio/net.h>
+
+#include <debug.h>
+#include <assert.h>
+#include <trace.h>
+#include <compiler.h>
+#include <list.h>
+#include <string.h>
+#include <err.h>
+#include <kernel/thread.h>
+#include <kernel/event.h>
+#include <kernel/spinlock.h>
+#include <kernel/vm.h>
+#include <lib/pktbuf.h>
+#include <lib/minip.h>
+
+#define LOCAL_TRACE 0
+
+/* layout of the device config space (legacy virtio-net) */
+struct virtio_net_config {
+    uint8_t mac[6];               /* device mac address */
+    uint16_t status;              /* VIRTIO_NET_S_* bits */
+    uint16_t max_virtqueue_pairs;
+} __PACKED;
+
+/* per-packet header prefixed to every rx/tx buffer. this driver always
+ * transfers sizeof(struct virtio_net_hdr) - 2 bytes, i.e. it omits the
+ * trailing num_buffers field (legacy interface, VIRTIO_NET_F_MRG_RXBUF
+ * not negotiated) -- that is where the recurring "- 2" below comes from. */
+struct virtio_net_hdr {
+    uint8_t flags;
+    uint8_t gso_type;
+    uint16_t hdr_len;
+    uint16_t gso_size;
+    uint16_t csum_start;
+    uint16_t csum_offset;
+    uint16_t num_buffers; // unused in tx
+} __PACKED;
+
+#define VIRTIO_NET_F_CSUM (1<<0)
+#define VIRTIO_NET_F_GUEST_CSUM (1<<1)
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS (1<<2)
+#define VIRTIO_NET_F_MAC (1<<5)
+#define VIRTIO_NET_F_GSO (1<<6)
+#define VIRTIO_NET_F_GUEST_TSO4 (1<<7)
+#define VIRTIO_NET_F_GUEST_TSO6 (1<<8)
+#define VIRTIO_NET_F_GUEST_ECN (1<<9)
+#define VIRTIO_NET_F_GUEST_UFO (1<<10)
+#define VIRTIO_NET_F_HOST_TSO4 (1<<11)
+#define VIRTIO_NET_F_HOST_TSO6 (1<<12)
+#define VIRTIO_NET_F_HOST_ECN (1<<13)
+#define VIRTIO_NET_F_HOST_UFO (1<<14)
+#define VIRTIO_NET_F_MRG_RXBUF (1<<15)
+#define VIRTIO_NET_F_STATUS (1<<16)
+#define VIRTIO_NET_F_CTRL_VQ (1<<17)
+#define VIRTIO_NET_F_CTRL_RX (1<<18)
+#define VIRTIO_NET_F_CTRL_VLAN (1<<19)
+#define VIRTIO_NET_F_GUEST_ANNOUNCE (1<<21)
+#define VIRTIO_NET_F_MQ (1<<22)
+#define VIRTIO_NET_F_CTRL_MAC_ADDR (1<<23)
+
+#define VIRTIO_NET_S_LINK_UP (1<<0)
+#define VIRTIO_NET_S_ANNOUNCE (1<<1)
+
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+
+#define RING_RX 0
+#define RING_TX 1
+
+#define VIRTIO_NET_MSS 1514
+
+/* per-device driver state */
+struct virtio_net_dev {
+    struct virtio_device *dev; /* backpointer to the bus-level device */
+    bool started;              /* virtio_net_start() has been called */
+
+    struct virtio_net_config *config; /* view of the device config space */
+
+    spin_lock_t lock;  /* protects the rings and the pending arrays below */
+    event_t rx_event;  /* signalled by the irq handler when rx completes */
+
+    /* list of active tx/rx packets to be freed at irq time */
+    pktbuf_t *pending_tx_packet[TX_RING_SIZE];
+    pktbuf_t *pending_rx_packet[RX_RING_SIZE];
+
+    uint tx_pending_count; /* descriptors currently in flight on the tx ring */
+    struct list_node completed_rx_queue; /* filled rx pktbufs awaiting the worker */
+};
+
+static enum handler_return virtio_net_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e);
+static int virtio_net_rx_worker(void *arg);
+static status_t virtio_net_queue_rx(struct virtio_net_dev *ndev, pktbuf_t *p);
+
+// XXX remove need for this
+static struct virtio_net_dev *the_ndev;
+
+/* pretty-print the host's offered feature bits on one console line */
+static void dump_feature_bits(uint32_t feature)
+{
+    static const struct {
+        uint32_t bit;
+        const char *name;
+    } feature_names[] = {
+        { VIRTIO_NET_F_CSUM, "CSUM" },
+        { VIRTIO_NET_F_GUEST_CSUM, "GUEST_CSUM" },
+        { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CTRL_GUEST_OFFLOADS" },
+        { VIRTIO_NET_F_MAC, "MAC" },
+        { VIRTIO_NET_F_GSO, "GSO" },
+        { VIRTIO_NET_F_GUEST_TSO4, "GUEST_TSO4" },
+        { VIRTIO_NET_F_GUEST_TSO6, "GUEST_TSO6" },
+        { VIRTIO_NET_F_GUEST_ECN, "GUEST_ECN" },
+        { VIRTIO_NET_F_GUEST_UFO, "GUEST_UFO" },
+        { VIRTIO_NET_F_HOST_TSO4, "HOST_TSO4" },
+        { VIRTIO_NET_F_HOST_TSO6, "HOST_TSO6" },
+        { VIRTIO_NET_F_HOST_ECN, "HOST_ECN" },
+        { VIRTIO_NET_F_HOST_UFO, "HOST_UFO" },
+        { VIRTIO_NET_F_MRG_RXBUF, "MRG_RXBUF" },
+        { VIRTIO_NET_F_STATUS, "STATUS" },
+        { VIRTIO_NET_F_CTRL_VQ, "CTRL_VQ" },
+        { VIRTIO_NET_F_CTRL_RX, "CTRL_RX" },
+        { VIRTIO_NET_F_CTRL_VLAN, "CTRL_VLAN" },
+        { VIRTIO_NET_F_GUEST_ANNOUNCE, "GUEST_ANNOUNCE" },
+        { VIRTIO_NET_F_MQ, "MQ" },
+        { VIRTIO_NET_F_CTRL_MAC_ADDR, "CTRL_MAC_ADDR" },
+    };
+
+    printf("virtio-net host features (0x%x):", feature);
+    for (size_t idx = 0; idx < sizeof(feature_names) / sizeof(feature_names[0]); idx++) {
+        if (feature & feature_names[idx].bit)
+            printf(" %s", feature_names[idx].name);
+    }
+    printf("\n");
+}
+
+/* attach callback from the virtio bus for a network device. allocates driver
+ * state, installs the irq handler, configures the rx/tx virtqueues, and only
+ * then sets DRIVER_OK (the virtio spec requires virtqueue setup to precede
+ * DRIVER_OK). the interface is not usable until virtio_net_start(). */
+status_t virtio_net_init(struct virtio_device *dev, uint32_t host_features)
+{
+    LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);
+
+    /* allocate a new net device */
+    struct virtio_net_dev *ndev = calloc(1, sizeof(struct virtio_net_dev));
+    if (!ndev)
+        return ERR_NO_MEMORY;
+
+    ndev->dev = dev;
+    dev->priv = ndev;
+    ndev->started = false;
+
+    ndev->lock = SPIN_LOCK_INITIAL_VALUE;
+    event_init(&ndev->rx_event, false, EVENT_FLAG_AUTOUNSIGNAL);
+    list_initialize(&ndev->completed_rx_queue);
+
+    ndev->config = (struct virtio_net_config *)dev->config_ptr;
+
+    /* ack and set the driver status bit */
+    virtio_status_acknowledge_driver(dev);
+
+    // XXX check features bits and ack/nak them
+    dump_feature_bits(host_features);
+
+    /* set our irq handler */
+    dev->irq_driver_callback = &virtio_net_irq_driver_callback;
+
+    /* allocate a pair of virtio rings, checking for failure (was unchecked);
+     * this must happen before DRIVER_OK is set */
+    status_t err = virtio_alloc_ring(dev, RING_RX, RX_RING_SIZE); // rx
+    if (err < 0)
+        goto fail;
+    err = virtio_alloc_ring(dev, RING_TX, TX_RING_SIZE); // tx
+    if (err < 0)
+        goto fail;
+
+    /* set DRIVER_OK */
+    virtio_status_driver_ok(dev);
+
+    the_ndev = ndev;
+
+    return NO_ERROR;
+
+fail:
+    dev->priv = NULL;
+    free(ndev);
+    return err;
+}
+
+/* bring the interface up: start the rx worker thread and post rx buffers.
+ * returns ERR_NOT_FOUND if no device was initialized and ERR_ALREADY_STARTED
+ * on a second call. */
+status_t virtio_net_start(void)
+{
+    /* guard against being called before virtio_net_init() (was a NULL deref) */
+    if (!the_ndev)
+        return ERR_NOT_FOUND;
+
+    if (the_ndev->started)
+        return ERR_ALREADY_STARTED;
+
+    the_ndev->started = true;
+
+    /* start the rx worker thread; check the create result before resuming
+     * (thread_resume(NULL) would crash on allocation failure) */
+    thread_t *t = thread_create("virtio_net_rx", &virtio_net_rx_worker, (void *)the_ndev, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
+    if (!t) {
+        the_ndev->started = false;
+        return ERR_NO_MEMORY;
+    }
+    thread_resume(t);
+
+    /* queue up a bunch of rxes (one descriptor is left in reserve) */
+    for (uint i = 0; i < RX_RING_SIZE - 1; i++) {
+        pktbuf_t *p = pktbuf_alloc();
+        if (p) {
+            virtio_net_queue_rx(the_ndev, p);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/* queue a fully formed frame (p2) on the tx ring. a second pktbuf is
+ * allocated to carry the virtio-net header and both are chained into a
+ * two-descriptor transfer. on success the ring owns both pktbufs until the
+ * irq handler completes the transfer; on failure only the header pktbuf is
+ * freed here -- p2 remains the caller's to free. */
+static status_t virtio_net_queue_tx_pktbuf(struct virtio_net_dev *ndev, pktbuf_t *p2)
+{
+    struct virtio_device *vdev = ndev->dev;
+
+    uint16_t i;
+    pktbuf_t *p;
+
+    DEBUG_ASSERT(ndev);
+
+    p = pktbuf_alloc();
+    if (!p)
+        return ERR_NO_MEMORY;
+
+    /* point our header at the base of the first pktbuf; the trailing
+     * num_buffers field is omitted (legacy header, hence the "- 2") */
+    struct virtio_net_hdr *hdr = pktbuf_append(p, sizeof(struct virtio_net_hdr) - 2);
+    if (!hdr) {
+        /* was unchecked: memset of a NULL hdr on append failure */
+        pktbuf_free(p, true);
+        return ERR_NO_MEMORY;
+    }
+    memset(hdr, 0, p->dlen);
+
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&ndev->lock, state);
+
+    /* only queue if we have enough tx descriptors */
+    if (ndev->tx_pending_count + 2 > TX_RING_SIZE) {
+        /* BUGFIX: previously this goto skipped the unlock, leaving the
+         * spinlock held (with interrupts disabled) on the bailout path */
+        spin_unlock_irqrestore(&ndev->lock, state);
+        goto nodesc;
+    }
+
+    /* allocate a chain of descriptors for our transfer */
+    struct vring_desc *desc = virtio_alloc_desc_chain(vdev, RING_TX, 2, &i);
+    if (!desc) {
+        spin_unlock_irqrestore(&ndev->lock, state);
+
+nodesc:
+        TRACEF("out of virtio tx descriptors, tx_pending_count %u\n", ndev->tx_pending_count);
+        pktbuf_free(p, true);
+
+        return ERR_NO_MEMORY;
+    }
+
+    ndev->tx_pending_count += 2;
+
+    /* save a pointer to our pktbufs for the irq handler to free */
+    LTRACEF("saving pointer to pkt in index %u and %u\n", i, desc->next);
+    DEBUG_ASSERT(ndev->pending_tx_packet[i] == NULL);
+    DEBUG_ASSERT(ndev->pending_tx_packet[desc->next] == NULL);
+    ndev->pending_tx_packet[i] = p;
+    ndev->pending_tx_packet[desc->next] = p2;
+
+    /* first descriptor: the virtio-net header */
+    desc->addr = pktbuf_data_phys(p);
+    desc->len = p->dlen;
+    desc->flags |= VRING_DESC_F_NEXT;
+
+    /* second descriptor: the packet payload */
+    desc = virtio_desc_index_to_desc(vdev, RING_TX, desc->next);
+    desc->addr = pktbuf_data_phys(p2);
+    desc->len = p2->dlen;
+    desc->flags = 0;
+
+    /* submit the transfer */
+    virtio_submit_chain(vdev, RING_TX, i);
+
+    /* kick it off */
+    virtio_kick(vdev, RING_TX);
+
+    spin_unlock_irqrestore(&ndev->lock, state);
+
+    return NO_ERROR;
+}
+
+/* variant of the above function that copies the buffer into a pktbuf before sending.
+ * NOTE(review): len is not bounds-checked against the pktbuf buffer capacity
+ * before the memcpy -- confirm callers never pass oversized frames. */
+static status_t virtio_net_queue_tx(struct virtio_net_dev *ndev, const void *buf, size_t len)
+{
+    DEBUG_ASSERT(ndev);
+    DEBUG_ASSERT(buf);
+
+    pktbuf_t *p = pktbuf_alloc();
+    if (!p)
+        return ERR_NO_MEMORY;
+
+    /* copy the outgoing packet into the pktbuf */
+    p->data = p->buffer;
+    p->dlen = len;
+    memcpy(p->data, buf, len);
+
+    /* call through to the variant of the function that takes a pre-populated pktbuf */
+    status_t err = virtio_net_queue_tx_pktbuf(ndev, p);
+    if (err < 0) {
+        pktbuf_free(p, true);
+    }
+
+    return err;
+}
+
+/* post one receive buffer on the rx ring. the buffer is prefixed with a
+ * zeroed legacy virtio-net header and sized for a single max-size frame.
+ * used both at start time and by the rx worker to recycle buffers. */
+static status_t virtio_net_queue_rx(struct virtio_net_dev *ndev, pktbuf_t *p)
+{
+    struct virtio_device *vdev = ndev->dev;
+
+    DEBUG_ASSERT(ndev);
+    DEBUG_ASSERT(p);
+
+    /* point our header to the base of the pktbuf */
+    p->data = p->buffer;
+    struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)p->data;
+    memset(hdr, 0, sizeof(struct virtio_net_hdr) - 2);
+
+    /* room for the legacy (2 bytes short) header plus one full frame */
+    p->dlen = sizeof(struct virtio_net_hdr) - 2 + VIRTIO_NET_MSS;
+
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&ndev->lock, state);
+
+    /* allocate a chain of descriptors for our transfer */
+    uint16_t i;
+    struct vring_desc *desc = virtio_alloc_desc_chain(vdev, RING_RX, 1, &i);
+    DEBUG_ASSERT(desc); /* shouldn't be possible not to have a descriptor ready */
+
+    /* save a pointer to our pktbufs for the irq handler to use */
+    DEBUG_ASSERT(ndev->pending_rx_packet[i] == NULL);
+    ndev->pending_rx_packet[i] = p;
+
+    /* set up the descriptor; WRITE flag lets the device fill the buffer */
+    desc->addr = pktbuf_data_phys(p);
+    desc->len = p->dlen;
+    desc->flags = VRING_DESC_F_WRITE;
+
+    /* submit the transfer */
+    virtio_submit_chain(vdev, RING_RX, i);
+
+    /* kick it off */
+    virtio_kick(vdev, RING_RX);
+
+    spin_unlock_irqrestore(&ndev->lock, state);
+
+    return NO_ERROR;
+}
+
+/* irq-time completion handler for both rings. walks the used descriptor
+ * chain, returns descriptors to the free list, and either queues completed
+ * rx buffers for the worker thread or frees consumed tx buffers. */
+static enum handler_return virtio_net_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e)
+{
+    struct virtio_net_dev *ndev = (struct virtio_net_dev *)dev->priv;
+
+    LTRACEF("dev %p, ring %u, e %p, id %u, len %u\n", dev, ring, e, e->id, e->len);
+
+    spin_lock(&ndev->lock);
+
+    /* parse our descriptor chain, add back to the free queue */
+    uint16_t i = e->id;
+    for (;;) {
+        int next;
+        struct vring_desc *desc = virtio_desc_index_to_desc(dev, ring, i);
+
+        if (desc->flags & VRING_DESC_F_NEXT) {
+            next = desc->next;
+        } else {
+            /* end of chain */
+            next = -1;
+        }
+
+        virtio_free_desc(dev, ring, i);
+
+        if (ring == RING_RX) {
+            /* put the freed rx buffer in a queue */
+            pktbuf_t *p = ndev->pending_rx_packet[i];
+            ndev->pending_rx_packet[i] = NULL;
+
+            DEBUG_ASSERT(p);
+            LTRACEF("rx pktbuf %p filled\n", p);
+
+            /* trim the pktbuf according to the written length in the used element descriptor */
+            if (e->len > (sizeof(struct virtio_net_hdr) - 2 + VIRTIO_NET_MSS)) {
+                TRACEF("bad used len on RX %u\n", e->len);
+                p->dlen = 0;
+            } else {
+                p->dlen = e->len;
+            }
+
+            list_add_tail(&ndev->completed_rx_queue, &p->list);
+        } else { // ring == RING_TX
+            /* free the pktbuf associated with the tx packet we just consumed */
+            pktbuf_t *p = ndev->pending_tx_packet[i];
+            ndev->pending_tx_packet[i] = NULL;
+            ndev->tx_pending_count--;
+
+            DEBUG_ASSERT(p);
+            LTRACEF("freeing pktbuf %p\n", p);
+
+            pktbuf_free(p, false);
+        }
+
+        if (next < 0)
+            break;
+        i = next;
+    }
+
+    spin_unlock(&ndev->lock);
+
+    /* if rx ring, signal our event (was the magic number 0; RING_RX == 0) */
+    if (ring == RING_RX) {
+        event_signal(&ndev->rx_event, false);
+    }
+
+    return INT_RESCHEDULE;
+}
+
+/* worker thread: drains the completed rx queue filled by the irq handler,
+ * strips the virtio-net header, hands each frame up to minip, and reposts
+ * the buffer on the rx ring.
+ * NOTE(review): the pktbuf is requeued right after the callback -- assumes
+ * minip_rx_driver_callback does not retain p past its return; confirm. */
+static int virtio_net_rx_worker(void *arg)
+{
+    struct virtio_net_dev *ndev = (struct virtio_net_dev *)arg;
+
+    for (;;) {
+        event_wait(&ndev->rx_event);
+
+        /* pull some packets from the received queue */
+        for (;;) {
+            spin_lock_saved_state_t state;
+            spin_lock_irqsave(&ndev->lock, state);
+
+            pktbuf_t *p = list_remove_head_type(&ndev->completed_rx_queue, pktbuf_t, list);
+
+            spin_unlock_irqrestore(&ndev->lock, state);
+
+            if (!p)
+                break; /* nothing left in the queue, go back to waiting */
+
+            LTRACEF("got packet len %u\n", p->dlen);
+
+            /* strip the legacy (2 bytes short) virtio-net header */
+            struct virtio_net_hdr *hdr = pktbuf_consume(p, sizeof(struct virtio_net_hdr) - 2);
+            if (hdr) {
+                /* call up into the stack */
+                minip_rx_driver_callback(p);
+            }
+
+            /* requeue the pktbuf in the rx queue */
+            virtio_net_queue_rx(ndev, p);
+        }
+    }
+    return 0;
+}
+
+/* report how many virtio-net interfaces were found (currently 0 or 1) */
+int virtio_net_found(void)
+{
+    if (the_ndev)
+        return 1;
+    return 0;
+}
+
+/* copy the device's mac address out of config space into mac_addr.
+ * returns ERR_NOT_FOUND if no device has been initialized. */
+status_t virtio_net_get_mac_addr(uint8_t mac_addr[6])
+{
+    struct virtio_net_dev *ndev = the_ndev;
+
+    if (ndev == NULL)
+        return ERR_NOT_FOUND;
+
+    memcpy(mac_addr, ndev->config->mac, sizeof(ndev->config->mac));
+
+    return NO_ERROR;
+}
+
+/* transmit entry point used by minip. takes ownership of p: on success the
+ * nic frees it at irq time, on failure it is freed here before returning.
+ * NOTE(review): the_ndev is dereferenced without a NULL check -- confirm
+ * minip cannot call this before virtio_net_init() has run. */
+status_t virtio_net_send_minip_pkt(pktbuf_t *p)
+{
+    LTRACEF("p %p, dlen %u, flags 0x%x\n", p, p->dlen, p->flags);
+
+    DEBUG_ASSERT(p && p->dlen);
+
+    if ((p->flags & PKTBUF_FLAG_EOF) == 0) {
+        /* can't handle multi part packets yet */
+        PANIC_UNIMPLEMENTED;
+
+        return ERR_NOT_IMPLEMENTED;
+    }
+
+    /* hand the pktbuf off to the nic, it owns the pktbuf from now on out unless it fails */
+    status_t err = virtio_net_queue_tx_pktbuf(the_ndev, p);
+    if (err < 0) {
+        pktbuf_free(p, true);
+    }
+
+    return err;
+}
+
diff --git a/src/bsp/lk/dev/virtio/rules.mk b/src/bsp/lk/dev/virtio/rules.mk
new file mode 100644
index 0000000..22c8c0b
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/rules.mk
@@ -0,0 +1,8 @@
+# Build rules for the virtio bus driver module (dev/virtio).
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/virtio.c
+
+include make/module.mk
diff --git a/src/bsp/lk/dev/virtio/virtio.c b/src/bsp/lk/dev/virtio/virtio.c
new file mode 100644
index 0000000..5449834
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/virtio.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2014-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dev/virtio.h>
+#include <dev/virtio/virtio_ring.h>
+
+#include <debug.h>
+#include <assert.h>
+#include <trace.h>
+#include <compiler.h>
+#include <list.h>
+#include <err.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pow2.h>
+#include <lk/init.h>
+#include <kernel/thread.h>
+#include <kernel/vm.h>
+#include <platform/interrupts.h>
+
+#include "virtio_priv.h"
+
+#if WITH_DEV_VIRTIO_BLOCK
+#include <dev/virtio/block.h>
+#endif
+#if WITH_DEV_VIRTIO_NET
+#include <dev/virtio/net.h>
+#endif
+#if WITH_DEV_VIRTIO_GPU
+#include <dev/virtio/gpu.h>
+#endif
+
+#define LOCAL_TRACE 0
+
+static struct virtio_device *devices;
+
+/* debug aid: dump the interesting registers of a virtio-mmio window */
+static void dump_mmio_config(const volatile struct virtio_mmio_config *mmio)
+{
+    /* %p requires a plain void *; passing a cv-qualified struct pointer
+     * through varargs is undefined, so strip the qualifiers for printing */
+    printf("mmio at %p\n", (void *)(uintptr_t)mmio);
+    printf("\tmagic 0x%x\n", mmio->magic);
+    printf("\tversion 0x%x\n", mmio->version);
+    printf("\tdevice_id 0x%x\n", mmio->device_id);
+    printf("\tvendor_id 0x%x\n", mmio->vendor_id);
+    printf("\thost_features 0x%x\n", mmio->host_features);
+    printf("\tguest_page_size %u\n", mmio->guest_page_size);
+    printf("\tqnum %u\n", mmio->queue_num);
+    printf("\tqnum_max %u\n", mmio->queue_num_max);
+    printf("\tqnum_align %u\n", mmio->queue_align);
+    printf("\tqnum_pfn %u\n", mmio->queue_pfn);
+    printf("\tstatus 0x%x\n", mmio->status);
+}
+
+/* debug aid: dump a single vring descriptor */
+void virtio_dump_desc(const struct vring_desc *desc)
+{
+    printf("vring descriptor %p\n", (void *)(uintptr_t)desc);
+    printf("\taddr 0x%llx\n", desc->addr);
+    printf("\tlen 0x%x\n", desc->len);
+    /* flags and next are uint16_t: the length modifier is h, not hh
+     * (hh promises unsigned char and is undefined for a 16-bit argument) */
+    printf("\tflags 0x%hx\n", desc->flags);
+    printf("\tnext 0x%hx\n", desc->next);
+}
+
+/*
+ * Shared irq handler for all virtio-mmio devices. On a used-ring interrupt it
+ * walks every active ring of the device and dispatches each newly completed
+ * used element to the driver's callback; config-change interrupts are
+ * forwarded to config_change_callback. Returns INT_RESCHEDULE if any
+ * callback requested it.
+ */
+static enum handler_return virtio_mmio_irq(void *arg)
+{
+    struct virtio_device *dev = (struct virtio_device *)arg;
+    LTRACEF("dev %p, index %u\n", dev, dev->index);
+
+    uint32_t irq_status = dev->mmio_config->interrupt_status;
+    LTRACEF("status 0x%x\n", irq_status);
+
+    enum handler_return ret = INT_NO_RESCHEDULE;
+    if (irq_status & 0x1) { /* used ring update */
+        // XXX is this safe?
+        dev->mmio_config->interrupt_ack = 0x1;
+
+        /* cycle through all the active rings */
+        for (uint r = 0; r < MAX_VIRTIO_RINGS; r++) {
+            if ((dev->active_rings_bitmap & (1<<r)) == 0)
+                continue;
+
+            struct vring *ring = &dev->ring[r];
+            /* used->flags and used->idx are uint16_t: %hx, not %hhx */
+            LTRACEF("ring %u: used flags 0x%hx idx 0x%hx last_used %u\n", r, ring->used->flags, ring->used->idx, ring->last_used);
+
+            /* snapshot the device-written index, then consume entries from
+             * last_used up to it, wrapping with the ring's power-of-2 mask */
+            uint cur_idx = ring->used->idx;
+            for (uint i = ring->last_used; i != (cur_idx & ring->num_mask); i = (i + 1) & ring->num_mask) {
+                LTRACEF("looking at idx %u\n", i);
+
+                // process chain
+                struct vring_used_elem *used_elem = &ring->used->ring[i];
+                LTRACEF("id %u, len %u\n", used_elem->id, used_elem->len);
+
+                DEBUG_ASSERT(dev->irq_driver_callback);
+                ret |= dev->irq_driver_callback(dev, r, used_elem);
+
+                ring->last_used = (ring->last_used + 1) & ring->num_mask;
+            }
+        }
+    }
+    if (irq_status & 0x2) { /* config change */
+        dev->mmio_config->interrupt_ack = 0x2;
+
+        if (dev->config_change_callback) {
+            ret |= dev->config_change_callback(dev);
+        }
+    }
+
+    LTRACEF("exiting irq\n");
+
+    return ret;
+}
+
+/*
+ * Probe an array of `count` virtio-mmio register windows starting at `ptr`,
+ * one irq per slot. For each window with a valid magic, initializes the
+ * matching compiled-in driver (block/net/gpu) and unmasks its interrupt.
+ * Returns the number of valid devices found, or ERR_NO_MEMORY.
+ * May only be called once (asserts that the device table is unset).
+ */
+int virtio_mmio_detect(void *ptr, uint count, const uint irqs[])
+{
+    LTRACEF("ptr %p, count %u\n", ptr, count);
+
+    DEBUG_ASSERT(ptr);
+    DEBUG_ASSERT(irqs);
+    DEBUG_ASSERT(!devices);
+
+    /* allocate an array big enough to hold a list of devices */
+    devices = calloc(count, sizeof(struct virtio_device));
+    if (!devices)
+        return ERR_NO_MEMORY;
+
+    int found = 0;
+    for (uint i = 0; i < count; i++) {
+        /* NOTE(review): 0x200 stride matches QEMU's virtio-mmio window
+         * spacing — confirm for the target platform */
+        volatile struct virtio_mmio_config *mmio = (struct virtio_mmio_config *)((uint8_t *)ptr + i * 0x200);
+        struct virtio_device *dev = &devices[i];
+
+        dev->index = i;
+        dev->irq = irqs[i];
+
+        /* install the handler masked; only unmask once a driver attached */
+        mask_interrupt(irqs[i]);
+        register_int_handler(irqs[i], &virtio_mmio_irq, (void *)dev);
+
+        LTRACEF("looking at magic 0x%x version 0x%x did 0x%x vid 0x%x\n",
+                mmio->magic, mmio->version, mmio->device_id, mmio->vendor_id);
+
+        if (mmio->magic != VIRTIO_MMIO_MAGIC) {
+            continue;
+        }
+
+#if LOCAL_TRACE
+        if (mmio->device_id != 0) {
+            dump_mmio_config(mmio);
+        }
+#endif
+
+#if WITH_DEV_VIRTIO_BLOCK
+        if (mmio->device_id == 2) { // block device
+            LTRACEF("found block device\n");
+
+            dev->mmio_config = mmio;
+            dev->config_ptr = (void *)mmio->config;
+
+            status_t err = virtio_block_init(dev, mmio->host_features);
+            if (err >= 0) {
+                // good device
+                dev->valid = true;
+
+                if (dev->irq_driver_callback)
+                    unmask_interrupt(dev->irq);
+
+                // XXX quick test code, remove
+#if 0
+                uint8_t buf[512];
+                memset(buf, 0x99, sizeof(buf));
+                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
+                hexdump8_ex(buf, sizeof(buf), 0);
+
+                buf[0]++;
+                virtio_block_read_write(dev, buf, 0, sizeof(buf), true);
+
+                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
+                hexdump8_ex(buf, sizeof(buf), 0);
+#endif
+            }
+
+        }
+#endif // WITH_DEV_VIRTIO_BLOCK
+#if WITH_DEV_VIRTIO_NET
+        if (mmio->device_id == 1) { // network device
+            LTRACEF("found net device\n");
+
+            dev->mmio_config = mmio;
+            dev->config_ptr = (void *)mmio->config;
+
+            status_t err = virtio_net_init(dev, mmio->host_features);
+            if (err >= 0) {
+                // good device
+                dev->valid = true;
+
+                if (dev->irq_driver_callback)
+                    unmask_interrupt(dev->irq);
+            }
+        }
+#endif // WITH_DEV_VIRTIO_NET
+#if WITH_DEV_VIRTIO_GPU
+        if (mmio->device_id == 0x10) { // virtio-gpu
+            LTRACEF("found gpu device\n");
+
+            dev->mmio_config = mmio;
+            dev->config_ptr = (void *)mmio->config;
+
+            status_t err = virtio_gpu_init(dev, mmio->host_features);
+            if (err >= 0) {
+                // good device
+                dev->valid = true;
+
+                if (dev->irq_driver_callback)
+                    unmask_interrupt(dev->irq);
+
+                virtio_gpu_start(dev);
+            }
+        }
+#endif // WITH_DEV_VIRTIO_GPU
+
+        if (dev->valid)
+            found++;
+    }
+
+    return found;
+}
+
+/* return descriptor desc_index to the head of ring ring_index's free list */
+void virtio_free_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
+{
+    struct vring *ring = &dev->ring[ring_index];
+
+    LTRACEF("dev %p ring %u index %u free_count %u\n", dev, ring_index, desc_index, ring->free_count);
+
+    /* the free list is threaded through the descriptors' next fields */
+    ring->desc[desc_index].next = ring->free_list;
+    ring->free_list = desc_index;
+    ring->free_count++;
+}
+
+/* pop one descriptor index off ring ring_index's free list; 0xffff if empty */
+uint16_t virtio_alloc_desc(struct virtio_device *dev, uint ring_index)
+{
+    struct vring *ring = &dev->ring[ring_index];
+
+    if (ring->free_count == 0)
+        return 0xffff;
+
+    DEBUG_ASSERT(ring->free_list != 0xffff);
+
+    /* unlink the head of the free list */
+    uint16_t head = ring->free_list;
+    ring->free_list = ring->desc[head].next;
+    ring->free_count--;
+
+    return head;
+}
+
+/*
+ * Pop `count` descriptors off ring ring_index's free list and link them into
+ * one chain. Entries are popped tail-first: the first popped entry becomes
+ * the chain's tail (flags/next cleared), each later one points at its
+ * predecessor, so the last popped entry is the chain head. Returns the head
+ * descriptor and stores its index in *start_index (if non-NULL), or returns
+ * NULL when the free list is too short.
+ */
+struct vring_desc *virtio_alloc_desc_chain(struct virtio_device *dev, uint ring_index, size_t count, uint16_t *start_index)
+{
+    if (dev->ring[ring_index].free_count < count)
+        return NULL;
+
+    /* start popping entries off the chain */
+    struct vring_desc *last = 0;
+    uint16_t last_index = 0;
+    while (count > 0) {
+        uint16_t i = dev->ring[ring_index].free_list;
+        struct vring_desc *desc = &dev->ring[ring_index].desc[i];
+
+        dev->ring[ring_index].free_list = desc->next;
+        dev->ring[ring_index].free_count--;
+
+        if (last) {
+            /* link this entry in front of the previously popped one */
+            desc->flags = VRING_DESC_F_NEXT;
+            desc->next = last_index;
+        } else {
+            // first one (becomes the tail of the resulting chain)
+            desc->flags = 0;
+            desc->next = 0;
+        }
+        last = desc;
+        last_index = i;
+        count--;
+    }
+
+    if (start_index)
+        *start_index = last_index;
+
+    return last;
+}
+
+/*
+ * Publish a filled descriptor chain on ring ring_index's available ring.
+ * The DSB orders the ring-slot write before the idx increment so the device
+ * never observes a stale slot. Caller still needs virtio_kick() to notify
+ * the device.
+ */
+void virtio_submit_chain(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
+{
+    LTRACEF("dev %p, ring %u, desc %u\n", dev, ring_index, desc_index);
+
+    /* add the chain to the available list */
+    struct vring_avail *avail = dev->ring[ring_index].avail;
+
+    avail->ring[avail->idx & dev->ring[ring_index].num_mask] = desc_index;
+    DSB;
+    avail->idx++;
+
+#if LOCAL_TRACE
+    hexdump(avail, 16);
+#endif
+}
+
+/* notify the device that ring ring_index has new available descriptors */
+void virtio_kick(struct virtio_device *dev, uint ring_index)
+{
+    LTRACEF("dev %p, ring %u\n", dev, ring_index);
+
+    /* NOTE(review): the barrier here follows the notify write; ordering of
+     * prior ring updates relies on the DSB inside virtio_submit_chain() —
+     * confirm this placement is intentional */
+    dev->mmio_config->queue_notify = ring_index;
+    DSB;
+}
+
+/*
+ * Allocate and register vring `index` of length `len` (must be a power of 2):
+ * grabs physically contiguous device-visible memory, threads every descriptor
+ * onto the free list, and programs the legacy pfn-based mmio queue registers.
+ * Returns NO_ERROR, ERR_INVALID_ARGS, or ERR_NO_MEMORY.
+ */
+status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
+{
+    LTRACEF("dev %p, index %u, len %u\n", dev, index, len);
+
+    DEBUG_ASSERT(dev);
+    DEBUG_ASSERT(len > 0 && ispow2(len));
+    DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);
+
+    if (len == 0 || !ispow2(len))
+        return ERR_INVALID_ARGS;
+
+    struct vring *ring = &dev->ring[index];
+
+    /* allocate a ring */
+    size_t size = vring_size(len, PAGE_SIZE);
+    LTRACEF("need %zu bytes\n", size);
+
+#if WITH_KERNEL_VM
+    void *vptr;
+    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
+    if (err < 0)
+        return ERR_NO_MEMORY;
+
+    LTRACEF("allocated virtio_ring at va %p\n", vptr);
+
+    /* start with a zeroed ring, matching the !WITH_KERNEL_VM path below;
+     * vring_init does not clear the whole structure */
+    memset(vptr, 0, size);
+
+    /* compute the physical address */
+    paddr_t pa;
+    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
+    if (err < 0) {
+        /* don't leak the ring allocation on failure */
+        vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)vptr);
+        return ERR_NO_MEMORY;
+    }
+
+    LTRACEF("virtio_ring at pa 0x%lx\n", pa);
+#else
+    void *vptr = memalign(PAGE_SIZE, size);
+    if (!vptr)
+        return ERR_NO_MEMORY;
+
+    LTRACEF("ptr %p\n", vptr);
+    memset(vptr, 0, size);
+
+    /* compute the physical address (no MMU: va == pa) */
+    paddr_t pa = (paddr_t)vptr;
+#endif
+
+    /* initialize the ring */
+    vring_init(ring, len, vptr, PAGE_SIZE);
+    dev->ring[index].free_list = 0xffff;
+    dev->ring[index].free_count = 0;
+
+    /* add all the descriptors to the free list */
+    for (uint i = 0; i < len; i++) {
+        virtio_free_desc(dev, index, i);
+    }
+
+    /* register the ring with the device (legacy pfn-based interface) */
+    DEBUG_ASSERT(dev->mmio_config);
+    dev->mmio_config->guest_page_size = PAGE_SIZE;
+    dev->mmio_config->queue_sel = index;
+    dev->mmio_config->queue_num = len;
+    dev->mmio_config->queue_align = PAGE_SIZE;
+    dev->mmio_config->queue_pfn = pa / PAGE_SIZE;
+
+    /* mark the ring active */
+    dev->active_rings_bitmap |= (1 << index);
+
+    return NO_ERROR;
+}
+
+/* write 0 to the status register, resetting the device per the virtio spec */
+void virtio_reset_device(struct virtio_device *dev)
+{
+    dev->mmio_config->status = 0;
+}
+
+/* set ACKNOWLEDGE|DRIVER: we noticed the device and have a driver for it */
+void virtio_status_acknowledge_driver(struct virtio_device *dev)
+{
+    dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
+}
+
+/* set DRIVER_OK: driver setup is complete, the device may be used */
+void virtio_status_driver_ok(struct virtio_device *dev)
+{
+    dev->mmio_config->status |= VIRTIO_STATUS_DRIVER_OK;
+}
+
+/* init hook placeholder; actual probing happens when platform code calls
+ * virtio_mmio_detect() */
+void virtio_init(uint level)
+{
+}
+
+LK_INIT_HOOK(virtio, &virtio_init, LK_INIT_LEVEL_THREADING);
+
diff --git a/src/bsp/lk/dev/virtio/virtio_priv.h b/src/bsp/lk/dev/virtio/virtio_priv.h
new file mode 100644
index 0000000..28437c8
--- /dev/null
+++ b/src/bsp/lk/dev/virtio/virtio_priv.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <stdint.h>
+
+/* legacy ("version 1") virtio-mmio register window layout; byte offsets from
+ * the window base are noted at the left. Total size is 0x100, with the
+ * device-specific config space starting at offset 0x100. */
+struct virtio_mmio_config {
+/* 0x00 */ uint32_t magic;
+           uint32_t version;
+           uint32_t device_id;
+           uint32_t vendor_id;
+/* 0x10 */ uint32_t host_features;
+           uint32_t host_features_sel;
+           uint32_t __reserved0[2];
+/* 0x20 */ uint32_t guest_features;
+           uint32_t guest_features_sel;
+           uint32_t guest_page_size;
+           uint32_t __reserved1[1];
+/* 0x30 */ uint32_t queue_sel;
+           uint32_t queue_num_max;
+           uint32_t queue_num;
+           uint32_t queue_align;
+/* 0x40 */ uint32_t queue_pfn;
+           uint32_t __reserved2[3];
+/* 0x50 */ uint32_t queue_notify;
+           uint32_t __reserved3[3];
+/* 0x60 */ uint32_t interrupt_status;
+           uint32_t interrupt_ack;
+           uint32_t __reserved4[2];
+/* 0x70 */ uint32_t status;
+           uint8_t __reserved5[0x8c];
+/* 0x100 */ uint32_t config[0];
+};
+
+STATIC_ASSERT(sizeof(struct virtio_mmio_config) == 0x100);
+
+#define VIRTIO_MMIO_MAGIC 0x74726976 // 'virt'
+
+/* device status register bits (virtio spec "Device Status Field") */
+#define VIRTIO_STATUS_ACKNOWLEDGE (1<<0)
+#define VIRTIO_STATUS_DRIVER (1<<1)
+#define VIRTIO_STATUS_DRIVER_OK (1<<2)
+#define VIRTIO_STATUS_FEATURES_OK (1<<3)
+#define VIRTIO_STATUS_DEVICE_NEEDS_RESET (1<<6)
+#define VIRTIO_STATUS_FAILED (1<<7)