ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/gpu/drm/qxl/Kconfig b/marvell/linux/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 0000000..d0d691b
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_QXL
+	tristate "QXL virtual GPU"
+	depends on DRM && PCI && MMU
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	select CRC32
+	help
+	  QXL virtual GPU for Spice virtualization desktop integration.
+	  Do not enable this driver unless your distro ships a corresponding
+	  X.org QXL driver that can handle kernel modesetting.
diff --git a/marvell/linux/drivers/gpu/drm/qxl/Makefile b/marvell/linux/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 0000000..1b6c201
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
+
+obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_cmd.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 0000000..d082c19
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include <linux/delay.h>
+
+#include <drm/drm_util.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+	struct qxl_ring_header      header;
+	uint8_t                     elements[0];
+};
+
+struct qxl_ring {
+	struct ring	       *ring;
+	int			element_size;
+	int			n_elements;
+	int			prod_notify;
+	wait_queue_head_t      *push_event;
+	spinlock_t             lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+	kfree(ring);
+}
+
+void qxl_ring_init_hdr(struct qxl_ring *ring)
+{
+	ring->ring->header.notify_on_prod = ring->n_elements;
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+		int element_size,
+		int n_elements,
+		int prod_notify,
+		bool set_prod_notify,
+		wait_queue_head_t *push_event)
+{
+	struct qxl_ring *ring;
+
+	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ring = (struct ring *)header;
+	ring->element_size = element_size;
+	ring->n_elements = n_elements;
+	ring->prod_notify = prod_notify;
+	ring->push_event = push_event;
+	if (set_prod_notify)
+		qxl_ring_init_hdr(ring);
+	spin_lock_init(&ring->lock);
+	return ring;
+}
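+
+/*
+ * Typical use (illustrative sketch; the real calls live in qxl_kms.c):
+ *
+ *	qdev->command_ring = qxl_ring_create(&ram_header->cmd_ring_hdr,
+ *					     sizeof(struct qxl_command),
+ *					     QXL_COMMAND_RING_SIZE,
+ *					     qdev->io_base + QXL_IO_NOTIFY_CMD,
+ *					     false, &qdev->display_event);
+ *
+ * The ring header lives in device RAM, so the host sees prod/cons
+ * updates directly; prod_notify is the io port used as a doorbell.
+ */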
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod - header->cons < header->num_items;
+	if (ret == 0)
+		header->notify_on_cons = header->cons + 1;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+int qxl_check_idle(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod == header->cons;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+		  const void *new_elt, bool interruptible)
+{
+	struct qxl_ring_header *header = &(ring->ring->header);
+	uint8_t *elt;
+	int idx, ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->prod - header->cons == header->num_items) {
+		header->notify_on_cons = header->cons + 1;
+		mb();
+		spin_unlock_irqrestore(&ring->lock, flags);
+		if (!drm_can_sleep()) {
+			while (!qxl_check_header(ring))
+				udelay(1);
+		} else {
+			if (interruptible) {
+				ret = wait_event_interruptible(*ring->push_event,
+							       qxl_check_header(ring));
+				if (ret)
+					return ret;
+			} else {
+				wait_event(*ring->push_event,
+					   qxl_check_header(ring));
+			}
+
+		}
+		spin_lock_irqsave(&ring->lock, flags);
+	}
+
+	idx = header->prod & (ring->n_elements - 1);
+	elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy((void *)elt, new_elt, ring->element_size);
+
+	header->prod++;
+
+	mb();
+
+	if (header->prod == header->notify_on_prod)
+		outb(0, ring->prod_notify);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return 0;
+}
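+
+/*
+ * Worked example (illustrative): prod and cons are free-running 32-bit
+ * counters that are never reduced modulo the ring size, so with
+ * n_elements = 8:
+ *
+ *	prod = 9, cons = 2    =>  7 entries queued (9 - 2 < 8, not full)
+ *	next slot             =   prod & (8 - 1) = 1
+ *	prod - cons == 8      =>  ring full, arm notify_on_cons and wait
+ *
+ * This indexing only works because n_elements is a power of two.
+ */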
+
+static bool qxl_ring_pop(struct qxl_ring *ring,
+			 void *element)
+{
+	volatile struct qxl_ring_header *header = &(ring->ring->header);
+	volatile uint8_t *ring_elt;
+	int idx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->cons == header->prod) {
+		header->notify_on_prod = header->cons + 1;
+		spin_unlock_irqrestore(&ring->lock, flags);
+		return false;
+	}
+
+	idx = header->cons & (ring->n_elements - 1);
+	ring_elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy(element, (void *)ring_elt, ring->element_size);
+
+	header->cons++;
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return true;
+}
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
+
+	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
+
+	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+	if (!qxl_check_idle(qdev->release_ring)) {
+		schedule_work(&qdev->gc_work);
+		if (flush)
+			flush_work(&qdev->gc_work);
+		return true;
+	}
+	return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+	struct qxl_release *release;
+	uint64_t id, next_id;
+	int i = 0;
+	union qxl_release_info *info;
+
+	while (qxl_ring_pop(qdev->release_ring, &id)) {
+		DRM_DEBUG_DRIVER("popped %lld\n", id);
+		while (id) {
+			release = qxl_release_from_id_locked(qdev, id);
+			if (release == NULL)
+				break;
+
+			info = qxl_release_map(qdev, release);
+			next_id = info->next;
+			qxl_release_unmap(qdev, release, info);
+
+			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
+					 next_id);
+
+			switch (release->type) {
+			case QXL_RELEASE_DRAWABLE:
+			case QXL_RELEASE_SURFACE_CMD:
+			case QXL_RELEASE_CURSOR_CMD:
+				break;
+			default:
+				DRM_ERROR("unexpected release type\n");
+				break;
+			}
+			id = next_id;
+
+			qxl_release_free(qdev, release);
+			++i;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("%d\n", i);
+
+	return i;
+}
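+
+/*
+ * Illustrative note: each id popped from the release ring above is the
+ * head of a chain of releases linked through info->next, so a single
+ * ring entry can retire several releases.
+ */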
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+			  struct qxl_release *release,
+			  unsigned long size,
+			  struct qxl_bo **_bo)
+{
+	struct qxl_bo *bo;
+	int ret;
+
+	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+	if (ret) {
+		DRM_ERROR("failed to allocate VRAM BO\n");
+		return ret;
+	}
+	ret = qxl_release_list_add(release, bo);
+	if (ret)
+		goto out_unref;
+
+	*_bo = bo;
+	return 0;
+out_unref:
+	qxl_bo_unref(&bo);
+	return ret;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
+{
+	int irq_num;
+	long addr = qdev->io_base + port;
+	int ret;
+
+	mutex_lock(&qdev->async_io_mutex);
+	irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	if (qdev->last_sent_io_cmd > irq_num) {
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 means timeout: just bail, the "hw" has gone away */
+		if (ret <= 0)
+			goto out;
+		irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	}
+	outb(val, addr);
+	qdev->last_sent_io_cmd = irq_num + 1;
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
+					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+out:
+	if (ret > 0)
+		ret = 0;
+	mutex_unlock(&qdev->async_io_mutex);
+	return ret;
+}
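+
+/*
+ * Illustrative flow for the async io commands: val is written to the
+ * port, then the caller sleeps on io_cmd_event until the IRQ handler
+ * bumps irq_received_io_cmd (the device raises QXL_INTERRUPT_IO_CMD
+ * when the command completes).  A 0 return from the 5*HZ wait means
+ * the "hw" timed out and the caller bails out.
+ */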
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+	int ret;
+
+restart:
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
+	if (ret == -ERESTARTSYS)
+		goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+			const struct qxl_rect *area)
+{
+	int surface_id;
+	uint32_t surface_width, surface_height;
+	int ret;
+
+	if (!surf->hw_surf_alloc)
+		DRM_ERROR("got io update area with no hw surface\n");
+
+	if (surf->is_primary)
+		surface_id = 0;
+	else
+		surface_id = surf->surface_id;
+	surface_width = surf->surf.width;
+	surface_height = surf->surf.height;
+
+	if (area->left < 0 || area->top < 0 ||
+	    area->right > surface_width || area->bottom > surface_height)
+		return -EINVAL;
+
+	mutex_lock(&qdev->update_area_mutex);
+	qdev->ram_header->update_area = *area;
+	qdev->ram_header->update_surface = surface_id;
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
+	mutex_unlock(&qdev->update_area_mutex);
+	return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+	qdev->primary_bo->is_primary = false;
+	drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
+	qdev->primary_bo = NULL;
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+	struct qxl_surface_create *create;
+
+	if (WARN_ON(qdev->primary_bo))
+		return;
+
+	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
+	create = &qdev->ram_header->create_surface;
+	create->format = bo->surf.format;
+	create->width = bo->surf.width;
+	create->height = bo->surf.height;
+	create->stride = bo->surf.stride;
+	create->mem = qxl_bo_physical_address(qdev, bo, 0);
+
+	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);
+
+	create->flags = QXL_SURF_FLAG_KEEP_DATA;
+	create->type = QXL_SURF_TYPE_PRIMARY;
+
+	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+	qdev->primary_bo = bo;
+	qdev->primary_bo->is_primary = true;
+	drm_gem_object_get(&qdev->primary_bo->tbo.base);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
+	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+		      struct qxl_bo *surf)
+{
+	uint32_t handle;
+	int idr_ret;
+	int count = 0;
+again:
+	idr_preload(GFP_ATOMIC);
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	idr_preload_end();
+	if (idr_ret < 0)
+		return idr_ret;
+	handle = idr_ret;
+
+	if (handle >= qdev->rom->n_surfaces) {
+		count++;
+		spin_lock(&qdev->surf_id_idr_lock);
+		idr_remove(&qdev->surf_id_idr, handle);
+		spin_unlock(&qdev->surf_id_idr_lock);
+		qxl_reap_surface_id(qdev, 2);
+		goto again;
+	}
+	surf->surface_id = handle;
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	qdev->last_alloced_surf_id = handle;
+	spin_unlock(&qdev->surf_id_idr_lock);
+	return 0;
+}
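+
+/*
+ * Illustrative flow: when idr_alloc() hands out an id the device cannot
+ * address (handle >= rom->n_surfaces), the id is dropped again,
+ * qxl_reap_surface_id() evicts up to two surfaces to recycle their ids,
+ * and the allocation restarts from the "again" label.
+ */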
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id)
+{
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_remove(&qdev->surf_id_idr, surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+
+	if (surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+						 NULL,
+						 &release);
+	if (ret)
+		return ret;
+
+	ret = qxl_release_reserve_list(release, true);
+	if (ret) {
+		qxl_release_free(qdev, release);
+		return ret;
+	}
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_CREATE;
+	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
+	cmd->u.surface_create.format = surf->surf.format;
+	cmd->u.surface_create.width = surf->surf.width;
+	cmd->u.surface_create.height = surf->surf.height;
+	cmd->u.surface_create.stride = surf->surf.stride;
+	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+	cmd->surface_id = surf->surface_id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	surf->surf_create = release;
+
+	/* no need to add a release to the fence for this surface bo,
+	   since it is only released when we ask to destroy the surface
+	   and it would never signal otherwise */
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	surf->hw_surf_alloc = true;
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+	int id;
+
+	if (!surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+						 surf->surf_create,
+						 &release);
+	if (ret)
+		return ret;
+
+	surf->surf_create = NULL;
+	/* remove the surface from the idr, but not the surface id yet */
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	surf->hw_surf_alloc = false;
+
+	id = surf->surface_id;
+	surf->surface_id = 0;
+
+	release->surface_release_id = id;
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_DESTROY;
+	cmd->surface_id = id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	return 0;
+}
+
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+	struct qxl_rect rect;
+	int ret;
+
+	/* if we are evicting, we need to make sure the surface is up
+	   to date */
+	rect.left = 0;
+	rect.right = surf->surf.width;
+	rect.top = 0;
+	rect.bottom = surf->surf.height;
+retry:
+	ret = qxl_io_update_area(qdev, surf, &rect);
+	if (ret == -ERESTARTSYS)
+		goto retry;
+	return ret;
+}
+
+static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	/* no need to update area if we are just freeing the surface normally */
+	if (do_update_area)
+		qxl_update_surface(qdev, surf);
+
+	/* nuke the surface id at the hw */
+	qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	mutex_lock(&qdev->surf_evict_mutex);
+	qxl_surface_evict_locked(qdev, surf, do_update_area);
+	mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+	int ret;
+
+	ret = qxl_bo_reserve(surf, false);
+	if (ret)
+		return ret;
+
+	if (stall)
+		mutex_unlock(&qdev->surf_evict_mutex);
+
+	ret = ttm_bo_wait(&surf->tbo, true, !stall);
+
+	if (stall)
+		mutex_lock(&qdev->surf_evict_mutex);
+	if (ret) {
+		qxl_bo_unreserve(surf);
+		return ret;
+	}
+
+	qxl_surface_evict_locked(qdev, surf, true);
+	qxl_bo_unreserve(surf);
+	return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+	int num_reaped = 0;
+	int i, ret;
+	bool stall = false;
+	int start = 0;
+
+	mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	start = qdev->last_alloced_surf_id + 1;
+	spin_unlock(&qdev->surf_id_idr_lock);
+
+	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+		void *objptr;
+		int surfid = i % qdev->rom->n_surfaces;
+
+		/* this avoids the case where the object is in the
+		   idr but has been evicted half way - it makes the
+		   idr lookup atomic with the eviction */
+		spin_lock(&qdev->surf_id_idr_lock);
+		objptr = idr_find(&qdev->surf_id_idr, surfid);
+		spin_unlock(&qdev->surf_id_idr_lock);
+
+		if (!objptr)
+			continue;
+
+		ret = qxl_reap_surf(qdev, objptr, stall);
+		if (ret == 0)
+			num_reaped++;
+		if (num_reaped >= max_to_reap)
+			break;
+	}
+	if (num_reaped == 0 && stall == false) {
+		stall = true;
+		goto again;
+	}
+
+	mutex_unlock(&qdev->surf_evict_mutex);
+	if (num_reaped) {
+		usleep_range(500, 1000);
+		qxl_queue_garbage_collect(qdev, true);
+	}
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_debugfs.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 0000000..a4f4175
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Alon Levy <alevy@redhat.com>
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#if defined(CONFIG_DEBUG_FS)
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+	seq_printf(m, "%d\n", qdev->irq_received_error);
+	return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+	struct qxl_bo *bo;
+
+	list_for_each_entry(bo, &qdev->gem.objects, list) {
+		struct dma_resv_list *fobj;
+		int rel;
+
+		rcu_read_lock();
+		fobj = rcu_dereference(bo->tbo.base.resv->fence);
+		rel = fobj ? fobj->shared_count : 0;
+		rcu_read_unlock();
+
+		seq_printf(m, "size %ld, pc %d, num releases %d\n",
+			   (unsigned long)bo->tbo.base.size,
+			   bo->pin_count, rel);
+	}
+	return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+	{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
+	{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+#endif
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+	int r;
+	struct qxl_device *dev =
+		(struct qxl_device *) minor->dev->dev_private;
+
+	drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+
+	r = qxl_ttm_debugfs_init(dev);
+	if (r) {
+		DRM_ERROR("Failed to init TTM debugfs\n");
+		return r;
+	}
+#endif
+	return 0;
+}
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned int nfiles)
+{
+	unsigned int i;
+
+	for (i = 0; i < qdev->debugfs_count; i++) {
+		if (qdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = qdev->debugfs_count + 1;
+	if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	qdev->debugfs[qdev->debugfs_count].files = files;
+	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+	qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 qdev->ddev.primary->debugfs_root,
+				 qdev->ddev.primary);
+#endif
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_dev.h b/marvell/linux/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 0000000..a0ee416
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,878 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+	 notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+	 notice, this list of conditions and the following disclaimer in
+	 the documentation and/or other materials provided with the
+	 distribution.
+       * Neither the name of the copyright holder nor the names of its
+	 contributors may be used to endorse or promote products derived
+	 from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+	SPICE_IMAGE_TYPE_BITMAP,
+	SPICE_IMAGE_TYPE_QUIC,
+	SPICE_IMAGE_TYPE_RESERVED,
+	SPICE_IMAGE_TYPE_LZ_PLT = 100,
+	SPICE_IMAGE_TYPE_LZ_RGB,
+	SPICE_IMAGE_TYPE_GLZ_RGB,
+	SPICE_IMAGE_TYPE_FROM_CACHE,
+	SPICE_IMAGE_TYPE_SURFACE,
+	SPICE_IMAGE_TYPE_JPEG,
+	SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+	SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+	SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+	SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+	SPICE_BITMAP_FMT_INVALID,
+	SPICE_BITMAP_FMT_1BIT_LE,
+	SPICE_BITMAP_FMT_1BIT_BE,
+	SPICE_BITMAP_FMT_4BIT_LE,
+	SPICE_BITMAP_FMT_4BIT_BE,
+	SPICE_BITMAP_FMT_8BIT,
+	SPICE_BITMAP_FMT_16BIT,
+	SPICE_BITMAP_FMT_24BIT,
+	SPICE_BITMAP_FMT_32BIT,
+	SPICE_BITMAP_FMT_RGBA,
+
+	SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+	SPICE_SURFACE_FMT_INVALID,
+	SPICE_SURFACE_FMT_1_A,
+	SPICE_SURFACE_FMT_8_A = 8,
+	SPICE_SURFACE_FMT_16_555 = 16,
+	SPICE_SURFACE_FMT_32_xRGB = 32,
+	SPICE_SURFACE_FMT_16_565 = 80,
+	SPICE_SURFACE_FMT_32_ARGB = 96,
+
+	SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+	SPICE_CLIP_TYPE_NONE,
+	SPICE_CLIP_TYPE_RECTS,
+
+	SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+	SPICE_ROPD_INVERS_SRC = (1 << 0),
+	SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+	SPICE_ROPD_INVERS_DEST = (1 << 2),
+	SPICE_ROPD_OP_PUT = (1 << 3),
+	SPICE_ROPD_OP_OR = (1 << 4),
+	SPICE_ROPD_OP_AND = (1 << 5),
+	SPICE_ROPD_OP_XOR = (1 << 6),
+	SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+	SPICE_ROPD_OP_WHITENESS = (1 << 8),
+	SPICE_ROPD_OP_INVERS = (1 << 9),
+	SPICE_ROPD_INVERS_RES = (1 << 10),
+
+	SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+	SPICE_BRUSH_TYPE_NONE,
+	SPICE_BRUSH_TYPE_SOLID,
+	SPICE_BRUSH_TYPE_PATTERN,
+
+	SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+	SPICE_CURSOR_TYPE_ALPHA,
+	SPICE_CURSOR_TYPE_MONO,
+	SPICE_CURSOR_TYPE_COLOR4,
+	SPICE_CURSOR_TYPE_COLOR8,
+	SPICE_CURSOR_TYPE_COLOR16,
+	SPICE_CURSOR_TYPE_COLOR24,
+	SPICE_CURSOR_TYPE_COLOR32,
+
+	SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+	QXL_REVISION_STABLE_V04 = 0x01,
+	QXL_REVISION_STABLE_V06 = 0x02,
+	QXL_REVISION_STABLE_V10 = 0x03,
+	QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+	QXL_RAM_RANGE_INDEX,
+	QXL_VRAM_RANGE_INDEX,
+	QXL_ROM_RANGE_INDEX,
+	QXL_IO_RANGE_INDEX,
+
+	QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+	QXL_IO_NOTIFY_CMD,
+	QXL_IO_NOTIFY_CURSOR,
+	QXL_IO_UPDATE_AREA,
+	QXL_IO_UPDATE_IRQ,
+	QXL_IO_NOTIFY_OOM,
+	QXL_IO_RESET,
+	QXL_IO_SET_MODE,                  /* qxl-1 */
+	QXL_IO_LOG,
+	/* appended for qxl-2 */
+	QXL_IO_MEMSLOT_ADD,
+	QXL_IO_MEMSLOT_DEL,
+	QXL_IO_DETACH_PRIMARY,
+	QXL_IO_ATTACH_PRIMARY,
+	QXL_IO_CREATE_PRIMARY,
+	QXL_IO_DESTROY_PRIMARY,
+	QXL_IO_DESTROY_SURFACE_WAIT,
+	QXL_IO_DESTROY_ALL_SURFACES,
+	/* appended for qxl-3 */
+	QXL_IO_UPDATE_AREA_ASYNC,
+	QXL_IO_MEMSLOT_ADD_ASYNC,
+	QXL_IO_CREATE_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_SURFACE_ASYNC,
+	QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+	QXL_IO_FLUSH_SURFACES_ASYNC,
+	QXL_IO_FLUSH_RELEASE,
+	/* appended for qxl-4 */
+	QXL_IO_MONITORS_CONFIG_ASYNC,
+
+	QXL_IO_RANGE_SIZE
+};
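+
+/*
+ * Illustrative use: the synchronous ports take a plain outb, e.g.
+ *
+ *	outb(0, io_base + QXL_IO_NOTIFY_OOM);
+ *
+ * while the *_ASYNC ports signal completion through the
+ * QXL_INTERRUPT_IO_CMD interrupt and are driven via the
+ * wait_for_io_cmd() helpers in qxl_cmd.c.
+ */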
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+	QXLFIXED x;
+	QXLFIXED y;
+};
+
+struct qxl_point {
+	int32_t x;
+	int32_t y;
+};
+
+struct qxl_point_1_6 {
+	int16_t x;
+	int16_t y;
+};
+
+struct qxl_rect {
+	int32_t top;
+	int32_t left;
+	int32_t bottom;
+	int32_t right;
+};
+
+struct qxl_urect {
+	uint32_t top;
+	uint32_t left;
+	uint32_t bottom;
+	uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+	uint32_t magic;
+	uint32_t id;
+	uint32_t update_id;
+	uint32_t compression_level;
+	uint32_t log_level;
+	uint32_t mode;			  /* qxl-1 */
+	uint32_t modes_offset;
+	uint32_t num_io_pages;
+	uint32_t pages_offset;		  /* qxl-1 */
+	uint32_t draw_area_offset;	  /* qxl-1 */
+	uint32_t surface0_area_size;	  /* qxl-1 name: draw_area_size */
+	uint32_t ram_header_offset;
+	uint32_t mm_clock;
+	/* appended for qxl-2 */
+	uint32_t n_surfaces;
+	uint64_t flags;
+	uint8_t slots_start;
+	uint8_t slots_end;
+	uint8_t slot_gen_bits;
+	uint8_t slot_id_bits;
+	uint8_t slot_generation;
+	/* appended for qxl-4 */
+	uint8_t client_present;
+	uint8_t client_capabilities[58];
+	uint32_t client_monitors_config_crc;
+	struct {
+		uint16_t count;
+		uint16_t padding;
+		struct qxl_urect heads[64];
+	} client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+	uint32_t id;
+	uint32_t x_res;
+	uint32_t y_res;
+	uint32_t bits;
+	uint32_t stride;
+	uint32_t x_mili;
+	uint32_t y_mili;
+	uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+	uint32_t n_modes;
+	struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+	QXL_CMD_NOP,
+	QXL_CMD_DRAW,
+	QXL_CMD_UPDATE,
+	QXL_CMD_CURSOR,
+	QXL_CMD_MESSAGE,
+	QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+	QXLPHYSICAL data;
+	uint32_t type;
+	uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT		(1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP	(2<<0)
+
+struct qxl_command_ext {
+	struct qxl_command cmd;
+	uint32_t group_id;
+	uint32_t flags;
+};
+
+struct qxl_mem_slot {
+	uint64_t mem_start;
+	uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY	   0
+
+#define QXL_SURF_FLAG_KEEP_DATA	   (1 << 0)
+
+struct qxl_surface_create {
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	uint32_t format;
+	uint32_t position;
+	uint32_t mouse_mode;
+	uint32_t flags;
+	uint32_t type;
+	QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR  (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG  (1 << 5)
+
+struct qxl_ring_header {
+	uint32_t num_items;
+	uint32_t prod;
+	uint32_t notify_on_prod;
+	uint32_t cons;
+	uint32_t notify_on_cons;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+	uint32_t magic;
+	uint32_t int_pending;
+	uint32_t int_mask;
+	uint8_t log_buf[QXL_LOG_BUF_SIZE];
+	struct qxl_ring_header  cmd_ring_hdr;
+	struct qxl_command	cmd_ring[QXL_COMMAND_RING_SIZE];
+	struct qxl_ring_header  cursor_ring_hdr;
+	struct qxl_command	cursor_ring[QXL_CURSOR_RING_SIZE];
+	struct qxl_ring_header  release_ring_hdr;
+	uint64_t		release_ring[QXL_RELEASE_RING_SIZE];
+	struct qxl_rect update_area;
+	/* appended for qxl-2 */
+	uint32_t update_surface;
+	struct qxl_mem_slot mem_slot;
+	struct qxl_surface_create create_surface;
+	uint64_t flags;
+
+	/* appended for qxl-4 */
+
+	/* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+	QXLPHYSICAL monitors_config;
+	uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+	uint64_t id;	  /* in  */
+	uint64_t next;	  /* out */
+};
+
+struct qxl_release_info_ext {
+	union qxl_release_info *info;
+	uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+	uint32_t data_size;
+	QXLPHYSICAL prev_chunk;
+	QXLPHYSICAL next_chunk;
+	uint8_t data[0];
+};
+
+struct qxl_message {
+	union qxl_release_info release_info;
+	uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+	uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+	uint64_t unique;
+	uint16_t type;
+	uint16_t width;
+	uint16_t height;
+	uint16_t hot_spot_x;
+	uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+	struct qxl_cursor_header header;
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_CURSOR_SET,
+	QXL_CURSOR_MOVE,
+	QXL_CURSOR_HIDE,
+	QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+	union qxl_release_info release_info;
+	uint8_t type;
+	union {
+		struct {
+			struct qxl_point_1_6 position;
+			uint8_t visible;
+			QXLPHYSICAL shape;
+		} set;
+		struct {
+			uint16_t length;
+			uint16_t frequency;
+		} trail;
+		struct qxl_point_1_6 position;
+	} u;
+	/* todo: dynamic size from rom */
+	uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+	QXL_DRAW_NOP,
+	QXL_DRAW_FILL,
+	QXL_DRAW_OPAQUE,
+	QXL_DRAW_COPY,
+	QXL_COPY_BITS,
+	QXL_DRAW_BLEND,
+	QXL_DRAW_BLACKNESS,
+	QXL_DRAW_WHITENESS,
+	QXL_DRAW_INVERS,
+	QXL_DRAW_ROP3,
+	QXL_DRAW_STROKE,
+	QXL_DRAW_TEXT,
+	QXL_DRAW_TRANSPARENT,
+	QXL_DRAW_ALPHA_BLEND,
+	QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+	struct qxl_point render_pos;
+	struct qxl_point glyph_origin;
+	uint16_t width;
+	uint16_t height;
+	uint8_t data[0];
+};
+
+struct qxl_string {
+	uint32_t data_size;
+	uint16_t length;
+	uint16_t flags;
+	struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+	struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+	QXL_EFFECT_BLEND = 0,
+	QXL_EFFECT_OPAQUE = 1,
+	QXL_EFFECT_REVERT_ON_DUP = 2,
+	QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+	QXL_EFFECT_WHITENESS_ON_DUP = 4,
+	QXL_EFFECT_NOP_ON_DUP = 5,
+	QXL_EFFECT_NOP = 6,
+	QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+	QXLPHYSICAL pat;
+	struct qxl_point pos;
+};
+
+struct qxl_brush {
+	uint32_t type;
+	union {
+		uint32_t color;
+		struct qxl_pattern pattern;
+	} u;
+};
+
+struct qxl_q_mask {
+	uint8_t flags;
+	struct qxl_point pos;
+	QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint32_t src_color;
+	uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+	uint16_t alpha_flags;
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint8_t rop3;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+	uint8_t flags;
+	uint8_t join_style;
+	uint8_t end_style;
+	uint8_t style_nseg;
+	QXLFIXED width;
+	QXLFIXED miter_limit;
+	QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+	QXLPHYSICAL path;
+	struct qxl_line_attr attr;
+	struct qxl_brush brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_text {
+	QXLPHYSICAL str;
+	struct qxl_rect back_area;
+	struct qxl_brush fore_brush;
+	struct qxl_brush back_brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_mask {
+	struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+	uint32_t type;
+	QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+	QXL_OP_CLEAR			 = 0x00,
+	QXL_OP_SOURCE			 = 0x01,
+	QXL_OP_DST			 = 0x02,
+	QXL_OP_OVER			 = 0x03,
+	QXL_OP_OVER_REVERSE		 = 0x04,
+	QXL_OP_IN			 = 0x05,
+	QXL_OP_IN_REVERSE		 = 0x06,
+	QXL_OP_OUT			 = 0x07,
+	QXL_OP_OUT_REVERSE		 = 0x08,
+	QXL_OP_ATOP			 = 0x09,
+	QXL_OP_ATOP_REVERSE		 = 0x0a,
+	QXL_OP_XOR			 = 0x0b,
+	QXL_OP_ADD			 = 0x0c,
+	QXL_OP_SATURATE			 = 0x0d,
+	/* Note the jump here from 0x0d to 0x30 */
+	QXL_OP_MULTIPLY			 = 0x30,
+	QXL_OP_SCREEN			 = 0x31,
+	QXL_OP_OVERLAY			 = 0x32,
+	QXL_OP_DARKEN			 = 0x33,
+	QXL_OP_LIGHTEN			 = 0x34,
+	QXL_OP_COLOR_DODGE		 = 0x35,
+	QXL_OP_COLOR_BURN		 = 0x36,
+	QXL_OP_HARD_LIGHT		 = 0x37,
+	QXL_OP_SOFT_LIGHT		 = 0x38,
+	QXL_OP_DIFFERENCE		 = 0x39,
+	QXL_OP_EXCLUSION		 = 0x3a,
+	QXL_OP_HSL_HUE			 = 0x3b,
+	QXL_OP_HSL_SATURATION		 = 0x3c,
+	QXL_OP_HSL_COLOR		 = 0x3d,
+	QXL_OP_HSL_LUMINOSITY		 = 0x3e
+};
+
+struct qxl_transform {
+	uint32_t	t00;
+	uint32_t	t01;
+	uint32_t	t02;
+	uint32_t	t10;
+	uint32_t	t11;
+	uint32_t	t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ *     operator:		[  0 -  7 ]
+ *     src_filter:		[  8 - 10 ]
+ *     mask_filter:		[ 11 - 13 ]
+ *     src_repeat:		[ 14 - 15 ]
+ *     mask_repeat:		[ 16 - 17 ]
+ *     component_alpha:		[ 18 - 18 ]
+ *     reserved:		[ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ *		REPEAT_NONE =		0
+ *              REPEAT_NORMAL =		1
+ *		REPEAT_PAD =		2
+ *		REPEAT_REFLECT =	3
+ *
+ * The filter values are:
+ *		FILTER_NEAREST =	0
+ *		FILTER_BILINEAR	=	1
+ */
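+
+/*
+ * For example (illustrative values from the tables above), an OVER
+ * composite with bilinear source filtering and pad source repeat
+ * packs as:
+ *
+ *	flags = QXL_OP_OVER | (1 << 8) | (2 << 14);
+ *
+ * i.e. operator 0x03 in bits 0-7, FILTER_BILINEAR (1) in bits 8-10
+ * and REPEAT_PAD (2) in bits 14-15.
+ */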
+struct qxl_composite {
+	uint32_t		flags;
+
+	QXLPHYSICAL			src;
+	QXLPHYSICAL			src_transform;	/* May be NULL */
+	QXLPHYSICAL			mask;		/* May be NULL */
+	QXLPHYSICAL			mask_transform;	/* May be NULL */
+	struct qxl_point_1_6	src_origin;
+	struct qxl_point_1_6	mask_origin;
+};
+
+struct qxl_compat_drawable {
+	union qxl_release_info release_info;
+	uint8_t effect;
+	uint8_t type;
+	uint16_t bitmap_offset;
+	struct qxl_rect bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_compat_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+	} u;
+};
+
+struct qxl_drawable {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t effect;
+	uint8_t type;
+	uint8_t self_bitmap;
+	struct qxl_rect self_bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	int32_t surfaces_dest[3];
+	struct qxl_rect surfaces_rects[3];
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+		struct qxl_composite composite;
+	} u;
+};
+
+enum qxl_surface_cmd_type {
+	QXL_SURFACE_CMD_CREATE,
+	QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t type;
+	uint32_t flags;
+	union {
+		struct qxl_surface surface_create;
+	} u;
+};
+
+struct qxl_clip_rects {
+	uint32_t num_rects;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_PATH_BEGIN = (1 << 0),
+	QXL_PATH_END = (1 << 1),
+	QXL_PATH_CLOSE = (1 << 3),
+	QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+	uint32_t flags;
+	uint32_t count;
+	struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_IMAGE_GROUP_DRIVER,
+	QXL_IMAGE_GROUP_DEVICE,
+	QXL_IMAGE_GROUP_RED,
+	QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+	uint32_t group;
+	uint32_t unique;
+};
+
+union qxl_image_id_union {
+	struct qxl_image_id id;
+	uint64_t value;
+};
+
+enum qxl_image_flags {
+	QXL_IMAGE_CACHE = (1 << 0),
+	QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+	QXL_BITMAP_DIRECT = (1 << 0),
+	QXL_BITMAP_UNSTABLE = (1 << 1),
+	QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) {              \
+	(image)->descriptor.id = (((uint64_t)_unique) << 32) | _group;	\
+}
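+
+/*
+ * Usage sketch (illustrative):
+ *
+ *	QXL_SET_IMAGE_ID(image, QXL_IMAGE_GROUP_DRIVER, unique);
+ *
+ * stores the group in the low 32 bits and the unique id in the high
+ * 32 bits of image->descriptor.id.
+ */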
+
+struct qxl_image_descriptor {
+	uint64_t id;
+	uint8_t type;
+	uint8_t flags;
+	uint32_t width;
+	uint32_t height;
+};
+
+struct qxl_palette {
+	uint64_t unique;
+	uint16_t num_ents;
+	uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+	uint8_t format;
+	uint8_t flags;
+	uint32_t x;
+	uint32_t y;
+	uint32_t stride;
+	QXLPHYSICAL palette;
+	QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+	uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+	uint32_t data_size;
+	uint8_t data[0];
+};
+
+struct qxl_image {
+	struct qxl_image_descriptor descriptor;
+	union { /* variable length */
+		struct qxl_bitmap bitmap;
+		struct qxl_encoder_data quic;
+		struct qxl_surface_id surface_image;
+	} u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * The x and y offsets are unsigned because they are relative to the
+ * given surface; they are not the x, y coordinates in the guest
+ * screen reference frame. */
+struct qxl_head {
+	uint32_t id;
+	uint32_t surface_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+	uint32_t flags;
+};
+
+struct qxl_monitors_config {
+	uint16_t count;
+	uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
+				 driver */
+	struct qxl_head heads[0];
+};
+
+#pragma pack(pop)
+
+#endif /* H_QXL_DEV */
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_display.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 0000000..677dd56
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,1249 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static bool qxl_head_enabled(struct qxl_head *head)
+{
+	return head->width && head->height;
+}
+
+static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
+		unsigned int count)
+{
+	if (qdev->client_monitors_config &&
+	    count > qdev->client_monitors_config->count) {
+		kfree(qdev->client_monitors_config);
+		qdev->client_monitors_config = NULL;
+	}
+	if (!qdev->client_monitors_config) {
+		qdev->client_monitors_config = kzalloc(
+				struct_size(qdev->client_monitors_config,
+				heads, count), GFP_KERNEL);
+		if (!qdev->client_monitors_config)
+			return -ENOMEM;
+	}
+	qdev->client_monitors_config->count = count;
+	return 0;
+}
+
+enum {
+	MONITORS_CONFIG_MODIFIED,
+	MONITORS_CONFIG_UNCHANGED,
+	MONITORS_CONFIG_BAD_CRC,
+	MONITORS_CONFIG_ERROR,
+};
+
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+	int num_monitors;
+	uint32_t crc;
+	int status = MONITORS_CONFIG_UNCHANGED;
+
+	num_monitors = qdev->rom->client_monitors_config.count;
+	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+		  sizeof(qdev->rom->client_monitors_config));
+	if (crc != qdev->rom->client_monitors_config_crc)
+		return MONITORS_CONFIG_BAD_CRC;
+	if (!num_monitors) {
+		DRM_DEBUG_KMS("no client monitors configured\n");
+		return status;
+	}
+	if (num_monitors > qxl_num_crtc) {
+		DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
+			      qxl_num_crtc, num_monitors);
+		num_monitors = qxl_num_crtc;
+	} else {
+		num_monitors = qdev->rom->client_monitors_config.count;
+	}
+	if (qdev->client_monitors_config
+	      && (num_monitors != qdev->client_monitors_config->count)) {
+		status = MONITORS_CONFIG_MODIFIED;
+	}
+	if (qxl_alloc_client_monitors_config(qdev, num_monitors)) {
+		status = MONITORS_CONFIG_ERROR;
+		return status;
+	}
+	/* we copy max from the client but it isn't used */
+	qdev->client_monitors_config->max_allowed = qxl_num_crtc;
+	for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+		struct qxl_urect *c_rect =
+			&qdev->rom->client_monitors_config.heads[i];
+		struct qxl_head *client_head =
+			&qdev->client_monitors_config->heads[i];
+		if (client_head->x != c_rect->left) {
+			client_head->x = c_rect->left;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->y != c_rect->top) {
+			client_head->y = c_rect->top;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->width != c_rect->right - c_rect->left) {
+			client_head->width = c_rect->right - c_rect->left;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->height != c_rect->bottom - c_rect->top) {
+			client_head->height = c_rect->bottom - c_rect->top;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->surface_id != 0) {
+			client_head->surface_id = 0;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->id != i) {
+			client_head->id = i;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		if (client_head->flags != 0) {
+			client_head->flags = 0;
+			status = MONITORS_CONFIG_MODIFIED;
+		}
+		DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
+			  client_head->x, client_head->y);
+	}
+
+	return status;
+}
+
+static void qxl_update_offset_props(struct qxl_device *qdev)
+{
+	struct drm_device *dev = &qdev->ddev;
+	struct drm_connector *connector;
+	struct qxl_output *output;
+	struct qxl_head *head;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		output = drm_connector_to_qxl_output(connector);
+
+		head = &qdev->client_monitors_config->heads[output->index];
+
+		drm_object_property_set_value(&connector->base,
+			dev->mode_config.suggested_x_property, head->x);
+		drm_object_property_set_value(&connector->base,
+			dev->mode_config.suggested_y_property, head->y);
+	}
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+	struct drm_device *dev = &qdev->ddev;
+	int status, retries;
+
+	for (retries = 0; retries < 10; retries++) {
+		status = qxl_display_copy_rom_client_monitors_config(qdev);
+		if (status != MONITORS_CONFIG_BAD_CRC)
+			break;
+		udelay(5);
+	}
+	if (status == MONITORS_CONFIG_ERROR) {
+		DRM_DEBUG_KMS("ignoring client monitors config: error\n");
+		return;
+	}
+	if (status == MONITORS_CONFIG_BAD_CRC) {
+		DRM_DEBUG_KMS("ignoring client monitors config: bad crc\n");
+		return;
+	}
+	if (status == MONITORS_CONFIG_UNCHANGED) {
+		DRM_DEBUG_KMS("ignoring client monitors config: unchanged\n");
+		return;
+	}
+
+	drm_modeset_lock_all(dev);
+	qxl_update_offset_props(qdev);
+	drm_modeset_unlock_all(dev);
+	if (!drm_helper_hpd_irq_event(dev)) {
+		/* notify that the monitor configuration changed, so
+		   the display can adjust to the arbitrary resolution */
+		drm_kms_helper_hotplug_event(dev);
+	}
+}
+
+static int qxl_check_mode(struct qxl_device *qdev,
+			  unsigned int width,
+			  unsigned int height)
+{
+	unsigned int stride;
+	unsigned int size;
+
+	if (check_mul_overflow(width, 4u, &stride))
+		return -EINVAL;
+	if (check_mul_overflow(stride, height, &size))
+		return -EINVAL;
+	if (size > qdev->vram_size)
+		return -ENOMEM;
+	return 0;
+}
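+
+/*
+ * Worked example (illustrative): a 1024x768 mode needs a stride of
+ * 1024 * 4 = 4096 bytes and 4096 * 768 = 3 MiB of VRAM; the
+ * check_mul_overflow() calls reject dimensions big enough to wrap the
+ * 32-bit size before it is compared against vram_size.
+ */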
+
+static int qxl_check_framebuffer(struct qxl_device *qdev,
+				 struct qxl_bo *bo)
+{
+	return qxl_check_mode(qdev, bo->surf.width, bo->surf.height);
+}
+
+static int qxl_add_mode(struct drm_connector *connector,
+			unsigned int width,
+			unsigned int height,
+			bool preferred)
+{
+	struct drm_device *dev = connector->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_display_mode *mode = NULL;
+	int rc;
+
+	rc = qxl_check_mode(qdev, width, height);
+	if (rc != 0)
+		return 0;
+
+	mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+	if (!mode)
+		return 0;
+
+	if (preferred)
+		mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->hdisplay = width;
+	mode->vdisplay = height;
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_output *output = drm_connector_to_qxl_output(connector);
+	int h = output->index;
+	struct qxl_head *head;
+
+	if (!qdev->monitors_config)
+		return 0;
+	if (h >= qxl_num_crtc)
+		return 0;
+	if (!qdev->client_monitors_config)
+		return 0;
+	if (h >= qdev->client_monitors_config->count)
+		return 0;
+
+	head = &qdev->client_monitors_config->heads[h];
+	DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);
+
+	return qxl_add_mode(connector, head->width, head->height, true);
+}
+
+static struct mode_size {
+	int w;
+	int h;
+} extra_modes[] = {
+	{ 720,  480},
+	{1152,  768},
+	{1280,  854},
+};
+
+static int qxl_add_extra_modes(struct drm_connector *connector)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++)
+		ret += qxl_add_mode(connector,
+				    extra_modes[i].w,
+				    extra_modes[i].h,
+				    false);
+	return ret;
+}
+
+static void qxl_send_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+
+	BUG_ON(!qdev->ram_header->monitors_config);
+
+	if (qdev->monitors_config->count == 0)
+		return;
+
+	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+		if (head->y > 8192 || head->x > 8192 ||
+		    head->width > 8192 || head->height > 8192) {
+			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+				  i, head->width, head->height,
+				  head->x, head->y);
+			return;
+		}
+	}
+	qxl_io_monitors_config(qdev);
+}
+
+static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
+					    const char *reason)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct qxl_head head;
+	int oldcount, i = qcrtc->index;
+
+	if (!qdev->primary_bo) {
+		DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
+		return;
+	}
+
+	if (!qdev->monitors_config || qxl_num_crtc <= i)
+		return;
+
+	head.id = i;
+	head.flags = 0;
+	head.surface_id = 0;
+	oldcount = qdev->monitors_config->count;
+	if (crtc->state->active) {
+		struct drm_display_mode *mode = &crtc->mode;
+
+		head.width = mode->hdisplay;
+		head.height = mode->vdisplay;
+		head.x = crtc->x;
+		head.y = crtc->y;
+		if (qdev->monitors_config->count < i + 1)
+			qdev->monitors_config->count = i + 1;
+		if (qdev->primary_bo == qdev->dumb_shadow_bo)
+			head.x += qdev->dumb_heads[i].x;
+	} else if (i > 0) {
+		head.width = 0;
+		head.height = 0;
+		head.x = 0;
+		head.y = 0;
+		if (qdev->monitors_config->count == i + 1)
+			qdev->monitors_config->count = i;
+	} else {
+		DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason);
+		return;
+	}
+
+	if (head.width  == qdev->monitors_config->heads[i].width  &&
+	    head.height == qdev->monitors_config->heads[i].height &&
+	    head.x      == qdev->monitors_config->heads[i].x      &&
+	    head.y      == qdev->monitors_config->heads[i].y      &&
+	    oldcount    == qdev->monitors_config->count)
+		return;
+
+	DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n",
+		      i, head.width, head.height, head.x, head.y,
+		      crtc->state->active ? "on" : "off", reason);
+	if (oldcount != qdev->monitors_config->count)
+		DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
+			      oldcount, qdev->monitors_config->count,
+			      qxl_num_crtc);
+
+	qdev->monitors_config->heads[i] = head;
+	qdev->monitors_config->max_allowed = qxl_num_crtc;
+	qxl_send_monitors_config(qdev);
+}
+
+static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	if (crtc->state && crtc->state->event) {
+		event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	qxl_crtc_update_monitors_config(crtc, "flush");
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+	qxl_bo_unref(&qxl_crtc->cursor_bo);
+	drm_crtc_cleanup(crtc);
+	kfree(qxl_crtc);
+}
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = qxl_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+					 struct drm_file *file_priv,
+					 unsigned int flags, unsigned int color,
+					 struct drm_clip_rect *clips,
+					 unsigned int num_clips)
+{
+	/* TODO: vmwgfx where this was cribbed from had locking. Why? */
+	struct qxl_device *qdev = fb->dev->dev_private;
+	struct drm_clip_rect norect;
+	struct qxl_bo *qobj;
+	bool is_primary;
+	int inc = 1;
+
+	drm_modeset_lock_all(fb->dev);
+
+	qobj = gem_to_qxl_bo(fb->obj[0]);
+	/* if we aren't primary surface ignore this */
+	is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary;
+	if (!is_primary) {
+		drm_modeset_unlock_all(fb->dev);
+		return 0;
+	}
+
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = fb->width;
+		norect.y2 = fb->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		inc = 2; /* skip source rects */
+	}
+
+	qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
+			  clips, num_clips, inc, 0);
+
+	drm_modeset_unlock_all(fb->dev);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+	.destroy = drm_gem_fb_destroy,
+	.dirty = qxl_framebuffer_surface_dirty,
+	.create_handle = drm_gem_fb_create_handle,
+};
+
+static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_state)
+{
+	qxl_crtc_update_monitors_config(crtc, "enable");
+}
+
+static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_state)
+{
+	qxl_crtc_update_monitors_config(crtc, "disable");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+	.atomic_flush = qxl_crtc_atomic_flush,
+	.atomic_enable = qxl_crtc_atomic_enable,
+	.atomic_disable = qxl_crtc_atomic_disable,
+};
+
+static int qxl_primary_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct qxl_device *qdev = plane->dev->dev_private;
+	struct qxl_bo *bo;
+
+	if (!state->crtc || !state->fb)
+		return 0;
+
+	bo = gem_to_qxl_bo(state->fb->obj[0]);
+
+	return qxl_check_framebuffer(qdev, bo);
+}
+
+static int qxl_primary_apply_cursor(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
+	struct qxl_cursor_cmd *cmd;
+	struct qxl_release *release;
+	int ret = 0;
+
+	if (!qcrtc->cursor_bo)
+		return 0;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return ret;
+
+	ret = qxl_release_list_add(release, qcrtc->cursor_bo);
+	if (ret)
+		goto out_free_release;
+
+	ret = qxl_release_reserve_list(release, false);
+	if (ret)
+		goto out_free_release;
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_SET;
+	cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
+	cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
+
+	cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
+
+	cmd->u.set.visible = 1;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+	return ret;
+
+out_free_release:
+	qxl_release_free(qdev, release);
+	return ret;
+}
+
+static void qxl_primary_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct qxl_device *qdev = plane->dev->dev_private;
+	struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
+	struct qxl_bo *primary;
+	struct drm_clip_rect norect = {
+		.x1 = 0,
+		.y1 = 0,
+		.x2 = plane->state->fb->width,
+		.y2 = plane->state->fb->height
+	};
+	uint32_t dumb_shadow_offset = 0;
+
+	primary = bo->shadow ? bo->shadow : bo;
+
+	if (!primary->is_primary) {
+		if (qdev->primary_bo)
+			qxl_io_destroy_primary(qdev);
+		qxl_io_create_primary(qdev, primary);
+		qxl_primary_apply_cursor(plane);
+	}
+
+	if (bo->is_dumb)
+		dumb_shadow_offset =
+			qdev->dumb_heads[plane->state->crtc->index].x;
+
+	qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1,
+			  dumb_shadow_offset);
+}
+
+static void qxl_primary_atomic_disable(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct qxl_device *qdev = plane->dev->dev_private;
+
+	if (old_state->fb) {
+		struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
+
+		if (bo->is_primary) {
+			qxl_io_destroy_primary(qdev);
+			bo->is_primary = false;
+		}
+	}
+}
+
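+/*
+ * Update the hardware cursor. If the cursor framebuffer changed, a new
+ * 64x64 ALPHA cursor BO is allocated, the pixels are copied out of the
+ * (already pinned) user BO and a QXL_CURSOR_SET command is emitted;
+ * otherwise only a QXL_CURSOR_MOVE with the new position is pushed.
+ */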
+static void qxl_cursor_atomic_update(struct drm_plane *plane,
+				     struct drm_plane_state *old_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	struct qxl_cursor *cursor;
+	struct drm_gem_object *obj;
+	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
+	int ret;
+	void *user_ptr;
+	int size = 64*64*4;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return;
+
+	if (fb != old_state->fb) {
+		obj = fb->obj[0];
+		user_bo = gem_to_qxl_bo(obj);
+
+		/* pinning is done in prepare_fb / cleanup_fb */
+		ret = qxl_bo_kmap(user_bo, &user_ptr);
+		if (ret)
+			goto out_free_release;
+
+		ret = qxl_alloc_bo_reserved(qdev, release,
+					    sizeof(struct qxl_cursor) + size,
+					    &cursor_bo);
+		if (ret)
+			goto out_kunmap;
+
+		ret = qxl_bo_pin(cursor_bo);
+		if (ret)
+			goto out_free_bo;
+
+		ret = qxl_release_reserve_list(release, true);
+		if (ret)
+			goto out_unpin;
+
+		ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+		if (ret)
+			goto out_backoff;
+
+		cursor->header.unique = 0;
+		cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+		cursor->header.width = 64;
+		cursor->header.height = 64;
+		cursor->header.hot_spot_x = fb->hot_x;
+		cursor->header.hot_spot_y = fb->hot_y;
+		cursor->data_size = size;
+		cursor->chunk.next_chunk = 0;
+		cursor->chunk.prev_chunk = 0;
+		cursor->chunk.data_size = size;
+		memcpy(cursor->chunk.data, user_ptr, size);
+		qxl_bo_kunmap(cursor_bo);
+		qxl_bo_kunmap(user_bo);
+
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
+		cmd->u.set.visible = 1;
+		cmd->u.set.shape = qxl_bo_physical_address(qdev,
+							   cursor_bo, 0);
+		cmd->type = QXL_CURSOR_SET;
+
+		old_cursor_bo = qcrtc->cursor_bo;
+		qcrtc->cursor_bo = cursor_bo;
+		cursor_bo = NULL;
+	} else {
+		ret = qxl_release_reserve_list(release, true);
+		if (ret)
+			goto out_free_release;
+
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
+		cmd->type = QXL_CURSOR_MOVE;
+	}
+
+	cmd->u.position.x = plane->state->crtc_x + fb->hot_x;
+	cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
+
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+	if (old_cursor_bo != NULL)
+		qxl_bo_unpin(old_cursor_bo);
+	qxl_bo_unref(&old_cursor_bo);
+	qxl_bo_unref(&cursor_bo);
+
+	return;
+
+out_backoff:
+	qxl_release_backoff_reserve_list(release);
+out_unpin:
+	qxl_bo_unpin(cursor_bo);
+out_free_bo:
+	qxl_bo_unref(&cursor_bo);
+out_kunmap:
+	qxl_bo_kunmap(user_bo);
+out_free_release:
+	qxl_release_free(qdev, release);
+}
+
+static void qxl_cursor_atomic_disable(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct qxl_device *qdev = plane->dev->dev_private;
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	int ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return;
+
+	ret = qxl_release_reserve_list(release, true);
+	if (ret) {
+		qxl_release_free(qdev, release);
+		return;
+	}
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_HIDE;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+}
+
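+/*
+ * Remember the size of the dumb buffer scanned out on each head so that
+ * qxl_calc_dumb_shadow() can size the shared shadow surface to fit.
+ */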
+static void qxl_update_dumb_head(struct qxl_device *qdev,
+				 int index, struct qxl_bo *bo)
+{
+	uint32_t width, height;
+
+	if (index >= qdev->monitors_config->max_allowed)
+		return;
+
+	if (bo && bo->is_dumb) {
+		width = bo->surf.width;
+		height = bo->surf.height;
+	} else {
+		width = 0;
+		height = 0;
+	}
+
+	if (qdev->dumb_heads[index].width == width &&
+	    qdev->dumb_heads[index].height == height)
+		return;
+
+	DRM_DEBUG("#%d: %dx%d -> %dx%d\n", index,
+		  qdev->dumb_heads[index].width,
+		  qdev->dumb_heads[index].height,
+		  width, height);
+	qdev->dumb_heads[index].width = width;
+	qdev->dumb_heads[index].height = height;
+}
+
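+/*
+ * Lay all heads out side by side in one shadow surface: each head gets
+ * an x offset, the widths add up and the height is that of the tallest
+ * head. The 64x64 floor presumably keeps the primary surface from ever
+ * having a zero dimension.
+ */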
+static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
+				 struct qxl_surface *surf)
+{
+	struct qxl_head *head;
+	int i;
+
+	memset(surf, 0, sizeof(*surf));
+	for (i = 0; i < qdev->monitors_config->max_allowed; i++) {
+		head = qdev->dumb_heads + i;
+		head->x = surf->width;
+		surf->width += head->width;
+		if (surf->height < head->height)
+			surf->height = head->height;
+	}
+	if (surf->width < 64)
+		surf->width = 64;
+	if (surf->height < 64)
+		surf->height = 64;
+	surf->format = SPICE_SURFACE_FMT_32_xRGB;
+	surf->stride = surf->width * 4;
+
+	if (!qdev->dumb_shadow_bo ||
+	    qdev->dumb_shadow_bo->surf.width != surf->width ||
+	    qdev->dumb_shadow_bo->surf.height != surf->height)
+		DRM_DEBUG("%dx%d\n", surf->width, surf->height);
+}
+
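+/*
+ * For dumb BOs on a primary plane this (re)creates the shared shadow
+ * surface whenever the combined head geometry changes and points the
+ * user BO's ->shadow at it; scanout then happens from the shadow.
+ * In all cases the user BO is pinned while the fb is in use.
+ */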
+static int qxl_plane_prepare_fb(struct drm_plane *plane,
+				struct drm_plane_state *new_state)
+{
+	struct qxl_device *qdev = plane->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct qxl_bo *user_bo;
+	struct qxl_surface surf;
+	int ret;
+
+	if (!new_state->fb)
+		return 0;
+
+	obj = new_state->fb->obj[0];
+	user_bo = gem_to_qxl_bo(obj);
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+	    user_bo->is_dumb) {
+		qxl_update_dumb_head(qdev, new_state->crtc->index,
+				     user_bo);
+		qxl_calc_dumb_shadow(qdev, &surf);
+		if (!qdev->dumb_shadow_bo ||
+		    qdev->dumb_shadow_bo->surf.width  != surf.width ||
+		    qdev->dumb_shadow_bo->surf.height != surf.height) {
+			if (qdev->dumb_shadow_bo) {
+				drm_gem_object_put_unlocked
+					(&qdev->dumb_shadow_bo->tbo.base);
+				qdev->dumb_shadow_bo = NULL;
+			}
+			qxl_bo_create(qdev, surf.height * surf.stride,
+				      true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
+				      &qdev->dumb_shadow_bo);
+		}
+		if (user_bo->shadow != qdev->dumb_shadow_bo) {
+			if (user_bo->shadow) {
+				drm_gem_object_put_unlocked
+					(&user_bo->shadow->tbo.base);
+				user_bo->shadow = NULL;
+			}
+			drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
+			user_bo->shadow = qdev->dumb_shadow_bo;
+		}
+	}
+
+	ret = qxl_bo_pin(user_bo);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void qxl_plane_cleanup_fb(struct drm_plane *plane,
+				 struct drm_plane_state *old_state)
+{
+	struct drm_gem_object *obj;
+	struct qxl_bo *user_bo;
+
+	if (!old_state->fb) {
+		/*
+		 * we never executed prepare_fb, so there's nothing to
+		 * unpin.
+		 */
+		return;
+	}
+
+	obj = old_state->fb->obj[0];
+	user_bo = gem_to_qxl_bo(obj);
+	qxl_bo_unpin(user_bo);
+
+	if (old_state->fb != plane->state->fb && user_bo->shadow) {
+		drm_gem_object_put_unlocked(&user_bo->shadow->tbo.base);
+		user_bo->shadow = NULL;
+	}
+}
+
+static const uint32_t qxl_cursor_plane_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
+	.atomic_update = qxl_cursor_atomic_update,
+	.atomic_disable = qxl_cursor_atomic_disable,
+	.prepare_fb = qxl_plane_prepare_fb,
+	.cleanup_fb = qxl_plane_cleanup_fb,
+};
+
+static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
+	.update_plane	= drm_atomic_helper_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.destroy	= drm_primary_helper_destroy,
+	.reset		= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t qxl_primary_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static const struct drm_plane_helper_funcs primary_helper_funcs = {
+	.atomic_check = qxl_primary_atomic_check,
+	.atomic_update = qxl_primary_atomic_update,
+	.atomic_disable = qxl_primary_atomic_disable,
+	.prepare_fb = qxl_plane_prepare_fb,
+	.cleanup_fb = qxl_plane_cleanup_fb,
+};
+
+static const struct drm_plane_funcs qxl_primary_plane_funcs = {
+	.update_plane	= drm_atomic_helper_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.destroy	= drm_primary_helper_destroy,
+	.reset		= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
+					  unsigned int possible_crtcs,
+					  enum drm_plane_type type)
+{
+	const struct drm_plane_helper_funcs *helper_funcs = NULL;
+	struct drm_plane *plane;
+	const struct drm_plane_funcs *funcs;
+	const uint32_t *formats;
+	int num_formats;
+	int err;
+
+	if (type == DRM_PLANE_TYPE_PRIMARY) {
+		funcs = &qxl_primary_plane_funcs;
+		formats = qxl_primary_plane_formats;
+		num_formats = ARRAY_SIZE(qxl_primary_plane_formats);
+		helper_funcs = &primary_helper_funcs;
+	} else if (type == DRM_PLANE_TYPE_CURSOR) {
+		funcs = &qxl_cursor_plane_funcs;
+		formats = qxl_cursor_plane_formats;
+		helper_funcs = &qxl_cursor_helper_funcs;
+		num_formats = ARRAY_SIZE(qxl_cursor_plane_formats);
+	} else {
+		return ERR_PTR(-EINVAL);
+	}
+
+	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
+				       funcs, formats, num_formats,
+				       NULL, type, NULL);
+	if (err)
+		goto free_plane;
+
+	drm_plane_helper_add(plane, helper_funcs);
+
+	return plane;
+
+free_plane:
+	kfree(plane);
+	return ERR_PTR(err);
+}
+
+static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
+{
+	struct qxl_crtc *qxl_crtc;
+	struct drm_plane *primary, *cursor;
+	struct qxl_device *qdev = dev->dev_private;
+	int r;
+
+	qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+	if (!qxl_crtc)
+		return -ENOMEM;
+
+	primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(primary)) {
+		r = PTR_ERR(primary);
+		goto free_mem;
+	}
+
+	cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR);
+	if (IS_ERR(cursor)) {
+		r = PTR_ERR(cursor);
+		goto clean_primary;
+	}
+
+	r = drm_crtc_init_with_planes(dev, &qxl_crtc->base, primary, cursor,
+				      &qxl_crtc_funcs, NULL);
+	if (r)
+		goto clean_cursor;
+
+	qxl_crtc->index = crtc_id;
+	drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+	return 0;
+
+clean_cursor:
+	drm_plane_cleanup(cursor);
+	kfree(cursor);
+clean_primary:
+	drm_plane_cleanup(primary);
+	kfree(primary);
+free_mem:
+	kfree(qxl_crtc);
+	return r;
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_output *output = drm_connector_to_qxl_output(connector);
+	unsigned int pwidth = 1024;
+	unsigned int pheight = 768;
+	int ret = 0;
+
+	if (qdev->client_monitors_config) {
+		struct qxl_head *head;
+		head = &qdev->client_monitors_config->heads[output->index];
+		if (head->width)
+			pwidth = head->width;
+		if (head->height)
+			pheight = head->height;
+	}
+
+	ret += drm_add_modes_noedid(connector, 8192, 8192);
+	ret += qxl_add_extra_modes(connector);
+	ret += qxl_add_monitors_config_modes(connector);
+	drm_set_preferred_mode(connector, pwidth, pheight);
+	return ret;
+}
+
+static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	struct drm_device *ddev = connector->dev;
+	struct qxl_device *qdev = ddev->dev_private;
+
+	if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
+		return MODE_BAD;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	DRM_DEBUG("\n");
+	return &qxl_output->enc;
+}
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+	.get_modes = qxl_conn_get_modes,
+	.mode_valid = qxl_conn_mode_valid,
+	.best_encoder = qxl_best_encoder,
+};
+
+static enum drm_connector_status qxl_conn_detect(
+			struct drm_connector *connector,
+			bool force)
+{
+	struct qxl_output *output =
+		drm_connector_to_qxl_output(connector);
+	struct drm_device *ddev = connector->dev;
+	struct qxl_device *qdev = ddev->dev_private;
+	bool connected = false;
+
+	/* The first monitor is always connected */
+	if (!qdev->client_monitors_config) {
+		if (output->index == 0)
+			connected = true;
+	} else {
+		connected = qdev->client_monitors_config->count > output->index &&
+			qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
+	}
+
+	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+
+	return connected ? connector_status_connected
+			 : connector_status_disconnected;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+	.detect = qxl_conn_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = qxl_conn_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+	.destroy = qxl_enc_destroy,
+};
+
+static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
+{
+	if (qdev->hotplug_mode_update_property)
+		return 0;
+
+	qdev->hotplug_mode_update_property =
+		drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+
+	return 0;
+}
+
+static int qdev_output_init(struct drm_device *dev, int num_output)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_output *qxl_output;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+	if (!qxl_output)
+		return -ENOMEM;
+
+	qxl_output->index = num_output;
+
+	connector = &qxl_output->base;
+	encoder = &qxl_output->enc;
+	drm_connector_init(dev, &qxl_output->base,
+			   &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL, NULL);
+
+	/* we get HPD via client monitors config */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	encoder->possible_crtcs = 1 << num_output;
+	drm_connector_attach_encoder(&qxl_output->base,
+					  &qxl_output->enc);
+	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+	drm_object_attach_property(&connector->base,
+				   qdev->hotplug_mode_update_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+			    struct drm_file *file_priv,
+			    const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
+					    &qxl_fb_funcs);
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+	.fb_create = qxl_user_framebuffer_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
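+/*
+ * The monitors config lives in a pinned, permanently kmapped VRAM BO;
+ * its device address is published via the ram header so the host side
+ * can read the head layout at any time.
+ */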
+int qxl_create_monitors_object(struct qxl_device *qdev)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	int monitors_config_size = sizeof(struct qxl_monitors_config) +
+		qxl_num_crtc * sizeof(struct qxl_head);
+
+	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+				    QXL_GEM_DOMAIN_VRAM,
+				    false, false, NULL, &gobj);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+		return ret;
+	}
+	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_pin(qdev->monitors_config_bo);
+	if (ret)
+		return ret;
+
+	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+
+	qdev->monitors_config = qdev->monitors_config_bo->kptr;
+	qdev->ram_header->monitors_config =
+		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+	memset(qdev->monitors_config, 0, monitors_config_size);
+	qdev->dumb_heads = kcalloc(qxl_num_crtc, sizeof(qdev->dumb_heads[0]),
+				   GFP_KERNEL);
+	if (!qdev->dumb_heads) {
+		qxl_destroy_monitors_object(qdev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int qxl_destroy_monitors_object(struct qxl_device *qdev)
+{
+	int ret;
+
+	qdev->monitors_config = NULL;
+	qdev->ram_header->monitors_config = 0;
+
+	qxl_bo_kunmap(qdev->monitors_config_bo);
+	ret = qxl_bo_unpin(qdev->monitors_config_bo);
+	if (ret)
+		return ret;
+
+	qxl_bo_unref(&qdev->monitors_config_bo);
+	return 0;
+}
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(&qdev->ddev);
+
+	ret = qxl_create_monitors_object(qdev);
+	if (ret)
+		return ret;
+
+	qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
+
+	/* modes will be validated against the framebuffer size */
+	qdev->ddev.mode_config.min_width = 0;
+	qdev->ddev.mode_config.min_height = 0;
+	qdev->ddev.mode_config.max_width = 8192;
+	qdev->ddev.mode_config.max_height = 8192;
+
+	qdev->ddev.mode_config.fb_base = qdev->vram_base;
+
+	drm_mode_create_suggested_offset_properties(&qdev->ddev);
+	qxl_mode_create_hotplug_mode_update_property(qdev);
+
+	for (i = 0; i < qxl_num_crtc; ++i) {
+		qdev_crtc_init(&qdev->ddev, i);
+		qdev_output_init(&qdev->ddev, i);
+	}
+
+	qxl_display_read_client_monitors_config(qdev);
+
+	drm_mode_config_reset(&qdev->ddev);
+	return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+	if (qdev->dumb_shadow_bo) {
+		drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
+		qdev->dumb_shadow_bo = NULL;
+	}
+	qxl_destroy_monitors_object(qdev);
+	drm_mode_config_cleanup(&qdev->ddev);
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_draw.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 0000000..3599db0
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_fourcc.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int alloc_clips(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       unsigned int num_clips,
+		       struct qxl_bo **clips_bo)
+{
+	int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+	return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
+/* Returns a pointer to the already allocated qxl_rect array inside
+ * the qxl_clip_rects. This is *not* the device memory itself; it points
+ * at the qxl_clip_rects.chunk.data member of the kmapped clips BO. */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+					      unsigned int num_clips,
+					      struct qxl_bo *clips_bo)
+{
+	struct qxl_clip_rects *dev_clips;
+	int ret;
+
+	ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
+	if (ret)
+		return NULL;
+	dev_clips->num_rects = num_clips;
+	dev_clips->chunk.next_chunk = 0;
+	dev_clips->chunk.prev_chunk = 0;
+	dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+	return (struct qxl_rect *)dev_clips->chunk.data;
+}
+
+static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+					  QXL_RELEASE_DRAWABLE, release, NULL);
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+	qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+	      const struct qxl_rect *rect,
+	      struct qxl_release *release)
+{
+	struct qxl_drawable *drawable;
+	int i;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	if (!drawable)
+		return -ENOMEM;
+
+	drawable->type = type;
+
+	drawable->surface_id = surface;		/* Only primary for now */
+	drawable->effect = QXL_EFFECT_OPAQUE;
+	drawable->self_bitmap = 0;
+	drawable->self_bitmap_area.top = 0;
+	drawable->self_bitmap_area.left = 0;
+	drawable->self_bitmap_area.bottom = 0;
+	drawable->self_bitmap_area.right = 0;
+	/* FIXME: add clipping */
+	drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+	/*
+	 * surfaces_dest[i] should apparently be filled out with the
+	 * surfaces that we depend on, and surface_rects should be
+	 * filled with the rectangles of those surfaces that we
+	 * are going to use.
+	 */
+	for (i = 0; i < 3; ++i)
+		drawable->surfaces_dest[i] = -1;
+
+	if (rect)
+		drawable->bbox = *rect;
+
+	drawable->mm_time = qdev->rom->mm_clock;
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	return 0;
+}
+
+/* Push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now this is implemented with a single draw and a clip list.
+ * Clip lists are known to be a performance problem; this could be
+ * solved by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct drm_framebuffer *fb,
+		       struct qxl_bo *bo,
+		       unsigned int flags, unsigned int color,
+		       struct drm_clip_rect *clips,
+		       unsigned int num_clips, int inc,
+		       uint32_t dumb_shadow_offset)
+{
+	/*
+	 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+	 * send a fill command instead, much cheaper.
+	 *
+	 * See include/drm/drm_mode.h
+	 */
+	struct drm_clip_rect *clips_ptr;
+	int i;
+	int left, right, top, bottom;
+	int width, height;
+	struct qxl_drawable *drawable;
+	struct qxl_rect drawable_rect;
+	struct qxl_rect *rects;
+	int stride = fb->pitches[0];
+	/* depth is not actually interesting, we don't mask with it */
+	int depth = fb->format->cpp[0] * 8;
+	uint8_t *surface_base;
+	struct qxl_release *release;
+	struct qxl_bo *clips_bo;
+	struct qxl_drm_image *dimage;
+	int ret;
+
+	ret = alloc_drawable(qdev, &release);
+	if (ret)
+		return;
+
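+	/*
+	 * Translate into the shared dumb shadow surface. Only the first
+	 * rect is shifted; the one caller that passes a non-zero offset
+	 * (qxl_primary_atomic_update) hands in a single full-frame rect.
+	 */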
+	clips->x1 += dumb_shadow_offset;
+	clips->x2 += dumb_shadow_offset;
+
+	left = clips->x1;
+	right = clips->x2;
+	top = clips->y1;
+	bottom = clips->y2;
+
+	/* grow the bounding box over the remaining rects; rect 0 seeded it above */
+	for (i = 1, clips_ptr = clips + inc;
+	     i < num_clips; i++, clips_ptr += inc) {
+		left = min_t(int, left, (int)clips_ptr->x1);
+		right = max_t(int, right, (int)clips_ptr->x2);
+		top = min_t(int, top, (int)clips_ptr->y1);
+		bottom = max_t(int, bottom, (int)clips_ptr->y2);
+	}
+
+	width = right - left;
+	height = bottom - top;
+
+	ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+	if (ret)
+		goto out_free_drawable;
+
+	ret = qxl_image_alloc_objects(qdev, release,
+				      &dimage,
+				      height, stride);
+	if (ret)
+		goto out_free_clips;
+
+	/* do a reservation run over all the objects we just allocated */
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		goto out_free_image;
+
+	drawable_rect.left = left;
+	drawable_rect.right = right;
+	drawable_rect.top = top;
+	drawable_rect.bottom = bottom;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+			    release);
+	if (ret)
+		goto out_release_backoff;
+
+	ret = qxl_bo_kmap(bo, (void **)&surface_base);
+	if (ret)
+		goto out_release_backoff;
+
+	ret = qxl_image_init(qdev, release, dimage, surface_base,
+			     left - dumb_shadow_offset,
+			     top, width, height, depth, stride);
+	qxl_bo_kunmap(bo);
+	if (ret)
+		goto out_release_backoff;
+
+	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
+	if (!rects) {
+		ret = -EINVAL;
+		goto out_release_backoff;
+	}
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+	drawable->clip.data = qxl_bo_physical_address(qdev,
+						      clips_bo, 0);
+
+	drawable->u.copy.src_area.top = 0;
+	drawable->u.copy.src_area.bottom = height;
+	drawable->u.copy.src_area.left = 0;
+	drawable->u.copy.src_area.right = width;
+
+	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+	drawable->u.copy.scale_mode = 0;
+	drawable->u.copy.mask.flags = 0;
+	drawable->u.copy.mask.pos.x = 0;
+	drawable->u.copy.mask.pos.y = 0;
+	drawable->u.copy.mask.bitmap = 0;
+
+	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+
+	clips_ptr = clips;
+	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+		rects[i].left   = clips_ptr->x1;
+		rects[i].right  = clips_ptr->x2;
+		rects[i].top    = clips_ptr->y1;
+		rects[i].bottom = clips_ptr->y2;
+	}
+	qxl_bo_kunmap(clips_bo);
+
+	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+
+out_release_backoff:
+	if (ret)
+		qxl_release_backoff_reserve_list(release);
+out_free_image:
+	qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+	qxl_bo_unref(&clips_bo);
+out_free_drawable:
+	/* only free drawable on error */
+	if (ret)
+		free_drawable(qdev, release);
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 0000000..265bfe9
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,321 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlie@redhat.com>
+ *    Alon Levy <alevy@redhat.com>
+ */
+
+#include "qxl_drv.h"
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+
+#include "qxl_object.h"
+
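+/* 0x1b36 is the Red Hat/QEMU PCI vendor id and 0x0100 the QXL device;
+ * both the VGA-class and the secondary (non-VGA) variant are matched. */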
+static const struct pci_device_id pciidlist[] = {
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+	  0xffff00, 0 },
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+	  0xffff00, 0 },
+	{ 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int qxl_modeset = -1;
+int qxl_num_crtc = 4;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)");
+module_param_named(num_heads, qxl_num_crtc, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static bool is_vga(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct qxl_device *qdev;
+	int ret;
+
+	if (pdev->revision < 4) {
+		DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+			  " use xf86-video-qxl in user mode\n");
+		return -EINVAL; /* TODO: ENODEV ? */
+	}
+
+	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+	if (!qdev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto free_dev;
+
+	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+	if (ret)
+		goto disable_pci;
+
+	if (is_vga(pdev)) {
+		ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+		if (ret) {
+			DRM_ERROR("can't get legacy vga ioports\n");
+			goto disable_pci;
+		}
+	}
+
+	ret = qxl_device_init(qdev, &qxl_driver, pdev);
+	if (ret)
+		goto put_vga;
+
+	ret = qxl_modeset_init(qdev);
+	if (ret)
+		goto unload;
+
+	drm_kms_helper_poll_init(&qdev->ddev);
+
+	/* Complete initialization. */
+	ret = drm_dev_register(&qdev->ddev, ent->driver_data);
+	if (ret)
+		goto modeset_cleanup;
+
+	drm_fbdev_generic_setup(&qdev->ddev, 32);
+	return 0;
+
+modeset_cleanup:
+	qxl_modeset_fini(qdev);
+unload:
+	qxl_device_fini(qdev);
+put_vga:
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
+disable_pci:
+	pci_disable_device(pdev);
+free_dev:
+	kfree(qdev);
+	return ret;
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct qxl_device *qdev = dev->dev_private;
+
+	drm_dev_unregister(dev);
+
+	qxl_modeset_fini(qdev);
+	qxl_device_fini(qdev);
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
+
+	dev->dev_private = NULL;
+	kfree(qdev);
+	drm_dev_put(dev);
+}
+
+static const struct file_operations qxl_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.poll = drm_poll,
+	.read = drm_read,
+	.mmap = qxl_mmap,
+};
+
+static int qxl_drm_freeze(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct qxl_device *qdev = dev->dev_private;
+	int ret;
+
+	ret = drm_mode_config_helper_suspend(dev);
+	if (ret)
+		return ret;
+
+	qxl_destroy_monitors_object(qdev);
+	qxl_surf_evict(qdev);
+	qxl_vram_evict(qdev);
+
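+	/* drain both rings; releases are reaped by the garbage collector */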
+	while (!qxl_check_idle(qdev->command_ring))
+		;
+	while (!qxl_check_idle(qdev->release_ring))
+		qxl_queue_garbage_collect(qdev, 1);
+
+	pci_save_state(pdev);
+
+	return 0;
+}
+
+static int qxl_drm_resume(struct drm_device *dev, bool thaw)
+{
+	struct qxl_device *qdev = dev->dev_private;
+
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	if (!thaw) {
+		qxl_reinit_memslots(qdev);
+		qxl_ring_init_hdr(qdev->release_ring);
+	}
+
+	qxl_create_monitors_object(qdev);
+	return drm_mode_config_helper_resume(dev);
+}
+
+static int qxl_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = qxl_drm_freeze(drm_dev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+static int qxl_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev))
+		return -EIO;
+
+	return qxl_drm_resume(drm_dev, false);
+}
+
+static int qxl_pm_thaw(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+	return qxl_drm_resume(drm_dev, true);
+}
+
+static int qxl_pm_freeze(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+	return qxl_drm_freeze(drm_dev);
+}
+
+static int qxl_pm_restore(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct qxl_device *qdev = drm_dev->dev_private;
+
+	qxl_io_reset(qdev);
+	return qxl_drm_resume(drm_dev, false);
+}
+
+static const struct dev_pm_ops qxl_pm_ops = {
+	.suspend = qxl_pm_suspend,
+	.resume = qxl_pm_resume,
+	.freeze = qxl_pm_freeze,
+	.thaw = qxl_pm_thaw,
+	.poweroff = qxl_pm_freeze,
+	.restore = qxl_pm_restore,
+};
+
+static struct pci_driver qxl_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = qxl_pci_probe,
+	.remove = qxl_pci_remove,
+	.driver.pm = &qxl_pm_ops,
+};
+
+static struct drm_driver qxl_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+
+	.dumb_create = qxl_mode_dumb_create,
+	.dumb_map_offset = qxl_mode_dumb_mmap,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = qxl_debugfs_init,
+#endif
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_pin = qxl_gem_prime_pin,
+	.gem_prime_unpin = qxl_gem_prime_unpin,
+	.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
+	.gem_prime_vmap = qxl_gem_prime_vmap,
+	.gem_prime_vunmap = qxl_gem_prime_vunmap,
+	.gem_prime_mmap = qxl_gem_prime_mmap,
+	.gem_free_object_unlocked = qxl_gem_object_free,
+	.gem_open_object = qxl_gem_object_open,
+	.gem_close_object = qxl_gem_object_close,
+	.fops = &qxl_fops,
+	.ioctls = qxl_ioctls,
+	.irq_handler = qxl_irq_handler,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init qxl_init(void)
+{
+	if (vgacon_text_force() && qxl_modeset == -1)
+		return -EINVAL;
+
+	if (qxl_modeset == 0)
+		return -EINVAL;
+	qxl_driver.num_ioctls = qxl_max_ioctls;
+	return pci_register_driver(&qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+	pci_unregister_driver(&qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.h b/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 0000000..9e034c5
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_gem.h>
+#include <drm/qxl_drm.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"qxl"
+#define DRIVER_DESC		"RH QXL"
+#define DRIVER_DATE		"20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_DEBUGFS_MAX_COMPONENTS		32
+
+extern int qxl_num_crtc;
+extern int qxl_max_ioctls;
+
+#define QXL_INTERRUPT_MASK (\
+	QXL_INTERRUPT_DISPLAY |\
+	QXL_INTERRUPT_CURSOR |\
+	QXL_INTERRUPT_IO_CMD |\
+	QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
+struct qxl_bo {
+	struct ttm_buffer_object	tbo;
+
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	struct ttm_place		placements[3];
+	struct ttm_placement		placement;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned int pin_count;
+	void				*kptr;
+	unsigned int                    map_count;
+	int                             type;
+
+	/* Constant after initialization */
+	unsigned int is_primary:1; /* is this now a primary surface */
+	unsigned int is_dumb:1;
+	struct qxl_bo *shadow;
+	unsigned int hw_surf_alloc:1;
+	struct qxl_surface surf;
+	uint32_t surface_id;
+	struct qxl_release *surf_create;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
+
+struct qxl_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+struct qxl_bo_list {
+	struct ttm_validate_buffer tv;
+};
+
+struct qxl_crtc {
+	struct drm_crtc base;
+	int index;
+
+	struct qxl_bo *cursor_bo;
+};
+
+struct qxl_output {
+	int index;
+	struct drm_connector base;
+	struct drm_encoder enc;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
+
+struct qxl_mman {
+	struct ttm_bo_device		bdev;
+};
+
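+/*
+ * A memslot describes one device-visible aperture (main VRAM or surface
+ * RAM). high_bits holds the slot id/generation tag that
+ * qxl_bo_physical_address() ORs into every address handed to the device.
+ */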
+struct qxl_memslot {
+	int             index;
+	const char      *name;
+	uint8_t		generation;
+	uint64_t	start_phys_addr;
+	uint64_t	size;
+	uint64_t	high_bits;
+	uint64_t        gpu_offset;
+};
+
+enum {
+	QXL_RELEASE_DRAWABLE,
+	QXL_RELEASE_SURFACE_CMD,
+	QXL_RELEASE_CURSOR_CMD,
+};
+
+/* drm_ prefix to differentiate from qxl_release_info in
+ * spice-protocol/qxl_dev.h */
+#define QXL_MAX_RES 96
+struct qxl_release {
+	struct dma_fence base;
+
+	int id;
+	int type;
+	struct qxl_bo *release_bo;
+	uint32_t release_offset;
+	uint32_t surface_release_id;
+	struct ww_acquire_ctx ticket;
+	struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+	struct list_head head;
+	struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+	struct qxl_bo *bo;
+	struct list_head chunk_list;
+};
+
+struct qxl_fb_image {
+	struct qxl_device *qdev;
+	uint32_t pseudo_palette[16];
+	struct fb_image fb_image;
+	uint32_t visual;
+};
+
+struct qxl_draw_fill {
+	struct qxl_device *qdev;
+	struct qxl_rect rect;
+	uint32_t color;
+	uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+	struct drm_info_list	*files;
+	unsigned int num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned int nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *rdev);
+
+struct qxl_device;
+
+struct qxl_device {
+	struct drm_device ddev;
+
+	resource_size_t vram_base, vram_size;
+	resource_size_t surfaceram_base, surfaceram_size;
+	resource_size_t rom_base, rom_size;
+	struct qxl_rom *rom;
+
+	struct qxl_mode *modes;
+	struct qxl_bo *monitors_config_bo;
+	struct qxl_monitors_config *monitors_config;
+
+	/* last received client_monitors_config */
+	struct qxl_monitors_config *client_monitors_config;
+
+	int io_base;
+	void *ram;
+	struct qxl_mman		mman;
+	struct qxl_gem		gem;
+
+	void *ram_physical;
+
+	struct qxl_ring *release_ring;
+	struct qxl_ring *command_ring;
+	struct qxl_ring *cursor_ring;
+
+	struct qxl_ram_header *ram_header;
+
+	struct qxl_bo *primary_bo;
+	struct qxl_bo *dumb_shadow_bo;
+	struct qxl_head *dumb_heads;
+
+	struct qxl_memslot main_slot;
+	struct qxl_memslot surfaces_slot;
+
+	spinlock_t	release_lock;
+	struct idr	release_idr;
+	uint32_t	release_seqno;
+	spinlock_t release_idr_lock;
+	struct mutex	async_io_mutex;
+	unsigned int last_sent_io_cmd;
+
+	/* interrupt handling */
+	atomic_t irq_received;
+	atomic_t irq_received_display;
+	atomic_t irq_received_cursor;
+	atomic_t irq_received_io_cmd;
+	unsigned int irq_received_error;
+	wait_queue_head_t display_event;
+	wait_queue_head_t cursor_event;
+	wait_queue_head_t io_cmd_event;
+	struct work_struct client_monitors_config_work;
+
+	/* debugfs */
+	struct qxl_debugfs	debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+	unsigned int debugfs_count;
+
+	struct mutex		update_area_mutex;
+
+	struct idr	surf_id_idr;
+	spinlock_t surf_id_idr_lock;
+	int last_alloced_surf_id;
+
+	struct mutex surf_evict_mutex;
+	struct io_mapping *vram_mapping;
+	struct io_mapping *surface_mapping;
+
+	struct mutex release_mutex;
+	struct qxl_bo *current_release_bo[3];
+	int current_release_bo_offset[3];
+
+	struct work_struct gc_work;
+
+	struct drm_property *hotplug_mode_update_property;
+	int monitors_config_width;
+	int monitors_config_height;
+};
+
+extern const struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctls;
+
+int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
+		    struct pci_dev *pdev);
+void qxl_device_fini(struct qxl_device *qdev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+void qxl_reinit_memslots(struct qxl_device *qdev);
+int qxl_surf_evict(struct qxl_device *qdev);
+int qxl_vram_evict(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+				 int element_size,
+				 int n_elements,
+				 int prod_notify,
+				 bool set_prod_notify,
+				 wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+void qxl_ring_init_hdr(struct qxl_ring *ring);
+int qxl_check_idle(struct qxl_ring *ring);
+
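+/*
+ * Translate a BO into a device-visible address: pick the memslot from
+ * the TTM placement (VRAM vs. surface RAM), rebase onto the slot's GPU
+ * offset and tag the result with the slot's high bits.
+ */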
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+			unsigned long offset)
+{
+	struct qxl_memslot *slot =
+		(bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		? &qdev->main_slot : &qdev->surfaces_slot;
+
+	WARN_ON_ONCE((bo->tbo.offset & slot->gpu_offset) != slot->gpu_offset);
+
+	/* TODO - need to hold one of the locks to read tbo.offset */
+	return slot->high_bits | (bo->tbo.offset - slot->gpu_offset + offset);
+}
+
+/* qxl_display.c */
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+int qxl_create_monitors_object(struct qxl_device *qdev);
+int qxl_destroy_monitors_object(struct qxl_device *qdev);
+
+/* qxl_gem.c */
+void qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			 struct drm_device *dev,
+			 struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p);
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_init(struct qxl_device *qdev,
+		   struct qxl_release *release,
+		   struct qxl_drm_image *dimage,
+		   const uint8_t *data,
+		   int x, int y, int width, int height,
+		   int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+			struct qxl_release *release,
+			struct qxl_drm_image **image_ptr,
+			int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+			   struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+		       const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+			       int type, struct qxl_release **release,
+			       struct qxl_bo **rbo);
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+			  struct qxl_release *release,
+			  unsigned long size,
+			  struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct drm_framebuffer *fb,
+		       struct qxl_bo *bo,
+		       unsigned int flags, unsigned int color,
+		       struct drm_clip_rect *clips,
+		       unsigned int num_clips, int inc,
+		       uint32_t dumb_shadow_offset);
+
+void qxl_release_free(struct qxl_device *qdev,
+		      struct qxl_release *release);
+
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+/* qxl_prime.c */
+int qxl_gem_prime_pin(struct drm_gem_object *obj);
+void qxl_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *sgt);
+void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
+void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int qxl_gem_prime_mmap(struct drm_gem_object *obj,
+				struct vm_area_struct *vma);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(int irq, void *arg);
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned int nfiles);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_dumb.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 0000000..4a0dc13
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	int r;
+	struct qxl_surface surf;
+	uint32_t pitch, format;
+
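+	/* effective bytes per pixel: only 16 and 32 bpp survive the switch
+	 * below, and (bpp + 1) / 8 yields 2 and 4 for those */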
+	pitch = args->width * ((args->bpp + 1) / 8);
+	args->size = pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	switch (args->bpp) {
+	case 16:
+		format = SPICE_SURFACE_FMT_16_565;
+		break;
+	case 32:
+		format = SPICE_SURFACE_FMT_32_xRGB;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	surf.width = args->width;
+	surf.height = args->height;
+	surf.stride = pitch;
+	surf.format = format;
+	surf.data = 0;
+
+	r = qxl_gem_object_create_with_handle(qdev, file_priv,
+					      QXL_GEM_DOMAIN_SURFACE,
+					      args->size, &surf, &qobj,
+					      &handle);
+	if (r)
+		return r;
+	qobj->is_dumb = true;
+	args->pitch = pitch;
+	args->handle = handle;
+	return 0;
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+
+	BUG_ON(!offset_p);
+	gobj = drm_gem_object_lookup(file_priv, handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	qobj = gem_to_qxl_bo(gobj);
+	*offset_p = qxl_bo_mmap_offset(qobj);
+	drm_gem_object_put_unlocked(gobj);
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_gem.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 0000000..69f37db
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <drm/drm.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+	struct qxl_device *qdev;
+	struct ttm_buffer_object *tbo;
+
+	qdev = (struct qxl_device *)gobj->dev->dev_private;
+
+	qxl_surface_evict(qdev, qobj, false);
+
+	tbo = &qobj->tbo;
+	ttm_bo_put(tbo);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj)
+{
+	struct qxl_bo *qbo;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE)
+		alignment = PAGE_SIZE;
+	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR(
+			"Failed to allocate GEM object (%d, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		return r;
+	}
+	*obj = &qbo->tbo.base;
+
+	mutex_lock(&qdev->gem.mutex);
+	list_add_tail(&qbo->list, &qdev->gem.objects);
+	mutex_unlock(&qdev->gem.mutex);
+
+	return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle)
+{
+	struct drm_gem_object *gobj;
+	int r;
+
+	BUG_ON(!qobj);
+	BUG_ON(!handle);
+
+	r = qxl_gem_object_create(qdev, size, 0,
+				  domain,
+				  false, false, surf,
+				  &gobj);
+	if (r)
+		return -ENOMEM;
+	r = drm_gem_handle_create(file_priv, gobj, handle);
+	if (r)
+		return r;
+	/* drop reference from allocate - handle holds it now */
+	*qobj = gem_to_qxl_bo(gobj);
+	drm_gem_object_put_unlocked(gobj);
+	return 0;
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv)
+{
+}
+
+void qxl_gem_init(struct qxl_device *qdev)
+{
+	INIT_LIST_HEAD(&qdev->gem.objects);
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+	qxl_bo_force_delete(qdev);
+}
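One subtlety in qxl_gem_object_create_with_handle() is worth spelling out: the helper drops the allocation reference before returning, so the new handle holds the only long-term reference and the returned *qobj is borrowed. A caller-side sketch of that contract (mirroring qxl_mode_dumb_create() earlier in this patch, not new driver code):

struct qxl_bo *qobj;
uint32_t handle;
int r;

r = qxl_gem_object_create_with_handle(qdev, file_priv,
				      QXL_GEM_DOMAIN_SURFACE,
				      args->size, &surf, &qobj, &handle);
if (r)
	return r;
/*
 * 'handle' now owns the only reference; 'qobj' stays valid only as long
 * as the handle does.  Use it immediately, and take qxl_bo_ref() first
 * if it has to survive a userspace GEM_CLOSE on the handle.
 */
qobj->is_dumb = true;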
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_image.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 0000000..60ab715
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int
+qxl_allocate_chunk(struct qxl_device *qdev,
+		   struct qxl_release *release,
+		   struct qxl_drm_image *image,
+		   unsigned int chunk_size)
+{
+	struct qxl_drm_chunk *chunk;
+	int ret;
+
+	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+	if (!chunk)
+		return -ENOMEM;
+
+	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+	if (ret) {
+		kfree(chunk);
+		return ret;
+	}
+
+	list_add_tail(&chunk->head, &image->chunk_list);
+	return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+			struct qxl_release *release,
+			struct qxl_drm_image **image_ptr,
+			int height, int stride)
+{
+	struct qxl_drm_image *image;
+	int ret;
+
+	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+	if (!image)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&image->chunk_list);
+
+	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+	if (ret) {
+		kfree(image);
+		return ret;
+	}
+
+	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+	if (ret) {
+		qxl_bo_unref(&image->bo);
+		kfree(image);
+		return ret;
+	}
+	*image_ptr = image;
+	return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
+{
+	struct qxl_drm_chunk *chunk, *tmp;
+
+	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+		qxl_bo_unref(&chunk->bo);
+		kfree(chunk);
+	}
+
+	qxl_bo_unref(&dimage->bo);
+	kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+		      struct qxl_release *release,
+		      struct qxl_drm_image *dimage,
+		      const uint8_t *data,
+		      int width, int height,
+		      int depth, unsigned int hash,
+		      int stride)
+{
+	struct qxl_drm_chunk *drv_chunk;
+	struct qxl_image *image;
+	struct qxl_data_chunk *chunk;
+	int i;
+	int chunk_stride;
+	int linesize = width * depth / 8;
+	struct qxl_bo *chunk_bo, *image_bo;
+	void *ptr;
+	/* Chunk */
+	/* FIXME: Check integer overflow */
+	/* TODO: variable number of chunks */
+
+	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+	chunk_bo = drv_chunk->bo;
+	chunk_stride = stride; /* TODO: should use linesize, but it renders
+				  wrong (check the bitmaps are sent correctly
+				  first) */
+
+	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
+	chunk = ptr;
+	chunk->data_size = height * chunk_stride;
+	chunk->prev_chunk = 0;
+	chunk->next_chunk = 0;
+	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+
+	{
+		void *k_data, *i_data;
+		int remain;
+		int page;
+		int size;
+
+		if (stride == linesize && chunk_stride == stride) {
+			remain = linesize * height;
+			page = 0;
+			i_data = (void *)data;
+
+			while (remain > 0) {
+				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+				if (page == 0) {
+					chunk = ptr;
+					k_data = chunk->data;
+					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
+				} else {
+					k_data = ptr;
+					size = PAGE_SIZE;
+				}
+				size = min(size, remain);
+
+				memcpy(k_data, i_data, size);
+
+				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+				i_data += size;
+				remain -= size;
+				page++;
+			}
+		} else {
+			unsigned int page_base, page_offset, out_offset;
+
+			for (i = 0 ; i < height ; ++i) {
+				i_data = (void *)data + i * stride;
+				remain = linesize;
+				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
+
+				while (remain > 0) {
+					page_base = out_offset & PAGE_MASK;
+					page_offset = offset_in_page(out_offset);
+					size = min((int)(PAGE_SIZE - page_offset), remain);
+
+					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
+					k_data = ptr + page_offset;
+					memcpy(k_data, i_data, size);
+					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+					remain -= size;
+					i_data += size;
+					out_offset += size;
+				}
+			}
+		}
+	}
+	qxl_bo_kunmap(chunk_bo);
+
+	image_bo = dimage->bo;
+	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+	image = ptr;
+
+	image->descriptor.id = 0;
+	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+	image->descriptor.flags = 0;
+	image->descriptor.width = width;
+	image->descriptor.height = height;
+
+	switch (depth) {
+	case 1:
+		/* TODO: BE? check by arch? */
+		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+		break;
+	case 24:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+		break;
+	case 32:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+		break;
+	default:
+		DRM_ERROR("unsupported image bit depth\n");
+		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+		return -EINVAL;
+	}
+	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+	image->u.bitmap.x = width;
+	image->u.bitmap.y = height;
+	image->u.bitmap.stride = chunk_stride;
+	image->u.bitmap.palette = 0;
+	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+
+	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+
+	return 0;
+}
+
+int qxl_image_init(struct qxl_device *qdev,
+		     struct qxl_release *release,
+		     struct qxl_drm_image *dimage,
+		     const uint8_t *data,
+		     int x, int y, int width, int height,
+		     int depth, int stride)
+{
+	data += y * stride + x * (depth / 8);
+	return qxl_image_init_helper(qdev, release, dimage, data,
+				       width, height, depth, 0, stride);
+}
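The fast path of qxl_image_init_helper() streams the bitmap through page-sized atomic mappings, where page 0 loses offsetof(struct qxl_data_chunk, data) bytes to the chunk header. A stand-alone model of just that copy arithmetic (plain C with made-up sizes, compilable on its own; HDR stands in for the header size):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_PAGE 4096
#define HDR 16	/* stands in for offsetof(struct qxl_data_chunk, data) */

/* Copy 'len' bytes into a sequence of pages; page 0 starts with a header. */
static void chunked_copy(unsigned char **pages, const unsigned char *src, size_t len)
{
	size_t remain = len, page = 0;

	while (remain > 0) {
		size_t off = page == 0 ? HDR : 0;	/* header only on page 0 */
		size_t size = MODEL_PAGE - off;

		if (size > remain)
			size = remain;
		memcpy(pages[page] + off, src, size);
		src += size;
		remain -= size;
		page++;
	}
}

int main(void)
{
	unsigned char *pages[3], src[2 * MODEL_PAGE];
	size_t i;

	for (i = 0; i < 3; i++)
		pages[i] = malloc(MODEL_PAGE);
	for (i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)i;
	chunked_copy(pages, src, sizeof(src));
	/* byte 0 of the payload landed right after the header on page 0 ... */
	assert(pages[0][HDR] == src[0]);
	/* ... so page 1 starts (MODEL_PAGE - HDR) bytes into the source */
	assert(pages[1][0] == src[MODEL_PAGE - HDR]);
	return 0;
}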
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_ioctl.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 0000000..72f3f1b
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: we currently allocate a new gem (in a qxl_bo) for each request;
+ * this is wasteful since bo's are page aligned.
+ */
+static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc *qxl_alloc = data;
+	int ret;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+	if (qxl_alloc->size == 0) {
+		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+		return -EINVAL;
+	}
+	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+						domain,
+						qxl_alloc->size,
+						NULL,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return -ENOMEM;
+	}
+	qxl_alloc->handle = handle;
+	return 0;
+}
+
+static int qxl_map_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_map *qxl_map = data;
+
+	return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
+				  &qxl_map->offset);
+}
+
+struct qxl_reloc_info {
+	int type;
+	struct qxl_bo *dst_bo;
+	uint32_t dst_offset;
+	struct qxl_bo *src_bo;
+	int src_offset;
+};
+
+/*
+ * dst must be validated, i.e. whole bo on vram/surfaces ram (right now all bo's
+ * are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
+{
+	void *reloc_page;
+
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+											      info->src_bo,
+											      info->src_offset);
+	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
+}
+
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
+{
+	uint32_t id = 0;
+	void *reloc_page;
+
+	if (info->src_bo && !info->src_bo->is_primary)
+		id = info->src_bo->surface_id;
+
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
+}
+
+/* return holding the reference to this object */
+static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
+			      struct qxl_release *release, struct qxl_bo **qbo_p)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+	int ret;
+
+	gobj = drm_gem_object_lookup(file_priv, handle);
+	if (!gobj)
+		return -EINVAL;
+
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_release_list_add(release, qobj);
+	drm_gem_object_put_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	*qbo_p = qobj;
+	return 0;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
+static int qxl_process_single_command(struct qxl_device *qdev,
+				      struct drm_qxl_command *cmd,
+				      struct drm_file *file_priv)
+{
+	struct qxl_reloc_info *reloc_info;
+	int release_type;
+	struct qxl_release *release;
+	struct qxl_bo *cmd_bo;
+	void *fb_cmd;
+	int i, ret, num_relocs;
+	int unwritten;
+
+	switch (cmd->type) {
+	case QXL_CMD_DRAW:
+		release_type = QXL_RELEASE_DRAWABLE;
+		break;
+	case QXL_CMD_SURFACE:
+	case QXL_CMD_CURSOR:
+	default:
+		DRM_DEBUG("Only draw commands in execbuffers\n");
+		return -EINVAL;
+	}
+
+	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+		return -EINVAL;
+
+	if (!access_ok(u64_to_user_ptr(cmd->command),
+		       cmd->command_size))
+		return -EFAULT;
+
+	reloc_info = kmalloc_array(cmd->relocs_num,
+				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
+	if (!reloc_info)
+		return -ENOMEM;
+
+	ret = qxl_alloc_release_reserved(qdev,
+					 sizeof(union qxl_release_info) +
+					 cmd->command_size,
+					 release_type,
+					 &release,
+					 &cmd_bo);
+	if (ret)
+		goto out_free_reloc;
+
+	/* TODO copy slow path code from i915 */
+	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
+	unwritten = __copy_from_user_inatomic_nocache
+		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
+		 u64_to_user_ptr(cmd->command), cmd->command_size);
+
+	{
+		struct qxl_drawable *draw = fb_cmd;
+
+		draw->mm_time = qdev->rom->mm_clock;
+	}
+
+	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+	if (unwritten) {
+		DRM_ERROR("got unwritten %d\n", unwritten);
+		ret = -EFAULT;
+		goto out_free_release;
+	}
+
+	/* fill out reloc info structs */
+	num_relocs = 0;
+	for (i = 0; i < cmd->relocs_num; ++i) {
+		struct drm_qxl_reloc reloc;
+		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
+
+		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
+			ret = -EFAULT;
+			goto out_free_bos;
+		}
+
+		/* add the bos to the list of bos to validate -
+		   need to validate first then process relocs? */
+		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
+
+			ret = -EINVAL;
+			goto out_free_bos;
+		}
+		reloc_info[i].type = reloc.reloc_type;
+
+		if (reloc.dst_handle) {
+			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
+						 &reloc_info[i].dst_bo);
+			if (ret)
+				goto out_free_bos;
+			reloc_info[i].dst_offset = reloc.dst_offset;
+		} else {
+			reloc_info[i].dst_bo = cmd_bo;
+			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+		}
+		num_relocs++;
+
+		/* reserve and validate the reloc dst bo */
+		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
+			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
+						 &reloc_info[i].src_bo);
+			if (ret)
+				goto out_free_bos;
+			reloc_info[i].src_offset = reloc.src_offset;
+		} else {
+			reloc_info[i].src_bo = NULL;
+			reloc_info[i].src_offset = 0;
+		}
+	}
+
+	/* validate all buffers */
+	ret = qxl_release_reserve_list(release, false);
+	if (ret)
+		goto out_free_bos;
+
+	for (i = 0; i < cmd->relocs_num; ++i) {
+		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+			apply_reloc(qdev, &reloc_info[i]);
+		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+			apply_surf_reloc(qdev, &reloc_info[i]);
+	}
+
+	qxl_release_fence_buffer_objects(release);
+	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+
+out_free_bos:
+out_free_release:
+	if (ret)
+		qxl_release_free(qdev, release);
+out_free_reloc:
+	kfree(reloc_info);
+	return ret;
+}
+
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_execbuffer *execbuffer = data;
+	struct drm_qxl_command user_cmd;
+	int cmd_num;
+	int ret;
+
+	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+
+		struct drm_qxl_command __user *commands =
+			u64_to_user_ptr(execbuffer->commands);
+
+		if (copy_from_user(&user_cmd, commands + cmd_num,
+				       sizeof(user_cmd)))
+			return -EFAULT;
+
+		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_update_area *update_area = data;
+	struct qxl_rect area = {.left = update_area->left,
+				.top = update_area->top,
+				.right = update_area->right,
+				.bottom = update_area->bottom};
+	int ret;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qobj = NULL;
+	struct ttm_operation_ctx ctx = { true, false };
+
+	if (update_area->left >= update_area->right ||
+	    update_area->top >= update_area->bottom)
+		return -EINVAL;
+
+	gobj = drm_gem_object_lookup(file, update_area->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	if (!qobj->pin_count) {
+		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
+		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
+		if (unlikely(ret))
+			goto out;
+	}
+
+	ret = qxl_bo_check_id(qdev, qobj);
+	if (ret)
+		goto out2;
+	if (!qobj->surface_id)
+		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+	ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+	qxl_bo_unreserve(qobj);
+
+out:
+	drm_gem_object_put_unlocked(gobj);
+	return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_getparam *param = data;
+
+	switch (param->param) {
+	case QXL_PARAM_NUM_SURFACES:
+		param->value = qdev->rom->n_surfaces;
+		break;
+	case QXL_PARAM_MAX_RELOCS:
+		param->value = QXL_MAX_RES;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_clientcap *param = data;
+	int byte, idx;
+
+	byte = param->index / 8;
+	idx = param->index % 8;
+
+	if (dev->pdev->revision < 4)
+		return -ENOSYS;
+
+	if (byte >= 58)
+		return -ENOSYS;
+
+	if (qdev->rom->client_capabilities[byte] & (1 << idx))
+		return 0;
+	return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc_surf *param = data;
+	struct qxl_bo *qobj;
+	int handle;
+	int ret;
+	int size, actual_stride;
+	struct qxl_surface surf;
+
+	/* work out size, allocate bo with handle */
+	actual_stride = param->stride < 0 ? -param->stride : param->stride;
+	size = actual_stride * param->height + actual_stride;
+
+	surf.format = param->format;
+	surf.width = param->width;
+	surf.height = param->height;
+	surf.stride = param->stride;
+	surf.data = 0;
+
+	ret = qxl_gem_object_create_with_handle(qdev, file,
+						QXL_GEM_DOMAIN_SURFACE,
+						size,
+						&surf,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return -ENOMEM;
+	}
+	param->handle = handle;
+	return 0;
+}
+
+const struct drm_ioctl_desc qxl_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+							DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+							DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+							DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+							DRM_AUTH),
+
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+			  DRM_AUTH),
+};
+
+int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
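These ioctls are exposed through include/uapi/drm/qxl_drm.h. A minimal userspace sketch of the two simplest entries, getparam and alloc (it assumes /dev/dri/card0 is the qxl node and the fd is DRM-authenticated; error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_qxl_getparam gp = { .param = QXL_PARAM_NUM_SURFACES };
	struct drm_qxl_alloc alloc = { .size = 4096 };

	if (ioctl(fd, DRM_IOCTL_QXL_GETPARAM, &gp) == 0)
		printf("n_surfaces: %llu\n", (unsigned long long)gp.value);

	/* qxl_alloc_ioctl() rejects size == 0 and returns a GEM handle */
	if (ioctl(fd, DRM_IOCTL_QXL_ALLOC, &alloc) == 0)
		printf("handle: %u\n", alloc.handle);
	return 0;
}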
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_irq.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 0000000..8435af1
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_irq.h>
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+	uint32_t pending;
+
+	pending = xchg(&qdev->ram_header->int_pending, 0);
+
+	if (!pending)
+		return IRQ_NONE;
+
+	atomic_inc(&qdev->irq_received);
+
+	if (pending & QXL_INTERRUPT_DISPLAY) {
+		atomic_inc(&qdev->irq_received_display);
+		wake_up_all(&qdev->display_event);
+		qxl_queue_garbage_collect(qdev, false);
+	}
+	if (pending & QXL_INTERRUPT_CURSOR) {
+		atomic_inc(&qdev->irq_received_cursor);
+		wake_up_all(&qdev->cursor_event);
+	}
+	if (pending & QXL_INTERRUPT_IO_CMD) {
+		atomic_inc(&qdev->irq_received_io_cmd);
+		wake_up_all(&qdev->io_cmd_event);
+	}
+	if (pending & QXL_INTERRUPT_ERROR) {
+		/* TODO: log it, reset device (only way to exit this condition)
+		 * (do it a certain number of times, afterwards admit defeat,
+		 * to avoid endless loops).
+		 */
+		qdev->irq_received_error++;
+		DRM_WARN("driver is in bug mode\n");
+	}
+	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+		schedule_work(&qdev->client_monitors_config_work);
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+	return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device,
+					       client_monitors_config_work);
+
+	qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+	int ret;
+
+	init_waitqueue_head(&qdev->display_event);
+	init_waitqueue_head(&qdev->cursor_event);
+	init_waitqueue_head(&qdev->io_cmd_event);
+	INIT_WORK(&qdev->client_monitors_config_work,
+		  qxl_client_monitors_config_work_func);
+	atomic_set(&qdev->irq_received, 0);
+	atomic_set(&qdev->irq_received_display, 0);
+	atomic_set(&qdev->irq_received_cursor, 0);
+	atomic_set(&qdev->irq_received_io_cmd, 0);
+	qdev->irq_received_error = 0;
+	ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed installing irq: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
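qxl_irq_handler() depends on xchg() reading and clearing int_pending in a single atomic step, so bits the device raises between the read and the clear cannot be lost. The same read-and-clear idiom, modelled in portable C11 atomics (illustrative only; the kernel side uses xchg() on the shared ram_header):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t int_pending;

/* device side: raise a bit at any time */
static void device_raise(uint32_t bit)
{
	atomic_fetch_or(&int_pending, bit);
}

/* handler side: one atomic swap fetches all pending bits and clears them */
static uint32_t handler_read_and_clear(void)
{
	return atomic_exchange(&int_pending, 0);
}

int main(void)
{
	device_raise(1u << 0);	/* e.g. QXL_INTERRUPT_DISPLAY */
	device_raise(1u << 1);	/* e.g. QXL_INTERRUPT_CURSOR */
	printf("pending = 0x%x\n", handler_read_and_clear());
	printf("pending = 0x%x\n", handler_read_and_clear()); /* now 0 -> IRQ_NONE */
	return 0;
}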
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_kms.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 0000000..9bdbe0d
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/io-mapping.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_probe_helper.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_log_level;
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+	struct qxl_rom *rom = qdev->rom;
+
+	if (rom->magic != 0x4f525851) {
+		DRM_ERROR("bad rom signature %x\n", rom->magic);
+		return false;
+	}
+
+	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+		 rom->log_level);
+	DRM_INFO("%d io pages at offset 0x%x\n",
+		 rom->num_io_pages, rom->pages_offset);
+	DRM_INFO("%d byte draw area at offset 0x%x\n",
+		 rom->surface0_area_size, rom->draw_area_offset);
+
+	qdev->vram_size = rom->surface0_area_size;
+	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+	return true;
+}
+
+static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
+{
+	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
+	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
+	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
+}
+
+static void setup_slot(struct qxl_device *qdev,
+		       struct qxl_memslot *slot,
+		       unsigned int slot_index,
+		       const char *slot_name,
+		       unsigned long start_phys_addr,
+		       unsigned long size)
+{
+	uint64_t high_bits;
+
+	slot->index = slot_index;
+	slot->name = slot_name;
+	slot->start_phys_addr = start_phys_addr;
+	slot->size = size;
+
+	setup_hw_slot(qdev, slot);
+
+	slot->generation = qdev->rom->slot_generation;
+	high_bits = (qdev->rom->slots_start + slot->index)
+		<< qdev->rom->slot_gen_bits;
+	high_bits |= slot->generation;
+	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
+	slot->high_bits = high_bits;
+
+	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
+		 slot->index, slot->name,
+		 (unsigned long)slot->start_phys_addr,
+		 (unsigned long)slot->size,
+		 (unsigned long)slot->gpu_offset);
+}
+
+void qxl_reinit_memslots(struct qxl_device *qdev)
+{
+	setup_hw_slot(qdev, &qdev->main_slot);
+	setup_hw_slot(qdev, &qdev->surfaces_slot);
+}
+
+static void qxl_gc_work(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+
+	qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+		    struct drm_driver *drv,
+		    struct pci_dev *pdev)
+{
+	int r, sb;
+
+	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
+	if (r) {
+		pr_err("Unable to init drm dev\n");
+		goto error;
+	}
+
+	qdev->ddev.pdev = pdev;
+	pci_set_drvdata(pdev, &qdev->ddev);
+	qdev->ddev.dev_private = qdev;
+
+	mutex_init(&qdev->gem.mutex);
+	mutex_init(&qdev->update_area_mutex);
+	mutex_init(&qdev->release_mutex);
+	mutex_init(&qdev->surf_evict_mutex);
+	qxl_gem_init(qdev);
+
+	qdev->rom_base = pci_resource_start(pdev, 2);
+	qdev->rom_size = pci_resource_len(pdev, 2);
+	qdev->vram_base = pci_resource_start(pdev, 0);
+	qdev->io_base = pci_resource_start(pdev, 3);
+
+	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+	if (!qdev->vram_mapping) {
+		pr_err("Unable to create vram_mapping\n");
+		r = -ENOMEM;
+		goto error;
+	}
+
+	if (pci_resource_len(pdev, 4) > 0) {
+		/* 64bit surface bar present */
+		sb = 4;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+	if (qdev->surface_mapping == NULL) {
+		/* 64bit surface bar not present (or mapping failed) */
+		sb = 1;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+		if (!qdev->surface_mapping) {
+			pr_err("Unable to create surface_mapping\n");
+			r = -ENOMEM;
+			goto vram_mapping_free;
+		}
+	}
+
+	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
+		 (unsigned long long)qdev->vram_base,
+		 (unsigned long long)pci_resource_end(pdev, 0),
+		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+		 (int)pci_resource_len(pdev, 0) / 1024,
+		 (unsigned long long)qdev->surfaceram_base,
+		 (unsigned long long)pci_resource_end(pdev, sb),
+		 (int)qdev->surfaceram_size / 1024 / 1024,
+		 (int)qdev->surfaceram_size / 1024,
+		 (sb == 4) ? "64bit" : "32bit");
+
+	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+	if (!qdev->rom) {
+		pr_err("Unable to ioremap ROM\n");
+		r = -ENOMEM;
+		goto surface_mapping_free;
+	}
+
+	if (!qxl_check_device(qdev)) {
+		r = -ENODEV;
+		goto rom_unmap;
+	}
+
+	r = qxl_bo_init(qdev);
+	if (r) {
+		DRM_ERROR("bo init failed %d\n", r);
+		goto rom_unmap;
+	}
+
+	qdev->ram_header = ioremap(qdev->vram_base +
+				   qdev->rom->ram_header_offset,
+				   sizeof(*qdev->ram_header));
+	if (!qdev->ram_header) {
+		DRM_ERROR("Unable to ioremap RAM header\n");
+		r = -ENOMEM;
+		goto bo_fini;
+	}
+
+	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+					     sizeof(struct qxl_command),
+					     QXL_COMMAND_RING_SIZE,
+					     qdev->io_base + QXL_IO_NOTIFY_CMD,
+					     false,
+					     &qdev->display_event);
+	if (!qdev->command_ring) {
+		DRM_ERROR("Unable to create command ring\n");
+		r = -ENOMEM;
+		goto ram_header_unmap;
+	}
+
+	qdev->cursor_ring = qxl_ring_create(
+				&(qdev->ram_header->cursor_ring_hdr),
+				sizeof(struct qxl_command),
+				QXL_CURSOR_RING_SIZE,
+				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
+				false,
+				&qdev->cursor_event);
+
+	if (!qdev->cursor_ring) {
+		DRM_ERROR("Unable to create cursor ring\n");
+		r = -ENOMEM;
+		goto command_ring_free;
+	}
+
+	qdev->release_ring = qxl_ring_create(
+				&(qdev->ram_header->release_ring_hdr),
+				sizeof(uint64_t),
+				QXL_RELEASE_RING_SIZE, 0, true,
+				NULL);
+
+	if (!qdev->release_ring) {
+		DRM_ERROR("Unable to create release ring\n");
+		r = -ENOMEM;
+		goto cursor_ring_free;
+	}
+
+	idr_init(&qdev->release_idr);
+	spin_lock_init(&qdev->release_idr_lock);
+	spin_lock_init(&qdev->release_lock);
+
+	idr_init(&qdev->surf_id_idr);
+	spin_lock_init(&qdev->surf_id_idr_lock);
+
+	mutex_init(&qdev->async_io_mutex);
+
+	/* reset the device into a known state - no memslots, no primary
+	 * created, no surfaces. */
+	qxl_io_reset(qdev);
+
+	/* must initialize irq before first async io - slot creation */
+	r = qxl_irq_init(qdev);
+	if (r) {
+		DRM_ERROR("Unable to init qxl irq\n");
+		goto release_ring_free;
+	}
+
+	/*
+	 * Note that virtual is surface0. We rely on the single ioremap done
+	 * before.
+	 */
+	setup_slot(qdev, &qdev->main_slot, 0, "main",
+		   (unsigned long)qdev->vram_base,
+		   (unsigned long)qdev->rom->ram_header_offset);
+	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
+		   (unsigned long)qdev->surfaceram_base,
+		   (unsigned long)qdev->surfaceram_size);
+
+	INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+	return 0;
+
+release_ring_free:
+	qxl_ring_free(qdev->release_ring);
+cursor_ring_free:
+	qxl_ring_free(qdev->cursor_ring);
+command_ring_free:
+	qxl_ring_free(qdev->command_ring);
+ram_header_unmap:
+	iounmap(qdev->ram_header);
+bo_fini:
+	qxl_bo_fini(qdev);
+rom_unmap:
+	iounmap(qdev->rom);
+surface_mapping_free:
+	io_mapping_free(qdev->surface_mapping);
+vram_mapping_free:
+	io_mapping_free(qdev->vram_mapping);
+error:
+	return r;
+}
+
+void qxl_device_fini(struct qxl_device *qdev)
+{
+	qxl_bo_unref(&qdev->current_release_bo[0]);
+	qxl_bo_unref(&qdev->current_release_bo[1]);
+	flush_work(&qdev->gc_work);
+	qxl_ring_free(qdev->command_ring);
+	qxl_ring_free(qdev->cursor_ring);
+	qxl_ring_free(qdev->release_ring);
+	qxl_gem_fini(qdev);
+	qxl_bo_fini(qdev);
+	io_mapping_free(qdev->surface_mapping);
+	io_mapping_free(qdev->vram_mapping);
+	iounmap(qdev->ram_header);
+	iounmap(qdev->rom);
+	qdev->rom = NULL;
+}
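setup_slot() packs the slot id and generation into the top bits of every device-visible address, leaving the low bits for the offset inside the slot (qxl_bo_physical_address(), used throughout this patch, combines high_bits with that offset). A worked example of the bit layout; slot_gen_bits and slot_id_bits come from the ROM at runtime, so the values below are purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative ROM values, not necessarily what QEMU reports */
	unsigned int slot_gen_bits = 8, slot_id_bits = 8;
	unsigned int slots_start = 1, index = 0, generation = 3;
	uint64_t high_bits;

	/* same arithmetic as setup_slot() above */
	high_bits = (uint64_t)(slots_start + index) << slot_gen_bits;
	high_bits |= generation;
	high_bits <<= 64 - (slot_gen_bits + slot_id_bits);

	printf("high_bits = 0x%016llx\n", (unsigned long long)high_bits);
	printf("addr(off=0x1000) = 0x%016llx\n",
	       (unsigned long long)(high_bits | 0x1000));
	return 0;
}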
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_object.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 0000000..548dfe6
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/io-mapping.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct qxl_bo *bo;
+	struct qxl_device *qdev;
+
+	bo = to_qxl_bo(tbo);
+	qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+
+	qxl_surface_evict(qdev, bo, false);
+	WARN_ON_ONCE(bo->map_count > 0);
+	mutex_lock(&qdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&qdev->gem.mutex);
+	drm_gem_object_release(&bo->tbo.base);
+	kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+	return bo->destroy == &qxl_ttm_bo_destroy;
+}
+
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
+{
+	u32 c = 0;
+	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+	unsigned int i;
+
+	qbo->placement.placement = qbo->placements;
+	qbo->placement.busy_placement = qbo->placements;
+	if (domain == QXL_GEM_DOMAIN_VRAM)
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+	if (domain == QXL_GEM_DOMAIN_SURFACE) {
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+	}
+	if (domain == QXL_GEM_DOMAIN_CPU)
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
+	if (!c)
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	qbo->placement.num_placement = c;
+	qbo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		qbo->placements[i].fpfn = 0;
+		qbo->placements[i].lpfn = 0;
+	}
+}
+
+int qxl_bo_create(struct qxl_device *qdev,
+		  unsigned long size, bool kernel, bool pinned, u32 domain,
+		  struct qxl_surface *surf,
+		  struct qxl_bo **bo_ptr)
+{
+	struct qxl_bo *bo;
+	enum ttm_bo_type type;
+	int r;
+
+	if (kernel)
+		type = ttm_bo_type_kernel;
+	else
+		type = ttm_bo_type_device;
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	size = roundup(size, PAGE_SIZE);
+	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->type = domain;
+	bo->pin_count = pinned ? 1 : 0;
+	bo->surface_id = 0;
+	INIT_LIST_HEAD(&bo->list);
+
+	if (surf)
+		bo->surf = *surf;
+
+	qxl_ttm_placement_from_domain(bo, domain, pinned);
+
+	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, 0, !kernel, size,
+			NULL, NULL, &qxl_ttm_bo_destroy);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(qdev->ddev.dev,
+				"object_init failed for (%lu, 0x%08X)\n",
+				size, domain);
+		return r;
+	}
+	*bo_ptr = bo;
+	return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr)
+			*ptr = bo->kptr;
+		bo->map_count++;
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
+		return r;
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr)
+		*ptr = bo->kptr;
+	bo->map_count = 1;
+	return 0;
+}
+
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+			      struct qxl_bo *bo, int page_offset)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	void *rptr;
+	int ret;
+	struct io_mapping *map;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		map = qdev->vram_mapping;
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+		map = qdev->surface_mapping;
+	else
+		goto fallback;
+
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+
+	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+	if (bo->kptr) {
+		rptr = bo->kptr + (page_offset * PAGE_SIZE);
+		return rptr;
+	}
+
+	ret = qxl_bo_kmap(bo, &rptr);
+	if (ret)
+		return NULL;
+
+	rptr += page_offset * PAGE_SIZE;
+	return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->map_count--;
+	if (bo->map_count > 0)
+		return;
+	bo->kptr = NULL;
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+			       struct qxl_bo *bo, void *pmap)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+
+	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+		goto fallback;
+
+	io_mapping_unmap_atomic(pmap);
+
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+	return;
+ fallback:
+	qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+	if ((*bo) == NULL)
+		return;
+
+	drm_gem_object_put_unlocked(&(*bo)->tbo.base);
+	*bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+	drm_gem_object_get(&bo->tbo.base);
+	return bo;
+}
+
+static int __qxl_bo_pin(struct qxl_bo *bo)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct drm_device *ddev = bo->tbo.base.dev;
+	int r;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		return 0;
+	}
+	qxl_ttm_placement_from_domain(bo, bo->type, true);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (likely(r == 0))
+		bo->pin_count = 1;
+	if (unlikely(r != 0))
+		dev_err(ddev->dev, "%p pin failed\n", bo);
+	return r;
+}
+
+static int __qxl_bo_unpin(struct qxl_bo *bo)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct drm_device *ddev = bo->tbo.base.dev;
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (unlikely(r != 0))
+		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
+	return r;
+}
+
+/*
+ * Reserve the BO before pinning the object.  If the BO was already
+ * reserved, call the internal version __qxl_bo_pin() directly.
+ */
+int qxl_bo_pin(struct qxl_bo *bo)
+{
+	int r;
+
+	r = qxl_bo_reserve(bo, false);
+	if (r)
+		return r;
+
+	r = __qxl_bo_pin(bo);
+	qxl_bo_unreserve(bo);
+	return r;
+}
+
+/*
+ * Reserve the BO before unpinning the object.  If the BO was already
+ * reserved, call the internal version __qxl_bo_unpin() directly.
+ */
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+	int r;
+
+	r = qxl_bo_reserve(bo, false);
+	if (r)
+		return r;
+
+	r = __qxl_bo_unpin(bo);
+	qxl_bo_unreserve(bo);
+	return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+	struct qxl_bo *bo, *n;
+
+	if (list_empty(&qdev->gem.objects))
+		return;
+	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
+	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
+		mutex_lock(&qdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&qdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_put_unlocked(&bo->tbo.base);
+	}
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+	return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+	qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+	int ret;
+
+	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+		/* allocate a surface id for this surface now */
+		ret = qxl_surface_id_alloc(qdev, bo);
+		if (ret)
+			return ret;
+
+		ret = qxl_hw_surface_alloc(qdev, bo);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+int qxl_surf_evict(struct qxl_device *qdev)
+{
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+}
+
+int qxl_vram_evict(struct qxl_device *qdev)
+{
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+}
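Pin counts nest, so every successful qxl_bo_pin() must be matched by a qxl_bo_unpin(). A caller-side sketch of the expected bracketing (kernel context assumed, 'bo' held by the caller):

int r;

r = qxl_bo_pin(bo);	/* reserves, validates with NO_EVICT, pin_count++ */
if (r)
	return r;
/*
 * ... the BO cannot be evicted here, so it is safe to hand its
 * physical address to the device ...
 */
qxl_bo_unpin(bo);	/* pin_count--; re-validated as evictable at zero */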
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_object.h b/marvell/linux/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 0000000..8ae54ba
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct drm_device *ddev = bo->tbo.base.dev;
+
+			dev_err(ddev->dev, "%p reserve failed\n", bo);
+		}
+		return r;
+	}
+	return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+			      bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct drm_device *ddev = bo->tbo.base.dev;
+
+			dev_err(ddev->dev, "%p reserve failed for wait\n",
+				bo);
+		}
+		return r;
+	}
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+			 unsigned long size,
+			 bool kernel, bool pinned, u32 domain,
+			 struct qxl_surface *surf,
+			 struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+#endif
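The inline helpers above give the usual TTM reserve/unreserve bracket that the rest of this patch leans on (compare qxl_update_area_ioctl()). The pattern, sketched:

int r = qxl_bo_reserve(qobj, false);	/* interruptible reserve */
if (r)
	return r;			/* -ERESTARTSYS is silent by design */
/* ... validate placement, check the surface id, issue io ... */
qxl_bo_unreserve(qobj);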
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_prime.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_prime.c
new file mode 100644
index 0000000..7d3816f
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_prime.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* Empty implementations: there should not be any other driver for a virtual
+ * device that might share buffers with qxl. */
+
+int qxl_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+	return qxl_bo_pin(bo);
+}
+
+void qxl_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+	qxl_bo_unpin(bo);
+}
+
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *table)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct qxl_bo *bo = gem_to_qxl_bo(obj);
+	void *ptr;
+	int ret;
+
+	ret = qxl_bo_kmap(bo, &ptr);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return ptr;
+}
+
+void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct qxl_bo *bo = gem_to_qxl_bo(obj);
+
+	qxl_bo_kunmap(bo);
+}
+
+int qxl_gem_prime_mmap(struct drm_gem_object *obj,
+		       struct vm_area_struct *area)
+{
+	return -ENOSYS;
+}
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_release.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 0000000..312216c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include <trace/events/dma_fence.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256 byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releasables */
+/* stack them 16 high for now - drawable object is 191 */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
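+/*
+ * Worked example of the suballocation above: a release BO is one 4096-byte
+ * page, so a drawable release lands at
+ *   offset = current_release_bo_offset * 256   (16 slots per BO)
+ * and a surface-cmd release at
+ *   offset = current_release_bo_offset * 128   (32 slots per BO),
+ * which is exactly what qxl_alloc_release_reserved() computes below.
+ */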
+
+static const char *qxl_get_driver_name(struct dma_fence *fence)
+{
+	return "qxl";
+}
+
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
+{
+	return "release";
+}
+
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+			   signed long timeout)
+{
+	struct qxl_device *qdev;
+	struct qxl_release *release;
+	int count = 0, sc = 0;
+	bool have_drawable_releases;
+	unsigned long cur, end = jiffies + timeout;
+
+	qdev = container_of(fence->lock, struct qxl_device, release_lock);
+	release = container_of(fence, struct qxl_release, base);
+	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
+
+retry:
+	sc++;
+
+	if (dma_fence_is_signaled(fence))
+		goto signaled;
+
+	qxl_io_notify_oom(qdev);
+
+	for (count = 0; count < 11; count++) {
+		if (!qxl_queue_garbage_collect(qdev, true))
+			break;
+
+		if (dma_fence_is_signaled(fence))
+			goto signaled;
+	}
+
+	if (dma_fence_is_signaled(fence))
+		goto signaled;
+
+	if (have_drawable_releases || sc < 4) {
+		if (sc > 2)
+			/* back off */
+			usleep_range(500, 1000);
+
+		if (time_after(jiffies, end))
+			return 0;
+
+		if (have_drawable_releases && sc > 300) {
+			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+				       "after spincount %d\n",
+				       fence->context & ~0xf0000000, sc);
+			goto signaled;
+		}
+		goto retry;
+	}
+	/*
+	 * yeah, original sync_obj_wait gave up after 3 spins when
+	 * have_drawable_releases is not set.
+	 */
+
+signaled:
+	cur = jiffies;
+	if (time_after(cur, end))
+		return 0;
+	return end - cur;
+}
+
+static const struct dma_fence_ops qxl_fence_ops = {
+	.get_driver_name = qxl_get_driver_name,
+	.get_timeline_name = qxl_get_timeline_name,
+	.wait = qxl_fence_wait,
+};
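+/*
+ * Note: a custom .wait is needed because release fences are only signalled
+ * from qxl_release_free(), i.e. once the host has pushed the id back through
+ * the release ring; the loop above therefore prods the device (notify_oom,
+ * garbage collect) rather than just sleeping on the fence.
+ */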
+
+static int
+qxl_release_alloc(struct qxl_device *qdev, int type,
+		  struct qxl_release **ret)
+{
+	struct qxl_release *release;
+	int handle;
+	size_t size = sizeof(*release);
+
+	release = kmalloc(size, GFP_KERNEL);
+	if (!release) {
+		DRM_ERROR("Out of memory\n");
+		return -ENOMEM;
+	}
+	release->base.ops = NULL;
+	release->type = type;
+	release->release_offset = 0;
+	release->surface_release_id = 0;
+	INIT_LIST_HEAD(&release->bos);
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&qdev->release_idr_lock);
+	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+	release->base.seqno = ++qdev->release_seqno;
+	spin_unlock(&qdev->release_idr_lock);
+	idr_preload_end();
+	if (handle < 0) {
+		kfree(release);
+		*ret = NULL;
+		return handle;
+	}
+	*ret = release;
+	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
+	release->id = handle;
+	return handle;
+}
+
+static void
+qxl_release_free_list(struct qxl_release *release)
+{
+	while (!list_empty(&release->bos)) {
+		struct qxl_bo_list *entry;
+		struct qxl_bo *bo;
+
+		entry = container_of(release->bos.next,
+				     struct qxl_bo_list, tv.head);
+		bo = to_qxl_bo(entry->tv.bo);
+		qxl_bo_unref(&bo);
+		list_del(&entry->tv.head);
+		kfree(entry);
+	}
+	release->release_bo = NULL;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+		 struct qxl_release *release)
+{
+	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);
+
+	if (release->surface_release_id)
+		qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+	spin_lock(&qdev->release_idr_lock);
+	idr_remove(&qdev->release_idr, release->id);
+	spin_unlock(&qdev->release_idr_lock);
+
+	if (release->base.ops) {
+		WARN_ON(list_empty(&release->bos));
+		qxl_release_free_list(release);
+
+		dma_fence_signal(&release->base);
+		dma_fence_put(&release->base);
+	} else {
+		qxl_release_free_list(release);
+		kfree(release);
+	}
+}
+
+static int qxl_release_bo_alloc(struct qxl_device *qdev,
+				struct qxl_bo **bo)
+{
+	/* pin release bo's - they are too messy to evict */
+	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
+}
+
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+{
+	struct qxl_bo_list *entry;
+
+	list_for_each_entry(entry, &release->bos, tv.head) {
+		if (entry->tv.bo == &bo->tbo)
+			return 0;
+	}
+
+	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	qxl_bo_ref(bo);
+	entry->tv.bo = &bo->tbo;
+	entry->tv.num_shared = 0;
+	list_add_tail(&entry->tv.head, &release->bos);
+	return 0;
+}
+
+static int qxl_release_validate_bo(struct qxl_bo *bo)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	int ret;
+
+	if (!bo->pin_count) {
+		qxl_ttm_placement_from_domain(bo, bo->type, false);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (ret)
+			return ret;
+	}
+
+	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+	if (ret)
+		return ret;
+
+	/* allocate a surface for reserved + validated buffers */
+	ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+{
+	int ret;
+	struct qxl_bo_list *entry;
+
+	/* If there is only one object on the release, it is the release
+	   itself; since these objects are pinned, no reservation is
+	   needed. */
+	if (list_is_singular(&release->bos))
+		return 0;
+
+	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+				     !no_intr, NULL, true);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(entry, &release->bos, tv.head) {
+		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+
+		ret = qxl_release_validate_bo(bo);
+		if (ret) {
+			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+			return ret;
+		}
+	}
+	return 0;
+}
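+
+/*
+ * ttm_eu_reserve_buffers() takes every reservation on the list under a
+ * single ww_acquire ticket, so the set is locked atomically with
+ * deadlock back-off; on a validation failure the whole ticket must be
+ * backed off before returning, as done above.
+ */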
+
+void qxl_release_backoff_reserve_list(struct qxl_release *release)
+{
+	/* If there is only one object on the release, it is the release
+	   itself and was never reserved (see qxl_release_reserve_list()),
+	   so there is nothing to back off. */
+	if (list_is_singular(&release->bos))
+		return;
+
+	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release)
+{
+	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+		int idr_ret;
+		struct qxl_bo *bo;
+		union qxl_release_info *info;
+
+		/* stash the release after the create command */
+		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+		if (idr_ret < 0)
+			return idr_ret;
+		bo = create_rel->release_bo;
+
+		(*release)->release_bo = bo;
+		(*release)->release_offset = create_rel->release_offset + 64;
+
+		qxl_release_list_add(*release, bo);
+
+		info = qxl_release_map(qdev, *release);
+		info->id = idr_ret;
+		qxl_release_unmap(qdev, *release, info);
+		return 0;
+	}
+
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+					 QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
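+
+/*
+ * In the DESTROY path above the release is stashed in the same BO as
+ * the matching create command, 64 bytes past it; that constant is
+ * presumably the per-release slot size used for surface commands (see
+ * release_size_per_bo).
+ */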
+
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+				       int type, struct qxl_release **release,
+				       struct qxl_bo **rbo)
+{
+	struct qxl_bo *bo;
+	int idr_ret;
+	int ret = 0;
+	union qxl_release_info *info;
+	int cur_idx;
+
+	if (type == QXL_RELEASE_DRAWABLE) {
+		cur_idx = 0;
+	} else if (type == QXL_RELEASE_SURFACE_CMD) {
+		cur_idx = 1;
+	} else if (type == QXL_RELEASE_CURSOR_CMD) {
+		cur_idx = 2;
+	} else {
+		DRM_ERROR("got illegal type: %d\n", type);
+		return -EINVAL;
+	}
+
+	idr_ret = qxl_release_alloc(qdev, type, release);
+	if (idr_ret < 0) {
+		if (rbo)
+			*rbo = NULL;
+		return idr_ret;
+	}
+
+	mutex_lock(&qdev->release_mutex);
+	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+		qdev->current_release_bo_offset[cur_idx] = 0;
+		qdev->current_release_bo[cur_idx] = NULL;
+	}
+	if (!qdev->current_release_bo[cur_idx]) {
+		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+		if (ret) {
+			mutex_unlock(&qdev->release_mutex);
+			qxl_release_free(qdev, *release);
+			return ret;
+		}
+	}
+
+	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+	(*release)->release_bo = bo;
+	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+	qdev->current_release_bo_offset[cur_idx]++;
+
+	if (rbo)
+		*rbo = bo;
+
+	mutex_unlock(&qdev->release_mutex);
+
+	ret = qxl_release_list_add(*release, bo);
+	qxl_bo_unref(&bo);
+	if (ret) {
+		qxl_release_free(qdev, *release);
+		return ret;
+	}
+
+	info = qxl_release_map(qdev, *release);
+	info->id = idr_ret;
+	qxl_release_unmap(qdev, *release, info);
+
+	return ret;
+}
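+
+/*
+ * Releases are sub-allocated from a per-type "current" BO in fixed-size
+ * slots:
+ *
+ *	release_offset = slot_index * release_size_per_bo[cur_idx];
+ *
+ * and a fresh BO is allocated once releases_per_bo[cur_idx] slots have
+ * been handed out.  For example, a 4 KiB BO with (hypothetical) 64-byte
+ * slots would hold 64 releases before a new BO is needed.
+ */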
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id)
+{
+	struct qxl_release *release;
+
+	spin_lock(&qdev->release_idr_lock);
+	release = idr_find(&qdev->release_idr, id);
+	spin_unlock(&qdev->release_idr_lock);
+	if (!release) {
+		DRM_ERROR("failed to find id in release_idr\n");
+		return NULL;
+	}
+
+	return release;
+}
+
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release)
+{
+	void *ptr;
+	union qxl_release_info *info;
+	struct qxl_bo *bo = release->release_bo;
+
+	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
+	if (!ptr)
+		return NULL;
+	info = ptr + (release->release_offset & ~PAGE_MASK);
+	return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info)
+{
+	struct qxl_bo *bo = release->release_bo;
+	void *ptr;
+
+	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
+	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
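+
+/*
+ * qxl_release_map()/qxl_release_unmap() split release_offset into a
+ * page-aligned part (offset & PAGE_MASK), used to kmap a single page of
+ * the BO, and an intra-page part (offset & ~PAGE_MASK) added to that
+ * mapping.  E.g. with 4 KiB pages, offset 0x1040 maps the page at
+ * 0x1000 and points 0x40 bytes into it.
+ */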
+
+void qxl_release_fence_buffer_objects(struct qxl_release *release)
+{
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_validate_buffer *entry;
+	struct qxl_device *qdev;
+
+	/* If there is only one object on the release, it is the release
+	   itself and was never reserved, so there is nothing to fence or
+	   unreserve here. */
+	if (list_is_singular(&release->bos) || list_empty(&release->bos))
+		return;
+
+	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	qdev = container_of(bdev, struct qxl_device, mman.bdev);
+
+	/*
+	 * Since we never really allocated a fence context and we don't want
+	 * to conflict with other contexts, set the highest bits.  This will
+	 * break if we ever really allow exporting these objects as dma-bufs.
+	 */
+	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+		       release->id | 0xf0000000, release->base.seqno);
+	trace_dma_fence_emit(&release->base);
+
+	glob = bdev->glob;
+
+	spin_lock(&glob->lru_lock);
+
+	list_for_each_entry(entry, &release->bos, head) {
+		bo = entry->bo;
+
+		dma_resv_add_shared_fence(bo->base.resv, &release->base);
+		ttm_bo_add_to_lru(bo);
+		dma_resv_unlock(bo->base.resv);
+	}
+	spin_unlock(&glob->lru_lock);
+	ww_acquire_fini(&release->ticket);
+}
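+
+/*
+ * The loop above effectively open-codes ttm_eu_fence_buffer_objects():
+ * the release fence is attached to each BO as a shared fence, the BOs
+ * are put back on the LRU, and their reservations are dropped before
+ * the acquire ticket is finished.
+ */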
+
diff --git a/marvell/linux/drivers/gpu/drm/qxl/qxl_ttm.c b/marvell/linux/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 0000000..9b24514
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/qxl_drm.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+	struct qxl_mman *mman;
+	struct qxl_device *qdev;
+
+	mman = container_of(bdev, struct qxl_mman, bdev);
+	qdev = container_of(mman, struct qxl_device, mman);
+	return qdev;
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	vm_fault_t ret;
+
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
+	if (bo == NULL)
+		return VM_FAULT_NOPAGE;
+	ret = ttm_vm_ops->fault(vmf);
+	return ret;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int r;
+	struct drm_file *file_priv = filp->private_data;
+	struct qxl_device *qdev = file_priv->minor->dev->dev_private;
+
+	if (qdev == NULL) {
+		DRM_ERROR(
+		 "filp->private_data->minor->dev->dev_private == NULL\n");
+		return -EINVAL;
+	}
+	DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+		  filp->private_data, vma->vm_pgoff);
+
+	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		qxl_ttm_vm_ops = *ttm_vm_ops;
+		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+	}
+	vma->vm_ops = &qxl_ttm_vm_ops;
+	return 0;
+}
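+
+/*
+ * On the first mmap, TTM's default vm_ops are captured and copied into
+ * qxl_ttm_vm_ops with only .fault replaced; every qxl VMA then uses the
+ * wrapped table, letting qxl_ttm_fault() bail out with VM_FAULT_NOPAGE
+ * when no buffer object is attached, before delegating to TTM's own
+ * fault handler.
+ */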
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man)
+{
+	struct qxl_device *qdev = qxl_get_qdev(bdev);
+	unsigned int gpu_offset_shift =
+		64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
+	struct qxl_memslot *slot;
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+	case TTM_PL_PRIV:
+		/* "On-card" video ram */
+		slot = (type == TTM_PL_VRAM) ?
+			&qdev->main_slot : &qdev->surfaces_slot;
+		slot->gpu_offset = (uint64_t)type << gpu_offset_shift;
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = slot->gpu_offset;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+		return -EINVAL;
+	}
+	return 0;
+}
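+
+/*
+ * QXL GPU addresses carry the memory-slot id in their high bits: the
+ * shift computed above, 64 - (slot_gen_bits + slot_id_bits + 8), places
+ * the slot field, and the TTM memory type value itself is shifted in as
+ * the slot number for the two fixed slots (main VRAM and surfaces).
+ */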
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct qxl_bo *qbo;
+	static const struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+	};
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	qbo = to_qxl_bo(bo);
+	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
+	*placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	struct qxl_bo *qbo = to_qxl_bo(bo);
+
+	return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
+					  filp->private_data);
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->vram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	case TTM_PL_PRIV:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->surfaceram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
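+
+/*
+ * For CPU access the bus address of a page is bus.base + bus.offset:
+ * TTM_PL_VRAM BOs map into the PCI VRAM BAR and TTM_PL_PRIV (surfaces)
+ * BOs into the surface-RAM BAR, while system memory needs no I/O
+ * mapping at all.
+ */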
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+	struct ttm_tt		        ttm;
+	struct qxl_device		*qdev;
+	u64				offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+				struct ttm_mem_reg *bo_mem)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	/* Not implemented */
+	return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	/* Not implemented */
+	return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	ttm_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+	.bind = &qxl_ttm_backend_bind,
+	.unbind = &qxl_ttm_backend_unbind,
+	.destroy = &qxl_ttm_backend_destroy,
+};
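+
+/*
+ * QXL has no GTT-like aperture, so bind/unbind are deliberately left
+ * unimplemented: buffer objects only ever live in system memory or one
+ * of the two fixed VRAM ranges, and moves between them use memcpy.
+ */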
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
+					uint32_t page_flags)
+{
+	struct qxl_device *qdev;
+	struct qxl_ttm_tt *gtt;
+
+	qdev = qxl_get_qdev(bo->bdev);
+	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL)
+		return NULL;
+	gtt->ttm.func = &qxl_backend_func;
+	gtt->qdev = qdev;
+	if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
+		       struct ttm_operation_ctx *ctx,
+		       struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+
+	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+	if (ret)
+		return ret;
+
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		qxl_move_null(bo, new_mem);
+		return 0;
+	}
+	return ttm_bo_move_memcpy(bo, ctx, new_mem);
+}
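+
+/*
+ * There is no DMA engine to offload moves: leaving TTM_PL_SYSTEM with
+ * no populated ttm is a pure bookkeeping update (qxl_move_null), and
+ * every other move falls back to ttm_bo_move_memcpy().
+ */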
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+			       bool evict,
+			       struct ttm_mem_reg *new_mem)
+{
+	struct qxl_bo *qbo;
+	struct qxl_device *qdev;
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo))
+		return;
+	qbo = to_qxl_bo(bo);
+	qdev = qbo->tbo.base.dev->dev_private;
+
+	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+	.ttm_tt_create = &qxl_ttm_tt_create,
+	.invalidate_caches = &qxl_invalidate_caches,
+	.init_mem_type = &qxl_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = &qxl_evict_flags,
+	.move = &qxl_bo_move,
+	.verify_access = &qxl_verify_access,
+	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
+	.io_mem_free = &qxl_ttm_io_mem_free,
+	.move_notify = &qxl_bo_move_notify,
+};
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+	int r;
+	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&qdev->mman.bdev,
+			       &qxl_bo_driver,
+			       qdev->ddev.anon_inode->i_mapping,
+			       false);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	/* NOTE: this includes the framebuffer (aka surface 0) */
+	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+			   num_io_pages);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
+			   qdev->surfaceram_size / PAGE_SIZE);
+	if (r) {
+		DRM_ERROR("Failed initializing Surfaces heap.\n");
+		return r;
+	}
+	DRM_INFO("qxl: %uM of VRAM memory size\n",
+		 (unsigned int)qdev->vram_size / (1024 * 1024));
+	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+		 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	DRM_INFO("qxl: %uM of Surface memory size\n",
+		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
+	return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+	ttm_bo_device_release(&qdev->mman.bdev);
+	DRM_INFO("qxl: ttm finalized\n");
+}
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct qxl_device *rdev = dev->dev_private;
+	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_print(mm, &p);
+	spin_unlock(&glob->lru_lock);
+	return 0;
+}
+#endif
+
+int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+	unsigned int i;
+
+	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+		if (i == 0)
+			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+		else
+			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+		qxl_mem_types_list[i].driver_features = 0;
+		if (i == 0)
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+		else
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
+
+	}
+	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+#else
+	return 0;
+#endif
+}