ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/Kconfig b/marvell/linux/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 0000000..6b28a32
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_VMWGFX
+	tristate "DRM driver for VMware Virtual GPU"
+	depends on DRM && PCI && X86 && MMU
+	select FB_DEFERRED_IO
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_TTM
+	select FB
+	# Only needed for the transitional use of drm_crtc_init - can be removed
+	# again once vmwgfx sets up the primary plane itself.
+	select DRM_KMS_HELPER
+	help
+	  Choose this option if you would like to run 3D acceleration
+	  in a VMware virtual machine.
+	  This is a KMS enabled DRM driver for the VMware SVGA2
+	  virtual hardware.
+	  The compiled module will be called "vmwgfx.ko".
+
+config DRM_VMWGFX_FBCON
+	depends on DRM_VMWGFX && FB
+	bool "Enable framebuffer console under vmwgfx by default"
+	help
+	   Choose this option if you are shipping a new vmwgfx
+	   userspace driver that supports using the kernel driver.
+
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/Makefile b/marvell/linux/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 0000000..8841bd3
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
+	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
+	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
+	    vmwgfx_validation.o \
+	    ttm_object.o ttm_lock.o
+
+obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 0000000..8cce7f1
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 0000000..69c4253
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ *       Definitions for SVGA3D hardware capabilities.  Capabilities
+ *       are used to query for optional rendering features during
+ *       driver initialization. The capability data is stored as a very
+ *       basic key/value dictionary within the "FIFO register" memory
+ *       area at the beginning of BAR2.
+ *
+ *       Note that these definitions are only for 3D capabilities.
+ *       The SVGA device also has "device capabilities" and "FIFO
+ *       capabilities", which are non-3D-specific and are stored as
+ *       bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE   (SVGA_FIFO_3D_CAPS_LAST - \
+                                  SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ *    Record types that can be found in the caps block.
+ *    Related record types are grouped together numerically so that
+ *    SVGA3dCaps_FindRecord() can be applied on a range of record
+ *    types.
+ */
+
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ *    Header field leading each caps block record. Contains the offset (in
+ *    register words, NOT bytes) to the next caps block record (or the end
+ *    of caps block records which will be a zero word) and the record type
+ *    as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ *    Caps block record; "data" is a placeholder for the actual data structure
+ *    contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
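+
+/*
+ * Illustrative sketch (not part of the device headers): one way a driver
+ * could walk the caps block and look for a record of a given type.  The
+ * 'caps' pointer is assumed to point at the first record and the helper
+ * name is an assumption for this example only; the walk itself follows the
+ * definitions above (length is in 32-bit words, a zero word terminates).
+ *
+ *   static SVGA3dCapsRecord *
+ *   svga3d_caps_find_record(uint32 *caps, SVGA3dCapsRecordType type)
+ *   {
+ *      SVGA3dCapsRecord *record = (SVGA3dCapsRecord *)caps;
+ *
+ *      while (record->header.length != 0) {
+ *         if (record->header.type == type)
+ *            return record;
+ *         record = (SVGA3dCapsRecord *)((uint32 *)record +
+ *                                       record->header.length);
+ *      }
+ *      return NULL;
+ *   }
+ */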
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 0000000..9cbba0e
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2184 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ *       SVGA 3d hardware cmd definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
+ * and up.
+ */
+
+typedef enum {
+   SVGA_3D_CMD_LEGACY_BASE                                = 1000,
+   SVGA_3D_CMD_BASE                                       = 1040,
+
+   SVGA_3D_CMD_SURFACE_DEFINE                             = 1040,
+   SVGA_3D_CMD_SURFACE_DESTROY                            = 1041,
+   SVGA_3D_CMD_SURFACE_COPY                               = 1042,
+   SVGA_3D_CMD_SURFACE_STRETCHBLT                         = 1043,
+   SVGA_3D_CMD_SURFACE_DMA                                = 1044,
+   SVGA_3D_CMD_CONTEXT_DEFINE                             = 1045,
+   SVGA_3D_CMD_CONTEXT_DESTROY                            = 1046,
+   SVGA_3D_CMD_SETTRANSFORM                               = 1047,
+   SVGA_3D_CMD_SETZRANGE                                  = 1048,
+   SVGA_3D_CMD_SETRENDERSTATE                             = 1049,
+   SVGA_3D_CMD_SETRENDERTARGET                            = 1050,
+   SVGA_3D_CMD_SETTEXTURESTATE                            = 1051,
+   SVGA_3D_CMD_SETMATERIAL                                = 1052,
+   SVGA_3D_CMD_SETLIGHTDATA                               = 1053,
+   SVGA_3D_CMD_SETLIGHTENABLED                            = 1054,
+   SVGA_3D_CMD_SETVIEWPORT                                = 1055,
+   SVGA_3D_CMD_SETCLIPPLANE                               = 1056,
+   SVGA_3D_CMD_CLEAR                                      = 1057,
+   SVGA_3D_CMD_PRESENT                                    = 1058,
+   SVGA_3D_CMD_SHADER_DEFINE                              = 1059,
+   SVGA_3D_CMD_SHADER_DESTROY                             = 1060,
+   SVGA_3D_CMD_SET_SHADER                                 = 1061,
+   SVGA_3D_CMD_SET_SHADER_CONST                           = 1062,
+   SVGA_3D_CMD_DRAW_PRIMITIVES                            = 1063,
+   SVGA_3D_CMD_SETSCISSORRECT                             = 1064,
+   SVGA_3D_CMD_BEGIN_QUERY                                = 1065,
+   SVGA_3D_CMD_END_QUERY                                  = 1066,
+   SVGA_3D_CMD_WAIT_FOR_QUERY                             = 1067,
+   SVGA_3D_CMD_PRESENT_READBACK                           = 1068,
+   SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN                     = 1069,
+   SVGA_3D_CMD_SURFACE_DEFINE_V2                          = 1070,
+   SVGA_3D_CMD_GENERATE_MIPMAPS                           = 1071,
+   SVGA_3D_CMD_DEAD4                                      = 1072,
+   SVGA_3D_CMD_DEAD5                                      = 1073,
+   SVGA_3D_CMD_DEAD6                                      = 1074,
+   SVGA_3D_CMD_DEAD7                                      = 1075,
+   SVGA_3D_CMD_DEAD8                                      = 1076,
+   SVGA_3D_CMD_DEAD9                                      = 1077,
+   SVGA_3D_CMD_DEAD10                                     = 1078,
+   SVGA_3D_CMD_DEAD11                                     = 1079,
+   SVGA_3D_CMD_ACTIVATE_SURFACE                           = 1080,
+   SVGA_3D_CMD_DEACTIVATE_SURFACE                         = 1081,
+   SVGA_3D_CMD_SCREEN_DMA                                 = 1082,
+   SVGA_3D_CMD_DEAD1                                      = 1083,
+   SVGA_3D_CMD_DEAD2                                      = 1084,
+
+   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
+   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
+   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1087,
+   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1088,
+   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1089,
+   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1090,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE                            = 1091,
+   SVGA_3D_CMD_READBACK_OTABLE                            = 1092,
+
+   SVGA_3D_CMD_DEFINE_GB_MOB                              = 1093,
+   SVGA_3D_CMD_DESTROY_GB_MOB                             = 1094,
+   SVGA_3D_CMD_DEAD3                                      = 1095,
+   SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING                      = 1096,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE                          = 1097,
+   SVGA_3D_CMD_DESTROY_GB_SURFACE                         = 1098,
+   SVGA_3D_CMD_BIND_GB_SURFACE                            = 1099,
+   SVGA_3D_CMD_COND_BIND_GB_SURFACE                       = 1100,
+   SVGA_3D_CMD_UPDATE_GB_IMAGE                            = 1101,
+   SVGA_3D_CMD_UPDATE_GB_SURFACE                          = 1102,
+   SVGA_3D_CMD_READBACK_GB_IMAGE                          = 1103,
+   SVGA_3D_CMD_READBACK_GB_SURFACE                        = 1104,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE                        = 1105,
+   SVGA_3D_CMD_INVALIDATE_GB_SURFACE                      = 1106,
+
+   SVGA_3D_CMD_DEFINE_GB_CONTEXT                          = 1107,
+   SVGA_3D_CMD_DESTROY_GB_CONTEXT                         = 1108,
+   SVGA_3D_CMD_BIND_GB_CONTEXT                            = 1109,
+   SVGA_3D_CMD_READBACK_GB_CONTEXT                        = 1110,
+   SVGA_3D_CMD_INVALIDATE_GB_CONTEXT                      = 1111,
+
+   SVGA_3D_CMD_DEFINE_GB_SHADER                           = 1112,
+   SVGA_3D_CMD_DESTROY_GB_SHADER                          = 1113,
+   SVGA_3D_CMD_BIND_GB_SHADER                             = 1114,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE64                          = 1115,
+
+   SVGA_3D_CMD_BEGIN_GB_QUERY                             = 1116,
+   SVGA_3D_CMD_END_GB_QUERY                               = 1117,
+   SVGA_3D_CMD_WAIT_FOR_GB_QUERY                          = 1118,
+
+   SVGA_3D_CMD_NOP                                        = 1119,
+
+   SVGA_3D_CMD_ENABLE_GART                                = 1120,
+   SVGA_3D_CMD_DISABLE_GART                               = 1121,
+   SVGA_3D_CMD_MAP_MOB_INTO_GART                          = 1122,
+   SVGA_3D_CMD_UNMAP_GART_RANGE                           = 1123,
+
+   SVGA_3D_CMD_DEFINE_GB_SCREENTARGET                     = 1124,
+   SVGA_3D_CMD_DESTROY_GB_SCREENTARGET                    = 1125,
+   SVGA_3D_CMD_BIND_GB_SCREENTARGET                       = 1126,
+   SVGA_3D_CMD_UPDATE_GB_SCREENTARGET                     = 1127,
+
+   SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL                  = 1128,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL                = 1129,
+
+   SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE                 = 1130,
+
+   SVGA_3D_CMD_GB_SCREEN_DMA                              = 1131,
+   SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH                 = 1132,
+   SVGA_3D_CMD_GB_MOB_FENCE                               = 1133,
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V2                       = 1134,
+   SVGA_3D_CMD_DEFINE_GB_MOB64                            = 1135,
+   SVGA_3D_CMD_REDEFINE_GB_MOB64                          = 1136,
+   SVGA_3D_CMD_NOP_ERROR                                  = 1137,
+
+   SVGA_3D_CMD_SET_VERTEX_STREAMS                         = 1138,
+   SVGA_3D_CMD_SET_VERTEX_DECLS                           = 1139,
+   SVGA_3D_CMD_SET_VERTEX_DIVISORS                        = 1140,
+   SVGA_3D_CMD_DRAW                                       = 1141,
+   SVGA_3D_CMD_DRAW_INDEXED                               = 1142,
+
+   /*
+    * DX10 Commands
+    */
+   SVGA_3D_CMD_DX_MIN                                     = 1143,
+   SVGA_3D_CMD_DX_DEFINE_CONTEXT                          = 1143,
+   SVGA_3D_CMD_DX_DESTROY_CONTEXT                         = 1144,
+   SVGA_3D_CMD_DX_BIND_CONTEXT                            = 1145,
+   SVGA_3D_CMD_DX_READBACK_CONTEXT                        = 1146,
+   SVGA_3D_CMD_DX_INVALIDATE_CONTEXT                      = 1147,
+   SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER              = 1148,
+   SVGA_3D_CMD_DX_SET_SHADER_RESOURCES                    = 1149,
+   SVGA_3D_CMD_DX_SET_SHADER                              = 1150,
+   SVGA_3D_CMD_DX_SET_SAMPLERS                            = 1151,
+   SVGA_3D_CMD_DX_DRAW                                    = 1152,
+   SVGA_3D_CMD_DX_DRAW_INDEXED                            = 1153,
+   SVGA_3D_CMD_DX_DRAW_INSTANCED                          = 1154,
+   SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED                  = 1155,
+   SVGA_3D_CMD_DX_DRAW_AUTO                               = 1156,
+   SVGA_3D_CMD_DX_SET_INPUT_LAYOUT                        = 1157,
+   SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS                      = 1158,
+   SVGA_3D_CMD_DX_SET_INDEX_BUFFER                        = 1159,
+   SVGA_3D_CMD_DX_SET_TOPOLOGY                            = 1160,
+   SVGA_3D_CMD_DX_SET_RENDERTARGETS                       = 1161,
+   SVGA_3D_CMD_DX_SET_BLEND_STATE                         = 1162,
+   SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE                  = 1163,
+   SVGA_3D_CMD_DX_SET_RASTERIZER_STATE                    = 1164,
+   SVGA_3D_CMD_DX_DEFINE_QUERY                            = 1165,
+   SVGA_3D_CMD_DX_DESTROY_QUERY                           = 1166,
+   SVGA_3D_CMD_DX_BIND_QUERY                              = 1167,
+   SVGA_3D_CMD_DX_SET_QUERY_OFFSET                        = 1168,
+   SVGA_3D_CMD_DX_BEGIN_QUERY                             = 1169,
+   SVGA_3D_CMD_DX_END_QUERY                               = 1170,
+   SVGA_3D_CMD_DX_READBACK_QUERY                          = 1171,
+   SVGA_3D_CMD_DX_SET_PREDICATION                         = 1172,
+   SVGA_3D_CMD_DX_SET_SOTARGETS                           = 1173,
+   SVGA_3D_CMD_DX_SET_VIEWPORTS                           = 1174,
+   SVGA_3D_CMD_DX_SET_SCISSORRECTS                        = 1175,
+   SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW                 = 1176,
+   SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW                 = 1177,
+   SVGA_3D_CMD_DX_PRED_COPY_REGION                        = 1178,
+   SVGA_3D_CMD_DX_PRED_COPY                               = 1179,
+   SVGA_3D_CMD_DX_PRESENTBLT                              = 1180,
+   SVGA_3D_CMD_DX_GENMIPS                                 = 1181,
+   SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE                      = 1182,
+   SVGA_3D_CMD_DX_READBACK_SUBRESOURCE                    = 1183,
+   SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE                  = 1184,
+   SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW              = 1185,
+   SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW             = 1186,
+   SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW                = 1187,
+   SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW               = 1188,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW                = 1189,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW               = 1190,
+   SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT                    = 1191,
+   SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT                   = 1192,
+   SVGA_3D_CMD_DX_DEFINE_BLEND_STATE                      = 1193,
+   SVGA_3D_CMD_DX_DESTROY_BLEND_STATE                     = 1194,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE               = 1195,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE              = 1196,
+   SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE                 = 1197,
+   SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE                = 1198,
+   SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE                    = 1199,
+   SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE                   = 1200,
+   SVGA_3D_CMD_DX_DEFINE_SHADER                           = 1201,
+   SVGA_3D_CMD_DX_DESTROY_SHADER                          = 1202,
+   SVGA_3D_CMD_DX_BIND_SHADER                             = 1203,
+   SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT                     = 1204,
+   SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT                    = 1205,
+   SVGA_3D_CMD_DX_SET_STREAMOUTPUT                        = 1206,
+   SVGA_3D_CMD_DX_SET_COTABLE                             = 1207,
+   SVGA_3D_CMD_DX_READBACK_COTABLE                        = 1208,
+   SVGA_3D_CMD_DX_BUFFER_COPY                             = 1209,
+   SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER                    = 1210,
+   SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK               = 1211,
+   SVGA_3D_CMD_DX_MOVE_QUERY                              = 1212,
+   SVGA_3D_CMD_DX_BIND_ALL_QUERY                          = 1213,
+   SVGA_3D_CMD_DX_READBACK_ALL_QUERY                      = 1214,
+   SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER               = 1215,
+   SVGA_3D_CMD_DX_MOB_FENCE_64                            = 1216,
+   SVGA_3D_CMD_DX_BIND_ALL_SHADER                         = 1217,
+   SVGA_3D_CMD_DX_HINT                                    = 1218,
+   SVGA_3D_CMD_DX_BUFFER_UPDATE                           = 1219,
+   SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
+   SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET           = 1221,
+   SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
+
+   /*
+    * Reserve some IDs to be used for the SM5 shader types.
+    */
+   SVGA_3D_CMD_DX_RESERVED1                               = 1223,
+   SVGA_3D_CMD_DX_RESERVED2                               = 1224,
+   SVGA_3D_CMD_DX_RESERVED3                               = 1225,
+
+   SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER                    = 1226,
+   SVGA_3D_CMD_DX_MAX                                     = 1227,
+
+   SVGA_3D_CMD_SCREEN_COPY                                = 1227,
+
+   /*
+    * Reserve some IDs to be used for video.
+    */
+   SVGA_3D_CMD_VIDEO_RESERVED1                            = 1228,
+   SVGA_3D_CMD_VIDEO_RESERVED2                            = 1229,
+   SVGA_3D_CMD_VIDEO_RESERVED3                            = 1230,
+   SVGA_3D_CMD_VIDEO_RESERVED4                            = 1231,
+   SVGA_3D_CMD_VIDEO_RESERVED5                            = 1232,
+   SVGA_3D_CMD_VIDEO_RESERVED6                            = 1233,
+   SVGA_3D_CMD_VIDEO_RESERVED7                            = 1234,
+   SVGA_3D_CMD_VIDEO_RESERVED8                            = 1235,
+
+   SVGA_3D_CMD_GROW_OTABLE                                = 1236,
+   SVGA_3D_CMD_DX_GROW_COTABLE                            = 1237,
+   SVGA_3D_CMD_INTRA_SURFACE_COPY                         = 1238,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V3                       = 1239,
+
+   SVGA_3D_CMD_DX_RESOLVE_COPY                            = 1240,
+   SVGA_3D_CMD_DX_PRED_RESOLVE_COPY                       = 1241,
+   SVGA_3D_CMD_DX_PRED_CONVERT_REGION                     = 1242,
+   SVGA_3D_CMD_DX_PRED_CONVERT                            = 1243,
+   SVGA_3D_CMD_WHOLE_SURFACE_COPY                         = 1244,
+
+   SVGA_3D_CMD_MAX                                        = 1245,
+   SVGA_3D_CMD_FUTURE_MAX                                 = 3000
+} SVGAFifo3dCmdId;
+
+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
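+
+/*
+ * Illustrative sketch (not part of the device headers): per the ID ranges
+ * described above, a command ID belongs to the current SVGA3D protocol if
+ * it falls in [SVGA_3D_CMD_BASE, SVGA_3D_CMD_MAX).  The helper name is an
+ * assumption for this example only.
+ *
+ *   static inline bool svga3d_cmd_id_is_current(uint32 id)
+ *   {
+ *      return id >= SVGA_3D_CMD_BASE && id < SVGA_3D_CMD_MAX;
+ *   }
+ */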
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               id;
+   uint32               size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
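+
+/*
+ * Illustrative sketch (not part of the device headers): every 3d command in
+ * the FIFO is this header followed immediately by the command body, with
+ * 'size' giving the length of the body in bytes.  A guest-side encoder
+ * might emit a SURFACE_DESTROY command roughly as below; fifo_reserve()
+ * and fifo_commit() are assumed helpers for this example only.
+ *
+ *   struct {
+ *      SVGA3dCmdHeader         header;
+ *      SVGA3dCmdDestroySurface body;   // defined later in this file
+ *   } *cmd = fifo_reserve(sizeof(*cmd));
+ *
+ *   cmd->header.id   = SVGA_3D_CMD_SURFACE_DESTROY;
+ *   cmd->header.size = sizeof(cmd->body);
+ *   cmd->body.sid    = sid;
+ *   fifo_commit(sizeof(*cmd));
+ */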
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurface1Flags         surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurface1Flags         surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dClearFlag      clearFlag;
+   uint32               color;
+   float                depth;
+   uint32               stencil;
+   /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dLightType      type;
+   SVGA3dBool           inWorldSpace;
+   float                diffuse[4];
+   float                specular[4];
+   float                ambient[4];
+   float                position[4];
+   float                direction[4];
+   float                range;
+   float                falloff;
+   float                attenuation0;
+   float                attenuation1;
+   float                attenuation2;
+   float                theta;
+   float                phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRenderStateName   state;
+   union {
+      uint32               uintValue;
+      float                floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                 cid;
+   SVGA3dRenderTargetType type;
+   SVGA3dSurfaceImageId   target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
+
+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  surface;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy;               /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcSid;
+   uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy;               /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   SVGA3dBox             boxSrc;
+   SVGA3dBox             boxDest;
+   SVGA3dStretchBltMode  mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * If the discard flag is present in a surface DMA operation, the host may
+    * discard the contents of the current mipmap level and face of the target
+    * surface before applying the surface DMA contents.
+    */
+   uint32 discard : 1;
+
+   /*
+    * If the unsynchronized flag is present, the host may perform this upload
+    * without syncing to pending reads on this surface.
+    */
+   uint32 unsynchronized : 1;
+
+   /*
+    * Guests *MUST* set the reserved bits to 0 before submitting the command
+    * suffix as future flags may occupy these bits.
+    */
+   uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestImage guest;
+   SVGA3dSurfaceImageId host;
+   SVGA3dTransferType transfer;
+   /*
+    * Followed by variable number of SVGA3dCopyBox structures. For consistency
+    * in all clipping logic and coordinate translation, we define the
+    * "source" in each copyBox as the guest image and the
+    * "destination" as the host image, regardless of transfer
+    * direction.
+    *
+    * For efficiency, the SVGA3D device is free to copy more data than
+    * specified. For example, it may round copy boxes outwards such
+    * that they lie on particular alignment boundaries.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ *    This is a command suffix that will appear after a SurfaceDMA command in
+ *    the FIFO.  It contains some extra information that hosts may use to
+ *    optimize performance or protect the guest.  This suffix exists to preserve
+ *    backwards compatibility while also allowing for new functionality to be
+ *    implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 suffixSize;
+
+   /*
+    * The maximum offset is used to determine the maximum offset from the
+    * guestPtr base address that will be accessed or written to during this
+    * surfaceDMA.  If the suffix is supported, the host will respect this
+    * boundary while performing surface DMAs.
+    *
+    * Defaults to MAX_UINT32
+    */
+   uint32 maximumOffset;
+
+   /*
+    * A set of flags that describes optimizations that the host may perform
+    * while performing this surface DMA operation.  For correctness, the guest
+    * should never rely on behaviour that differs when these flags are set.
+    *
+    * Defaults to 0
+    */
+   SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
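+
+/*
+ * Illustrative sketch (not part of the device headers): the suffix is
+ * appended after the SVGA3dCmdSurfaceDMA body and its copy boxes.  A guest
+ * that wants the default behaviour fills it in conservatively; 'boxes' and
+ * 'numBoxes' are assumptions for this example only.
+ *
+ *   SVGA3dCmdSurfaceDMASuffix *suffix =
+ *      (SVGA3dCmdSurfaceDMASuffix *)&boxes[numBoxes];
+ *
+ *   suffix->suffixSize    = sizeof(*suffix);
+ *   suffix->maximumOffset = (uint32)~0;   // MAX_UINT32: no extra clamping
+ *   memset(&suffix->flags, 0, sizeof(suffix->flags));
+ */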
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ *   This command is the SVGA3D device's generic drawing entry point.
+ *   It can draw multiple ranges of primitives, optionally using an
+ *   index buffer, using an arbitrary collection of vertex buffers.
+ *
+ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ *   during this draw call. The declarations specify which surface
+ *   the vertex data lives in, what that vertex data is used for,
+ *   and how to interpret it.
+ *
+ *   Each SVGA3dPrimitiveRange defines a collection of primitives
+ *   to render using the same vertex arrays. An index buffer is
+ *   optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * A range hint is an optional specification for the range of indices
+    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+    * that the entire array will be used.
+    *
+    * These are only hints. The SVGA3D device may use them for
+    * performance optimization if possible, but it's also allowed to
+    * ignore these values.
+    */
+   uint32               first;
+   uint32               last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Define the origin and shape of a vertex or index array. Both
+    * 'offset' and 'stride' are in bytes. The provided surface will be
+    * reinterpreted as a flat array of bytes in the same format used
+    * by surface DMA operations. To avoid unnecessary conversions, the
+    * surface should be created with the SVGA3D_BUFFER format.
+    *
+    * Index 0 in the array starts 'offset' bytes into the surface.
+    * Index 1 begins at byte 'offset + stride', etc. Array indices may
+    * not be negative.
+    */
+   uint32               surfaceId;
+   uint32               offset;
+   uint32               stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   SVGA3dDeclType       type;
+   SVGA3dDeclMethod     method;
+   SVGA3dDeclUsage      usage;
+   uint32               usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+   SVGA3dVertexArrayIdentity  identity;
+   SVGA3dArray                array;
+   SVGA3dArrayRangeHint       rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+   /*
+    * Define a group of primitives to render, from sequential indices.
+    *
+    * The values of 'primType' and 'primitiveCount' imply the
+    * total number of vertices that will be rendered.
+    */
+   SVGA3dPrimitiveType  primType;
+   uint32               primitiveCount;
+
+   /*
+    * Optional index buffer. If indexArray.surfaceId is
+    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+    * without an index buffer is identical to rendering with an index
+    * buffer containing the sequence [0, 1, 2, 3, ...].
+    *
+    * If an index buffer is in use, indexWidth specifies the width in
+    * bytes of each index value. It must be less than or equal to
+    * indexArray.stride.
+    *
+    * (Currently, the SVGA3D device requires index buffers to be tightly
+    * packed. In other words, indexWidth == indexArray.stride)
+    */
+   SVGA3dArray          indexArray;
+   uint32               indexWidth;
+
+   /*
+    * Optional index bias. This number is added to all indices from
+    * indexArray before they are used as vertex array indices. This
+    * can be used in multiple ways:
+    *
+    *  - When not using an indexArray, this bias can be used to
+    *    specify where in the vertex arrays to begin rendering.
+    *
+    *  - A positive number here is equivalent to increasing the
+    *    offset in each vertex array.
+    *
+    *  - A negative number can be used to render using a small
+    *    vertex array and an index buffer that contains large
+    *    values. This may be used by some applications that
+    *    crop a vertex buffer without modifying their index
+    *    buffer.
+    *
+    * Note that rendering with a negative bias value may be slower and
+    * use more memory than rendering with a positive or zero bias.
+    */
+   int32                indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               numVertexDecls;
+   uint32               numRanges;
+
+   /*
+    * There are two variable size arrays after the
+    * SVGA3dCmdDrawPrimitives structure. In order,
+    * they are:
+    *
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+    *    the frequency divisor for the corresponding vertex decl).
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAW_PRIMITIVES */
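+
+/*
+ * Illustrative sketch (not part of the device headers): the size of a
+ * DRAW_PRIMITIVES body is the fixed struct above plus the trailing arrays,
+ * e.g. without the optional divisor array:
+ *
+ *   size_t bodySize = sizeof(SVGA3dCmdDrawPrimitives) +
+ *                     numVertexDecls * sizeof(SVGA3dVertexDecl) +
+ *                     numRanges * sizeof(SVGA3dPrimitiveRange);
+ *
+ * and that value is what would go into SVGA3dCmdHeader.size.
+ */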
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 primitiveCount;        /* How many primitives to render */
+   uint32 startVertexLocation;   /* Index of the vertex to start rendering at. */
+
+   uint8 primitiveType;          /* SVGA3dPrimitiveType */
+   uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint8 primitiveType;       /* SVGA3dPrimitiveType */
+
+   uint32 indexBufferSid;     /* Valid index buffer sid. */
+   uint32 indexBufferOffset;  /* Byte offset into the vertex buffer, almost */
+			      /* always 0 for DX9 guests, non-zero for OpenGL */
+                              /* guests.  We can't represent non-multiple of */
+                              /* stride offsets in D3D9Renderer... */
+   uint8 indexBufferStride;   /* Allowable values = 1, 2, or 4 */
+
+   int32 baseVertexLocation;  /* Bias applied to the index when selecting a */
+                              /* vertex from the streams, may be negative */
+
+   uint32 primitiveCount;     /* How many primitives to render */
+   uint32 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;          /* SVGA3dDeclType */
+   uint8 method;        /* SVGA3dDeclMethod */
+   uint8 usage;         /* SVGA3dDeclUsage */
+   uint8 usageIndex;
+   uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+/*
+ * Should the vertex element respect the stream value?  The high bit of the
+ * stream should be set to indicate that the stream should be respected.  If
+ * the high bit is not set, the stream will be ignored and replaced by the
+ * position index of the vertex element currently being considered.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
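+
+/*
+ * Illustrative sketch (not part of the device headers): a guest that wants
+ * its stream index honoured sets the flag in the 'stream' byte, e.g.
+ *
+ *   elem.stream = SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM | streamIndex;
+ *
+ * where 'elem' is an SVGA3dVertexElement and streamIndex is below 0x80.
+ */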
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numElements;
+
+   /*
+    * Followed by numElements SVGA3dVertexElement structures.
+    *
+    * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numStreams;
+   /*
+    * Followed by numStreams SVGA3dVertexStream structures.
+    *
+    * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   stage;
+   SVGA3dTextureStateName   name;
+   union {
+      uint32                value;
+      float                 floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   cid;
+   SVGA3dTransformType      type;
+   float                    matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                min;
+   float                max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dZRange         zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                diffuse[4];
+   float                ambient[4];
+   float                specular[4];
+   float                emissive[4];
+   float                shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dFace           face;
+   SVGA3dMaterial       material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   SVGA3dLightData      data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   uint32               enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   float                plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+   /* Followed by variable number of DWORDs for shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  reg;     /* register number */
+   SVGA3dShaderType        type;
+   SVGA3dShaderConstType   ctype;
+   uint32                  values[4];
+
+   /*
+    * Followed by a variable number of additional values.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dShaderType     type;
+   uint32               shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader;       /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;   /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA3D_CMD_WAIT_FOR_QUERY --
+ *
+ *    Will read the SVGA3dQueryResult structure pointed to by guestResult,
+ *    and if the state member is set to anything other than
+ *    SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op.
+ *
+ *    Otherwise, in addition to the query explicitly waited for,
+ *    all queries with the same type and issued with the same cid, for which
+ *    an SVGA_3D_CMD_END_QUERY command has previously been sent, will
+ *    be finished after execution of this command.
+ *
+ *    A query will be identified by the gmrId and offset of the guestResult
+ *    member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
+ *    been sent previously with an identical gmrId and offset, it will
+ *    effectively end all queries with an identical type issued with the
+ *    same cid, and the SVGA3dQueryResult structure pointed to by
+ *    guestResult will not be written to. This property can be used to
+ *    implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;        /* Same parameters passed to END_QUERY */
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               totalSize;    /* Set by guest before query is ended. */
+   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
+   union {                            /* Set by host on exit from PENDING state */
+      uint32            result32;
+      uint32            queryCookie; /* May be used to identify which QueryGetData this
+                                        result corresponds to. */
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
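+
+/*
+ * Illustrative sketch (not part of the device headers): a typical guest
+ * flow is to set the result structure to SVGA3D_QUERYSTATE_PENDING, send
+ * END_QUERY followed by WAIT_FOR_QUERY with the same cid/type/guestResult,
+ * and then watch 'state' until the host updates it.  'queryResult' (assumed
+ * to be a volatile mapping of the guest memory that guestResult points at)
+ * is an assumption for this example only.
+ *
+ *   queryResult->totalSize = sizeof(*queryResult);
+ *   queryResult->state     = SVGA3D_QUERYSTATE_PENDING;
+ *   // ... emit SVGA_3D_CMD_END_QUERY, then SVGA_3D_CMD_WAIT_FOR_QUERY,
+ *   // both pointing guestResult at this structure ...
+ *   while (queryResult->state == SVGA3D_QUERYSTATE_PENDING)
+ *      ;   // or sleep on the fence/IRQ instead of busy-waiting
+ */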
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ *    This is a blit from an SVGA3D surface to a Screen Object.
+ *    This blit must be directed at a specific screen.
+ *
+ *    The blit copies from a rectangular region of an SVGA3D surface
+ *    image to a rectangular region of a screen.
+ *
+ *    This command takes an optional variable-length list of clipping
+ *    rectangles after the body of the command. If no rectangles are
+ *    specified, there is no clipping region. The entire destRect is
+ *    drawn to. If one or more rectangles are included, they describe
+ *    a clipping region. The clip rectangle coordinates are measured
+ *    relative to the top-left corner of destRect.
+ *
+ *    The srcImage must be from mip=0 face=0.
+ *
+ *    This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId srcImage;
+   SVGASignedRect       srcRect;
+   uint32               destScreenId; /* Screen Object ID */
+   SVGASignedRect       destRect;
+   /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
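+
+/*
+ * Illustrative sketch (not part of the device headers): with numClipRects
+ * optional clipping rectangles appended, the command body occupies
+ *
+ *   sizeof(SVGA3dCmdBlitSurfaceToScreen) +
+ *   numClipRects * sizeof(SVGASignedRect)
+ *
+ * bytes, and each clip rectangle is expressed relative to the top-left
+ * corner of destRect, as described above.
+ */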
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface;               /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface;             /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer could
+ *   be different, but the guest must make sure that refBuffer holds exactly
+ *   the contents that were written when the host last received a screen DMA
+ *   command.
+ *
+ * - changeMap is generated by lib/blit, and it contains at least the changes
+ *   since the last received screen DMA.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+   uint32 screenId;
+   SVGAGuestImage refBuffer;
+   SVGAGuestImage destBuffer;
+   SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA;        /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA     (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX      (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY      (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA  (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt;   /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt;   /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint16 mode;
+   uint16 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt;   /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill;   /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 alphaVal;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend;   /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH  512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId tmp;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dSurfaceImageId gammaSurf;
+   SVGA3dSurfaceImageId alphaSurf;
+   uint32 gamma;
+   uint32 color;
+   uint32 color2;
+   int32 alphaOffsetX;
+   int32 alphaOffsetY;
+   /* Followed by variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend;   /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed objects definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobFormat ptDepth;
+   uint32 sizeInBytes;
+   PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceFormat format;
+   SVGA3dSurface1Flags surface1Flags;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   SVGAMobId mobid;
+   uint32 arraySize;
+   uint32 mobPitch;
+   SVGA3dSurface2Flags surface2Flags;
+   uint8 multisamplePattern;
+   uint8 qualityLevel;
+   uint8  pad0[2];
+   uint32 pad1[3];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY  (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId image;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+   uint32 dpi;
+   uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+	(sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;
+   uint8 methodUsage;
+   uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint16 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRect viewport;
+   SVGA3dRect scissorRect;
+   SVGA3dZRange zRange;
+
+   SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+   SVGAGBVertexElement decl1[4];
+
+   uint32 renderStates[SVGA3D_RS_MAX];
+   SVGAGBVertexElement decl2[18];
+   uint32 pad0[2];
+
+   struct {
+      SVGA3dFace face;
+      SVGA3dMaterial material;
+   } material;
+
+   float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+   float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+   SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+   SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+   /*
+    * Shaders currently bound
+    */
+   uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+   SVGAGBVertexElement decl3[10];
+   uint32 pad1[3];
+
+   uint32 occQueryActive;
+   uint32 occQueryValue;
+
+   /*
+    * Int/Bool Shader constants
+    */
+   SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   uint16 pShaderBValues;
+   uint16 vShaderBValues;
+
+
+   SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+   SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+   uint32 numVertexDecls;
+   uint32 numVertexStreams;
+   uint32 numVertexDivisors;
+   uint32 pad2[30];
+
+   /*
+    * Texture Stages
+    *
+    * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+    * textureStages array.
+    * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+    */
+   uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+   uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+   uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+   /*
+    * Float Shader constants.
+    */
+   SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+   SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
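+
+/*
+ * Illustrative sketch, not part of the device protocol headers: one way a
+ * guest driver might fill in the SVGA3dCmdSetOTableBase64 body before
+ * handing it to its own command submission path.  The helper name is
+ * hypothetical; only the field layout above is defined by the device.
+ */
+static inline void
+SVGAExampleFillSetOTableBase64(SVGA3dCmdSetOTableBase64 *cmd,
+                               SVGAOTableType type,
+                               PPN64 baseAddress,
+                               uint32 sizeInBytes,
+                               SVGAMobFormat ptDepth)
+{
+   cmd->type = type;                /* which object table is being based */
+   cmd->baseAddress = baseAddress;  /* first page of the table's backing */
+   cmd->sizeInBytes = sizeInBytes;  /* total size of the table in bytes */
+   cmd->validSizeInBytes = 0;       /* a fresh table has no valid entries */
+   cmd->ptDepth = ptDepth;          /* page-table depth of the backing */
+}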
+
+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable;  /* SVGA_3D_CMD_GROW_OTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurface1Flags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurface1Flags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+   uint32 sid;
+   SVGA3dSurfaceAllFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dMSPattern multisamplePattern;
+   SVGA3dMSQualityLevel qualityLevel;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+   uint32 sid;
+   SVGAMobId mobid;
+   uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob.  Optionally issue a
+ * readback/update on the surface while it is still bound to the old
+ * mobid if the mobid is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE   (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdCondBindGBSurface {
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
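+
+/*
+ * Illustrative sketch, not part of the device protocol headers: a
+ * conditional rebind that asks the device to read the surface back into
+ * the old mob before switching to the new one.  The helper name is
+ * hypothetical; the flag values are the ones defined above.
+ */
+static inline void
+SVGAExampleFillCondBindGBSurface(SVGA3dCmdCondBindGBSurface *cmd,
+                                 uint32 sid,
+                                 SVGAMobId currentMobid,
+                                 SVGAMobId newMobid)
+{
+   cmd->sid = sid;
+   cmd->testMobid = currentMobid;   /* only rebind if this mob is bound */
+   cmd->mobid = newMobid;
+   cmd->flags = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK;
+}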
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
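+
+/*
+ * Illustrative sketch, not part of the device protocol headers: per the
+ * comment above, a partial readback must be followed by an update of the
+ * same region before the surface is referenced again.  This hypothetical
+ * helper fills both command bodies with the same image and box.
+ */
+static inline void
+SVGAExampleFillPartialReadbackPair(SVGA3dCmdReadbackGBImagePartial *readback,
+                                   SVGA3dCmdUpdateGBImage *update,
+                                   SVGA3dSurfaceImageId image,
+                                   SVGA3dBox box)
+{
+   readback->image = image;
+   readback->box = box;
+   readback->invertBox = 0;   /* do not invert the box */
+
+   update->image = image;     /* the required follow-up update */
+   update->box = box;
+}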
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
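+
+/*
+ * Illustrative sketch, not part of the device protocol headers: the two
+ * ways the comment above says validContents should be used.  The helper
+ * name and parameter are hypothetical.
+ */
+static inline void
+SVGAExampleFillBindGBContext(SVGA3dCmdBindGBContext *cmd,
+                             uint32 cid,
+                             SVGAMobId mobid,
+                             uint32 pagedBack)
+{
+   cmd->cid = cid;
+   cmd->mobid = mobid;
+   /* 0 for a brand-new context, 1 when an old context is paged back in. */
+   cmd->validContents = pagedBack ? 1 : 0;
+}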
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4-dwords in length, while
+    * BOOL constants are 1-dword in length.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline;   /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
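+
+/*
+ * Illustrative sketch, not part of the device protocol headers: computing
+ * the size of the inline payload that follows the command body, using the
+ * dword counts given in the comment above (FLOAT and INT constants are
+ * four dwords each, BOOL constants one dword).  The helper name is
+ * hypothetical.
+ */
+static inline uint32
+SVGAExampleShaderConstPayloadBytes(uint32 boolConstants, uint32 numConstants)
+{
+   /* FLOAT/INT constants: 4 dwords each; BOOL constants: 1 dword each. */
+   uint32 dwordsPerConst = boolConstants ? 1 : 4;
+
+   return numConstants * dwordsPerConst * (uint32)sizeof(uint32);
+}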
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to the
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
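+
+/*
+ * Illustrative sketch, not part of the device protocol headers: as the
+ * comment above notes, the only difference from SVGA_3D_CMD_WAIT_FOR_QUERY
+ * is that the result is written at an offset inside a mob rather than into
+ * a GMR.  The helper name is hypothetical.
+ */
+static inline void
+SVGAExampleFillWaitForGBQuery(SVGA3dCmdWaitForGBQuery *cmd,
+                              uint32 cid,
+                              SVGA3dQueryType type,
+                              SVGAMobId resultMob,
+                              uint32 resultOffset)
+{
+   cmd->cid = cid;
+   cmd->type = type;
+   cmd->mobid = resultMob;      /* mob that receives the query result */
+   cmd->offset = resultOffset;  /* byte offset of the result in the mob */
+}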
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 mustBeZero;
+   uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+
+   /*
+    * The physical DPI that the guest expects this screen displayed at.
+    *
+    * Guests which are not DPI-aware should set this to zero.
+    */
+   uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
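+
+/*
+ * Illustrative sketch, not part of the device protocol headers: defining a
+ * primary screen target at the guest's desktop origin.  A driver that is
+ * not DPI-aware sets dpi to zero, as the comment above requires.  The
+ * helper name and example values are hypothetical.
+ */
+static inline void
+SVGAExampleFillDefineGBScreenTarget(SVGA3dCmdDefineGBScreenTarget *cmd,
+                                    uint32 stid,
+                                    uint32 width,
+                                    uint32 height)
+{
+   cmd->stid = stid;
+   cmd->width = width;
+   cmd->height = height;
+   cmd->xRoot = 0;
+   cmd->yRoot = 0;
+   cmd->flags = SVGA_STFLAG_PRIMARY;
+   cmd->dpi = 0;                     /* not DPI-aware */
+}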
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+   uint32 screenId;
+   uint32 dead;
+   SVGAMobId destMobID;
+   uint32 destPitch;
+   SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA;        /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId dest;
+
+   uint32 statusMobId;
+   uint32 statusMobOffset;
+
+   /* Reserved fields */
+   uint32 mustBeInvalidId;
+   uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy;  /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 0000000..f256560
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ *       SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.  It is set by the host and read by the guest.  This lets
+ *   us make new guest drivers that are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
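+
+/*
+ * Illustrative sketch, not part of the device protocol headers: comparing a
+ * hardware version read from SVGA_FIFO_3D_HWVERSION using the macros above.
+ * For example, SVGA3D_HWVERSION_WS8_B1 decodes to major 2, minor 1.  The
+ * helper name is hypothetical.
+ */
+static inline Bool
+SVGAExampleHWVersionAtLeast(uint32 hwversion, uint32 major, uint32 minor)
+{
+   return hwversion >= SVGA3D_MAKE_HWVERSION(major, minor);
+}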
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_INVALID                           = ((uint32)-1),
+   SVGA3D_DEVCAP_3D                                = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+
+   /*
+    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+    * fixed-function texture units available. Each of these units
+    * works in both FFP and Shader modes and supports texture
+    * transforms and texture coordinates. The host may have additional
+    * texture image units that are only usable with shaders.
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12,
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13,
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14,
+   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+
+   /*
+    * There is a hole in our devcap definitions for
+    * historical reasons.
+    *
+    * Define a constant just for completeness.
+    */
+   SVGA3D_DEVCAP_MISSING62                         = 62,
+
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets.  This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_ATI1                   = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_ATI2                   = 83,
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD1                             = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ORed together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ORed together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+    */
+   SVGA3D_DEVCAP_TS_COLOR_KEY                      = 93, /* boolean */
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD2                             = 94,
+
+   /*
+    * Does the device support DXContexts?
+    */
+   SVGA3D_DEVCAP_DXCONTEXT                         = 95,
+
+   /*
+    * What is the maximum size of a texture array?
+    *
+    * (Even if this cap is zero, cubemaps are still allowed.)
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
+
+   /*
+    * What is the maximum number of vertex buffers or vertex input registers
+    * that can be expected to work correctly with a DXContext?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+    * input registers without the SVGA3D_DEVCAP_SM41 cap, or up to
+    * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM41 cap,
+    * but only the registers up to this cap value are guaranteed to render
+    * correctly.
+    *
+    * If guest drivers are able to expose a lower limit, it is recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best effort on a case-by-case basis if guests exceed this.
+    */
+   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
+
+   /*
+    * What is the maximum number of constant buffers that can be expected to
+    * work correctly with a DX context?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * If guest drivers are able to expose a lower limit, it is recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best effort on a case-by-case basis if guests exceed this.
+    */
+   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
+
+   /*
+    * Does the device support provoking vertex control?
+    *
+    * If this cap is present, the provokingVertexLast field in the
+    * rasterizer state is enabled.  (Guests can then set it to FALSE,
+    * meaning that the first vertex is the provoking vertex, or TRUE,
+    * meaning that the last vertex is the provoking vertex.)
+    *
+    * If this cap is FALSE, then guests should set the provokingVertexLast
+    * to FALSE, otherwise rendering behavior is undefined.
+    */
+   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
+
+   SVGA3D_DEVCAP_DXFMT_X8R8G8B8                    = 100,
+   SVGA3D_DEVCAP_DXFMT_A8R8G8B8                    = 101,
+   SVGA3D_DEVCAP_DXFMT_R5G6B5                      = 102,
+   SVGA3D_DEVCAP_DXFMT_X1R5G5B5                    = 103,
+   SVGA3D_DEVCAP_DXFMT_A1R5G5B5                    = 104,
+   SVGA3D_DEVCAP_DXFMT_A4R4G4B4                    = 105,
+   SVGA3D_DEVCAP_DXFMT_Z_D32                       = 106,
+   SVGA3D_DEVCAP_DXFMT_Z_D16                       = 107,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8                     = 108,
+   SVGA3D_DEVCAP_DXFMT_Z_D15S1                     = 109,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8                  = 110,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4           = 111,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE16                 = 112,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8           = 113,
+   SVGA3D_DEVCAP_DXFMT_DXT1                        = 114,
+   SVGA3D_DEVCAP_DXFMT_DXT2                        = 115,
+   SVGA3D_DEVCAP_DXFMT_DXT3                        = 116,
+   SVGA3D_DEVCAP_DXFMT_DXT4                        = 117,
+   SVGA3D_DEVCAP_DXFMT_DXT5                        = 118,
+   SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
+   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
+   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
+   SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1                = 122,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
+   SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
+   SVGA3D_DEVCAP_DXFMT_V8U8                        = 126,
+   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                    = 127,
+   SVGA3D_DEVCAP_DXFMT_CxV8U8                      = 128,
+   SVGA3D_DEVCAP_DXFMT_X8L8V8U8                    = 129,
+   SVGA3D_DEVCAP_DXFMT_A2W10V10U10                 = 130,
+   SVGA3D_DEVCAP_DXFMT_ALPHA8                      = 131,
+   SVGA3D_DEVCAP_DXFMT_R_S10E5                     = 132,
+   SVGA3D_DEVCAP_DXFMT_R_S23E8                     = 133,
+   SVGA3D_DEVCAP_DXFMT_RG_S10E5                    = 134,
+   SVGA3D_DEVCAP_DXFMT_RG_S23E8                    = 135,
+   SVGA3D_DEVCAP_DXFMT_BUFFER                      = 136,
+   SVGA3D_DEVCAP_DXFMT_Z_D24X8                     = 137,
+   SVGA3D_DEVCAP_DXFMT_V16U16                      = 138,
+   SVGA3D_DEVCAP_DXFMT_G16R16                      = 139,
+   SVGA3D_DEVCAP_DXFMT_A16B16G16R16                = 140,
+   SVGA3D_DEVCAP_DXFMT_UYVY                        = 141,
+   SVGA3D_DEVCAP_DXFMT_YUY2                        = 142,
+   SVGA3D_DEVCAP_DXFMT_NV12                        = 143,
+   SVGA3D_DEVCAP_DXFMT_AYUV                        = 144,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS       = 145,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT           = 146,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT           = 147,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS          = 148,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT             = 149,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT              = 150,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT              = 151,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS       = 152,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT           = 153,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM          = 154,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT           = 155,
+   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS             = 156,
+   SVGA3D_DEVCAP_DXFMT_R32G32_UINT                 = 157,
+   SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
+   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24             = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT              = 162,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
+   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS           = 166,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM              = 167,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB         = 168,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT               = 169,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT               = 170,
+   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS             = 171,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UINT                 = 172,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SINT                 = 173,
+   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                = 174,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT                   = 175,
+   SVGA3D_DEVCAP_DXFMT_R32_UINT                    = 176,
+   SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
+   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
+   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8                = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_G8_UINT                 = 181,
+   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SINT                   = 185,
+   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                = 186,
+   SVGA3D_DEVCAP_DXFMT_R16_UNORM                   = 187,
+   SVGA3D_DEVCAP_DXFMT_R16_UINT                    = 188,
+   SVGA3D_DEVCAP_DXFMT_R16_SNORM                   = 189,
+   SVGA3D_DEVCAP_DXFMT_R16_SINT                    = 190,
+   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                 = 191,
+   SVGA3D_DEVCAP_DXFMT_R8_UNORM                    = 192,
+   SVGA3D_DEVCAP_DXFMT_R8_UINT                     = 193,
+   SVGA3D_DEVCAP_DXFMT_R8_SNORM                    = 194,
+   SVGA3D_DEVCAP_DXFMT_R8_SINT                     = 195,
+   SVGA3D_DEVCAP_DXFMT_P8                          = 196,
+   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP          = 197,
+   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM             = 198,
+   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM             = 199,
+   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                = 200,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB              = 201,
+   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                = 202,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB              = 203,
+   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                = 204,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB              = 205,
+   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                = 206,
+   SVGA3D_DEVCAP_DXFMT_ATI1                        = 207,
+   SVGA3D_DEVCAP_DXFMT_BC4_SNORM                   = 208,
+   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                = 209,
+   SVGA3D_DEVCAP_DXFMT_ATI2                        = 210,
+   SVGA3D_DEVCAP_DXFMT_BC5_SNORM                   = 211,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM  = 212,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS           = 213,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB         = 214,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS           = 215,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB         = 216,
+   SVGA3D_DEVCAP_DXFMT_Z_DF16                      = 217,
+   SVGA3D_DEVCAP_DXFMT_Z_DF24                      = 218,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                 = 219,
+   SVGA3D_DEVCAP_DXFMT_YV12                        = 220,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT          = 221,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT          = 222,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM          = 223,
+   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                = 224,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM           = 225,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM              = 226,
+   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                = 227,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                = 228,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                = 229,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT                   = 230,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                  = 231,
+   SVGA3D_DEVCAP_DXFMT_R16_FLOAT                   = 232,
+   SVGA3D_DEVCAP_DXFMT_D16_UNORM                   = 233,
+   SVGA3D_DEVCAP_DXFMT_A8_UNORM                    = 234,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM                   = 235,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM                   = 236,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM                   = 237,
+   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                = 238,
+   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM              = 239,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM              = 240,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM              = 241,
+   SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
+   SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
+
+   /*
+    * Advertises shaderModel 4.1 support, independent blend-states,
+    * cube-map arrays, and a higher vertex input register limit.
+    *
+    * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+    */
+   SVGA3D_DEVCAP_SM41                              = 244,
+
+   SVGA3D_DEVCAP_MULTISAMPLE_2X                    = 245,
+   SVGA3D_DEVCAP_MULTISAMPLE_4X                    = 246,
+
+   SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support having volume?
+ * MULTISAMPLE: Does the format support multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED                (1 <<  0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE            (1 <<  1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET       (1 <<  2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET       (1 <<  3)
+#define SVGA3D_DXFMT_BLENDABLE                (1 <<  4)
+#define SVGA3D_DXFMT_MIPS                     (1 <<  5)
+#define SVGA3D_DXFMT_ARRAY                    (1 <<  6)
+#define SVGA3D_DXFMT_VOLUME                   (1 <<  7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER         (1 <<  8)
+#define SVGA3D_DXFMT_MULTISAMPLE              (1 <<  9)
+#define SVGA3D_DXFMT_MAX                      (1 << 10)
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
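+
+/*
+ * Illustrative sketch, not part of the device protocol headers: checking
+ * whether a DXFMT devcap result reports a format as both supported and
+ * usable as a color render target, using the bit definitions above.  The
+ * helper name is hypothetical; how the devcap value is queried from the
+ * device is driver-specific and not shown here.
+ */
+static inline Bool
+SVGAExampleDXFmtIsColorTarget(SVGA3dDevCapResult result)
+{
+   uint32 needed = SVGA3D_DXFMT_SUPPORTED | SVGA3D_DXFMT_COLOR_RENDERTARGET;
+
+   return (result.u & needed) == needed;
+}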
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 0000000..7a49c94
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1635 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ *       SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN               0
+#define SVGA3D_INPUT_PER_VERTEX_DATA   0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX               2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN      1
+#define SVGA3D_RESOURCE_BUFFER        1
+#define SVGA3D_RESOURCE_TEXTURE1D     2
+#define SVGA3D_RESOURCE_TEXTURE2D     3
+#define SVGA3D_RESOURCE_TEXTURE3D     4
+#define SVGA3D_RESOURCE_TEXTURECUBE   5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX      6
+#define SVGA3D_RESOURCE_TYPE_MAX      7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_COLOR_WRITE_ENABLE_RED     (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN   (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE    (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA   (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL     (SVGA3D_COLOR_WRITE_ENABLE_RED |   \
+                                           SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+                                           SVGA3D_COLOR_WRITE_ENABLE_BLUE |  \
+                                           SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO   0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL    1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR  (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR  (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR  (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE     (1 << 7)
+typedef uint32 SVGA3dFilter;
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN     1
+#define SVGA3D_CULL_NONE    1
+#define SVGA3D_CULL_FRONT   2
+#define SVGA3D_CULL_BACK    3
+#define SVGA3D_CULL_MAX     4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID         0
+#define SVGA3D_COMPARISON_MIN             1
+#define SVGA3D_COMPARISON_NEVER           1
+#define SVGA3D_COMPARISON_LESS            2
+#define SVGA3D_COMPARISON_EQUAL           3
+#define SVGA3D_COMPARISON_LESS_EQUAL      4
+#define SVGA3D_COMPARISON_GREATER         5
+#define SVGA3D_COMPARISON_NOT_EQUAL       6
+#define SVGA3D_COMPARISON_GREATER_EQUAL   7
+#define SVGA3D_COMPARISON_ALWAYS          8
+#define SVGA3D_COMPARISON_MAX             9
+typedef uint8 SVGA3dComparisonFunc;
+
+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE        0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE         1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX         1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE   2
+#define SVGA3D_MULTISAMPLE_RAST_MAX            2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
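+/*
+ * For reference: the expression above evaluates to 4096 * 4 * 4 =
+ * 65536 bytes (64 KiB) per constant-buffer binding, consistent with
+ * 4096 four-component 32-bit constants.
+ */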
+
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+   struct {
+      float r;
+      float g;
+      float b;
+      float a;
+   };
+
+   float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext;   /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext;   /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext;   /* SVGA_3D_CMD_DX_BIND_CONTEXT */
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext;   /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+   uint32 slot;
+   SVGA3dShaderType type;
+   SVGA3dSurfaceId sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+   uint32 startView;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dShaderResourceViewId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
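+
+/*
+ * Illustrative sketch, not part of the device protocol headers: the total
+ * body size of a variable-length command such as the one above is the
+ * fixed header plus one SVGA3dShaderResourceViewId per view being bound.
+ * The helper name is hypothetical.
+ */
+static inline uint32
+SVGAExampleSetShaderResourcesBodySize(uint32 numViews)
+{
+   return (uint32)sizeof(SVGA3dCmdDXSetShaderResources) +
+          numViews * (uint32)sizeof(SVGA3dShaderResourceViewId);
+}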
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+   uint32 startSampler;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dSamplerId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+   uint32 vertexCount;
+   uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+   uint32 indexCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+   uint32 vertexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+   uint32 indexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+   uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+   SVGA3dSurfaceId sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+   uint32 startBuffer;
+   /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+   SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+   SVGA3dBlendStateId blendId;
+   float blendFactor[4];
+   uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+   uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
+ * to track query state transitions, but are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID   ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN       0
+#define SVGADX_QDSTATE_IDLE      0   /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE    1   /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING   2   /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED  3   /* Query has completed */
+#define SVGADX_QDSTATE_MAX       4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dQueryTypeUint8 type;
+   uint16 pad0;
+   SVGADXQueryDeviceState state;
+   SVGA3dDXQueryFlags flags;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+   SVGA3dQueryId queryId;
+   SVGA3dQueryType type;
+   SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+   SVGA3dQueryId queryId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+   SVGA3dQueryId queryId;
+   uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct MKS3dDXSOState {
+   uint32 offset;       /* Starting offset */
+   uint32 intOffset;    /* Internal offset */
+   uint32 vertexCount;  /* vertices written */
+   uint32 sizeInBytes;  /* max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+   SVGA3dSurfaceId sid;
+   uint32 offset;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
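+
+/*
+ * Illustrative sketch, not part of the device protocol headers: a
+ * stream-output target whose offset is SVGA3D_DX_SO_OFFSET_APPEND, so new
+ * stream-output data is appended after whatever the buffer already holds,
+ * as described above.  The helper name is hypothetical.
+ */
+static inline void
+SVGAExampleFillAppendingSoTarget(SVGA3dSoTarget *target,
+                                 SVGA3dSurfaceId sid,
+                                 uint32 sizeInBytes)
+{
+   target->sid = sid;
+   target->offset = SVGA3D_DX_SO_OFFSET_APPEND;
+   target->sizeInBytes = sizeInBytes;
+}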
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dSOTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport
+{
+   float x;
+   float y;
+   float width;
+   float height;
+   float minDepth;
+   float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+   uint32 pad0;
+   /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+   SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+   uint16 flags;
+   uint16 stencil;
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dBox destBox;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+   SVGA3dSurfaceId dest;
+   SVGA3dSurfaceId src;
+   uint32 destX;
+   uint32 srcX;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+/*
+ * Perform a surface copy between a multisampled and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy;               /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisampled and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy;           /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR           (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB   (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX         (1 << 3)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPresentBlt {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceId dstSid;
+   uint32 destSubResource;
+   SVGA3dBox boxSrc;
+   SVGA3dBox boxDest;
+   SVGA3dDXPresentBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource;   /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
+
+/*
+ * Read back a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource;   /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
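+
+/*
+ * A minimal usage sketch of the three commands above (assumed typical guest
+ * flow, with hypothetical helper names; fencing details are not specified by
+ * this header):
+ *
+ *    write_backing_mob(sid, data);                      update guest memory
+ *    submit(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, ...);    device picks it up
+ *    ...
+ *    submit(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, ...);
+ *    wait_for_fence();                                  then read the mob
+ *
+ * INVALIDATE_SUBRESOURCE simply tells the device the current contents of the
+ * image may be discarded.
+ */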
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into the requested box of
+ * another surface.  Supported if 3d is enabled and SVGA_CAP_DX
+ * is set.  This command does not take a context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
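+
+/*
+ * Addressing sketch, assuming the conventional pitched layout implied by
+ * srcPitch/srcSlicePitch (the exact formula is not spelled out here): the
+ * bytes feeding destination texel (x, y, z) of destBox would start at
+ *
+ *    srcOffset + z * srcSlicePitch + y * srcPitch + x * bytesPerTexel
+ *
+ * where bytesPerTexel is a hypothetical name for the destination format's
+ * texel size.
+ */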
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into the requested box of
+ * another surface.  Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
+ * The context is implied from the command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+   SVGA3dSurfaceId srcSid;
+   SVGA3dSurfaceId destSid;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ *      Consumes an SVGAObjectRef and hints that the host should consider
+ *      fetching/evicting the specified object.
+ *
+ *      An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ *      what object was affected.  (For instance, if the guest knows that
+ *      it is about to evict a DXShader, but doesn't know precisely which one,
+ *      the device can still use this to help limit its search, or track
+ *      how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ *      Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE              0
+#define SVGA_DX_HINT_PREFETCH_OBJECT   1
+#define SVGA_DX_HINT_PREEVICT_OBJECT   2
+#define SVGA_DX_HINT_PREFETCH_COBJECT  3
+#define SVGA_DX_HINT_PREEVICT_COBJECT  4
+#define SVGA_DX_HINT_MAX               5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+   SVGAOTableType type;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+   SVGACOTableType type;
+   uint32 cid;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+   SVGADXHintId hintId;
+
+   /*
+    * Followed by variable sized data depending on the hintId.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
+
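+/*
+ * A minimal encoding sketch (assumed typical guest usage, not a requirement
+ * stated by this header): an SVGA_DX_HINT_PREFETCH_OBJECT command body would
+ * consist of the fixed part followed by a single SVGAObjectRef, e.g.
+ *
+ *    struct {
+ *       SVGA3dCmdDXHint hint;   with hintId = SVGA_DX_HINT_PREFETCH_OBJECT
+ *       SVGAObjectRef   ref;    the object the host should consider fetching
+ *    } body;
+ *
+ * The COBJECT hint variants carry an SVGACObjectRef instead.
+ */
+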
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+   SVGA3dSurfaceId sid;
+   uint32 x;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+   uint32 slot;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 pad0;
+         uint32 pad1;
+      } buffer;
+      struct {
+         uint32 mostDetailedMip;
+         uint32 firstArraySlice;
+         uint32 mipLevels;
+         uint32 arraySize;
+      } tex; /* 1d, 2d, 3d, cube */
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 flags;
+         uint32 pad0;
+      } bufferex;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dShaderResourceViewDesc desc;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 padding0;
+      } buffer;
+      struct {
+         uint32 mipSlice;
+         uint32 firstArraySlice;
+         uint32 arraySize;
+      } tex;                    /* 1d, 2d, cube */
+      struct {
+         uint32 mipSlice;
+         uint32 firstW;
+         uint32 wSize;
+      } tex3D;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dRenderTargetViewDesc desc;
+   uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH   0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK         0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+   uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+   uint32 inputSlot;
+   uint32 alignedByteOffset;
+   SVGA3dSurfaceFormat format;
+   SVGA3dInputClassification inputSlotClass;
+   uint32 instanceDataStepRate;
+   uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 elid;
+   uint32 numDescs;
+   SVGA3dInputElementDesc desc[32];
+   uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+   /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+      uint8 blendEnable;
+      uint8 srcBlend;
+      uint8 destBlend;
+      uint8 blendOp;
+      uint8 srcBlendAlpha;
+      uint8 destBlendAlpha;
+      uint8 blendOpAlpha;
+      SVGA3dColorWriteEnable renderTargetWriteMask;
+      uint8 logicOpEnable;
+      uint8 logicOp;
+      uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+   uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+   SVGA3dBlendStateId blendId;
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+   SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+   uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+   uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+   SVGA3dSamplerId samplerId;
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+   SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+   uint32 pad[4];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+   SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+   uint32 cid;
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader;   /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+   uint32 cid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader;   /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
+/*
+ * The maximum number of streamout decl's in each streamout entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+   uint32 outputSlot;
+   uint32 registerIndex;
+   uint8  registerMask;
+   uint8  pad0;
+   uint16 pad1;
+   uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOTableStreamOutputEntry {
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+   uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+   SVGA3dStreamOutputId soid;
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64;  /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdDXSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+
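+/*
+ * A minimal sizing sketch (illustrative assumption): to back, say, the
+ * blend-state table of a context with room for n entries, a guest would bind
+ * a mob of at least n * sizeof(SVGACOTableDXBlendStateEntry) bytes:
+ *
+ *    SVGA3dCmdDXSetCOTable cmd = {
+ *       .cid              = cid,
+ *       .mobid            = mobid,
+ *       .type             = SVGA_COTABLE_BLENDSTATE,
+ *       .validSizeInBytes = n * sizeof(SVGACOTableDXBlendStateEntry),
+ *    };
+ *
+ * SVGA_COTABLE_BLENDSTATE is assumed here to be one of the SVGACOTableType
+ * values defined elsewhere in these headers.
+ */
+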
+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If there is an old cotable mob bound, it also has to still be valid.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+   uint32 cid;
+   SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+   uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+   uint32 bufferId;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+   uint32 sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+   uint32 layoutId;
+   SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+   uint32 indexBufferSid;
+   uint32 pad;
+   uint32 indexBufferOffset;
+   uint32 indexBufferFormat;
+   uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+   SVGADXInputAssemblyMobFormat inputAssembly;
+
+   struct {
+      uint32 blendStateId;
+      uint32 blendFactor[4];
+      uint32 sampleMask;
+      uint32 depthStencilStateId;
+      uint32 stencilRef;
+      uint32 rasterizerStateId;
+      uint32 depthStencilViewId;
+      uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+      uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+   } renderState;
+
+   struct {
+      uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+      uint32 soid;
+   } streamOut;
+   uint32 pad0[11];
+
+   uint8 numViewports;
+   uint8 numScissorRects;
+   uint16 pad1[1];
+
+   uint32 pad2[3];
+
+   SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+   uint32 pad3[32];
+
+   SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+   uint32 pad4[64];
+
+   struct {
+      uint32 queryID;
+      uint32 value;
+   } predication;
+   uint32 pad5[2];
+
+   struct {
+      uint32 shaderId;
+      SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+      uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+      uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+   } shaderState[SVGA3D_NUM_SHADERTYPE];
+   uint32 pad6[26];
+
+   SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+   SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+   uint32 pad7[380];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+   uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 0000000..b22a67f
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ *       SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_RENDER_TARGETS               8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS                      8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS                    5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS         20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
+/*
+ * Maximum size of shader text the SVGA device will allow, in bytes and in
+ * dwords (SVGA3D_MAX_SHADER_MEMORY).  Currently 8 MB, i.e. 2M dwords.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY  (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+                                   sizeof(uint32))
+
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 0000000..bdfc404
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *       SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 0000000..f2bfd3d
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1378 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * svga3d_surfacedefs.h --
+ *
+ *      Surface definitions and inlineable utilities for SVGA3d.
+ */
+
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_
+
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
+
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>
+
+#include "svga3d_reg.h"
+
+#define surf_size_struct struct drm_vmw_size
+
+/*
+ * enum svga3d_block_desc - describes generic properties about formats.
+ */
+enum svga3d_block_desc {
+	/* Nothing special can be said about this format. */
+	SVGA3DBLOCKDESC_NONE        = 0,
+
+	/* Format contains Blue/U data */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,
+	SVGA3DBLOCKDESC_W           = 1 << 0,
+	SVGA3DBLOCKDESC_BUMP_L      = 1 << 0,
+
+	/* Format contains Green/V data */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,
+	SVGA3DBLOCKDESC_V           = 1 << 1,
+
+	/* Format contains Red/W/Luminance data */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,
+	SVGA3DBLOCKDESC_U           = 1 << 2,
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,
+
+	/* Format contains Alpha/Q data */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,
+	SVGA3DBLOCKDESC_Q           = 1 << 3,
+
+	/* Format is a buffer */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,
+
+	/* Format is compressed */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,
+
+	/* Format uses IEEE floating point */
+	SVGA3DBLOCKDESC_FP          = 1 << 6,
+
+	/* Three separate blocks store data. */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 7,
+
+	/* 2 planes of Y, UV, e.g., NV12. */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+	/* 3 planes of separate Y, U, V, e.g., YV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+	/* Block with a stencil channel */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 11,
+
+	/* Typeless format */
+	SVGA3DBLOCKDESC_TYPELESS    = 1 << 12,
+
+	/* Channels are signed integers */
+	SVGA3DBLOCKDESC_SINT        = 1 << 13,
+
+	/* Channels are unsigned integers */
+	SVGA3DBLOCKDESC_UINT        = 1 << 14,
+
+	/* Channels are normalized (when sampling) */
+	SVGA3DBLOCKDESC_NORM        = 1 << 15,
+
+	/* Channels are in SRGB */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 16,
+
+	/* Shared exponent */
+	SVGA3DBLOCKDESC_EXP         = 1 << 17,
+
+	/* Format contains color data. */
+	SVGA3DBLOCKDESC_COLOR       = 1 << 18,
+	/* Format contains depth data. */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 19,
+	/* Format contains bump data. */
+	SVGA3DBLOCKDESC_BUMP        = 1 << 20,
+
+	/* Format contains YUV video data. */
+	SVGA3DBLOCKDESC_YUV_VIDEO   = 1 << 21,
+
+	/* For mixed unsigned/signed formats. */
+	SVGA3DBLOCKDESC_MIXED       = 1 << 22,
+
+	/* For distinguishing CxV8U8. */
+	SVGA3DBLOCKDESC_CX          = 1 << 23,
+
+	/* Different compressed format groups. */
+	SVGA3DBLOCKDESC_BC1         = 1 << 24,
+	SVGA3DBLOCKDESC_BC2         = 1 << 25,
+	SVGA3DBLOCKDESC_BC3         = 1 << 26,
+	SVGA3DBLOCKDESC_BC4         = 1 << 27,
+	SVGA3DBLOCKDESC_BC5         = 1 << 28,
+
+	SVGA3DBLOCKDESC_A_UINT    = SVGA3DBLOCKDESC_ALPHA |
+				    SVGA3DBLOCKDESC_UINT |
+				    SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_A_UNORM   = SVGA3DBLOCKDESC_A_UINT |
+				    SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_R_UINT    = SVGA3DBLOCKDESC_RED |
+				    SVGA3DBLOCKDESC_UINT |
+				    SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_R_UNORM   = SVGA3DBLOCKDESC_R_UINT |
+				    SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_R_SINT    = SVGA3DBLOCKDESC_RED |
+				    SVGA3DBLOCKDESC_SINT |
+				    SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_R_SNORM   = SVGA3DBLOCKDESC_R_SINT |
+				    SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_G_UINT    = SVGA3DBLOCKDESC_GREEN |
+				    SVGA3DBLOCKDESC_UINT |
+				    SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RG_UINT    = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RG_UNORM   = SVGA3DBLOCKDESC_RG_UINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_RG_SINT    = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RG_SNORM   = SVGA3DBLOCKDESC_RG_SINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_RGB_UINT   = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGB_SINT   = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGB_UNORM   = SVGA3DBLOCKDESC_RGB_UINT |
+				      SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+					 SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA_UINT  = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+					  SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA_SINT  = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_FP |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+				     SVGA3DBLOCKDESC_V |
+				     SVGA3DBLOCKDESC_BUMP,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+				     SVGA3DBLOCKDESC_BUMP_L |
+				     SVGA3DBLOCKDESC_MIXED |
+				     SVGA3DBLOCKDESC_BUMP,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+				     SVGA3DBLOCKDESC_W |
+				     SVGA3DBLOCKDESC_BUMP,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+				     SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_MIXED |
+				     SVGA3DBLOCKDESC_BUMP,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+				     SVGA3DBLOCKDESC_V |
+				     SVGA3DBLOCKDESC_W |
+				     SVGA3DBLOCKDESC_Q |
+				     SVGA3DBLOCKDESC_BUMP,
+	SVGA3DBLOCKDESC_L_UNORM    = SVGA3DBLOCKDESC_LUMINANCE |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_NORM |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_LA_UNORM   = SVGA3DBLOCKDESC_LUMINANCE |
+				     SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_NORM |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+				     SVGA3DBLOCKDESC_FP |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+				     SVGA3DBLOCKDESC_GREEN |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+				     SVGA3DBLOCKDESC_BLUE |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_YUV_VIDEO |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+				     SVGA3DBLOCKDESC_YUV_VIDEO |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_RGB_EXP       = SVGA3DBLOCKDESC_RED |
+					SVGA3DBLOCKDESC_GREEN |
+					SVGA3DBLOCKDESC_BLUE |
+					SVGA3DBLOCKDESC_EXP |
+					SVGA3DBLOCKDESC_COLOR,
+
+	SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+					SVGA3DBLOCKDESC_TYPELESS,
+	SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+				     SVGA3DBLOCKDESC_UINT |
+				     SVGA3DBLOCKDESC_NORM |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_NORM |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+					  SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+					      SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+					      SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+					      SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+					 SVGA3DBLOCKDESC_COMP_SNORM,
+	SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+					 SVGA3DBLOCKDESC_COMP_SNORM,
+
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_YUV_VIDEO |
+				     SVGA3DBLOCKDESC_PLANAR_YUV |
+				     SVGA3DBLOCKDESC_2PLANAR_YUV |
+				     SVGA3DBLOCKDESC_COLOR,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_YUV_VIDEO |
+				     SVGA3DBLOCKDESC_PLANAR_YUV |
+				     SVGA3DBLOCKDESC_3PLANAR_YUV |
+				     SVGA3DBLOCKDESC_COLOR,
+
+	SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+				     SVGA3DBLOCKDESC_UINT,
+	SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_DS      =    SVGA3DBLOCKDESC_DEPTH |
+				     SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_DS_UINT =    SVGA3DBLOCKDESC_DEPTH |
+				     SVGA3DBLOCKDESC_STENCIL |
+				     SVGA3DBLOCKDESC_UINT,
+	SVGA3DBLOCKDESC_DS_UNORM =   SVGA3DBLOCKDESC_DS_UINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_DEPTH_FP   = SVGA3DBLOCKDESC_DEPTH |
+				     SVGA3DBLOCKDESC_FP,
+
+	SVGA3DBLOCKDESC_UV_UINT    = SVGA3DBLOCKDESC_UV |
+				     SVGA3DBLOCKDESC_UINT,
+	SVGA3DBLOCKDESC_UV_SNORM   = SVGA3DBLOCKDESC_UV |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_NORM,
+	SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+				     SVGA3DBLOCKDESC_CX,
+	SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+				     SVGA3DBLOCKDESC_SINT |
+				     SVGA3DBLOCKDESC_NORM,
+};
+
+struct svga3d_channel_def {
+	union {
+		u8 blue;
+		u8 w_bump;
+		u8 l_bump;
+		u8 uv_video;
+		u8 u_video;
+	};
+	union {
+		u8 green;
+		u8 stencil;
+		u8 v_bump;
+		u8 v_video;
+	};
+	union {
+		u8 red;
+		u8 u_bump;
+		u8 luminance;
+		u8 y_video;
+		u8 depth;
+		u8 data;
+	};
+	union {
+		u8 alpha;
+		u8 q_bump;
+		u8 exp;
+	};
+};
+
+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Format
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes used for pitch computations
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit offsets (in bits, from the start of the block)
+ */
+struct svga3d_surface_desc {
+	SVGA3dSurfaceFormat format;
+	enum svga3d_block_desc block_desc;
+
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	struct svga3d_channel_def bit_depth;
+	struct svga3d_channel_def bit_offset;
+};
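+
+/*
+ * Worked example of how these fields combine, using the SVGA3D_DXT1 entry
+ * from the table below: block_size is {4, 4, 1} and bytes_per_block is 8,
+ * so a 64x64 DXT1 mip level spans (64/4) * (64/4) = 256 blocks, i.e.
+ * 256 * 8 = 2048 bytes, with a row pitch of (64/4) * 8 = 128 bytes.
+ */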
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+   {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+      {1, 1, 1},  0, 0,
+      {{0}, {0}, {0}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
+      {1, 1, 1},  2, 2,
+      {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
+      {1, 1, 1},  2, 2,
+      {{5}, {5}, {5}, {0}},
+      {{0}, {5}, {10}, {0}}},
+
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  2, 2,
+      {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  2, 2,
+      {{4}, {4}, {4}, {4}},
+      {{0}, {4}, {8}, {12}}},
+
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {8}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {1}, {15}, {0}},
+      {{0}, {0}, {1}, {0}}},
+
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {4}, {4}},
+      {{0}, {0}, {0}, {4}}},
+
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  2, 2,
+      {{6}, {5}, {5}, {0}},
+      {{10}, {5}, {0}, {0}}},
+
+   {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  3, 3,
+      {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
+
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+      {2, 1, 1},  4, 4,
+      {{8}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+      {2, 1, 1},  4, 4,
+      {{8}, {0}, {8}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+      {2, 2, 1},  6, 2,
+      {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  16, 16,
+      {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
+      {1, 1, 1},  16, 16,
+      {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
+      {1, 1, 1},  16, 16,
+      {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  12, 12,
+      {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  12, 12,
+      {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT,
+      {1, 1, 1},  12, 12,
+      {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT,
+      {1, 1, 1},  12, 12,
+      {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  8, 8,
+      {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT,
+      {1, 1, 1},  8, 8,
+      {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT,
+      {1, 1, 1},  8, 8,
+      {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  8, 8,
+      {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  8, 8,
+      {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  8, 8,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT,
+      {1, 1, 1},  8, 8,
+      {{0}, {8}, {0}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
+
+   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
+
+   {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  4, 4,
+      {{10}, {11}, {11}, {0}},
+      {{22}, {11}, {0}, {0}}},
+
+   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT,
+      {1, 1, 1},  4, 4,
+      {{0}, {8}, {0}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_P8, SVGA3DBLOCKDESC_NONE,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP,
+      {1, 1, 1},  4, 4,
+      {{9}, {9}, {9}, {5}},
+      {{18}, {9}, {0}, {27}}},
+
+   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE,
+      {2, 1, 1},  4, 4,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE,
+      {2, 1, 1},  4, 4,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
+
+   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {8}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+      {2, 2, 1},  6, 2,
+      {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  8, 8,
+      {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
+
+   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
+      {1, 1, 1},  4, 4,
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM,
+      {1, 1, 1},  2, 2,
+      {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
+      {1, 1, 1},  2, 2,
+      {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  2, 2,
+      {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
+      {4, 4, 1},  8, 8,
+      {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+};
+
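+/*
+ * clamped_umul32 - Multiply two u32 values, clamping the result to the
+ * maximum u32 value on overflow so that subsequent size computations
+ * saturate instead of silently wrapping.
+ */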
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a*b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/**
+ * svga3dsurface_get_mip_size - Given a base level size and the mip level,
+ * compute the size of the mip level.
+ */
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	size.pad64 = 0;
+
+	return size;
+}
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width,
+						  desc->block_size.width);
+	block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height,
+						   desc->block_size.height);
+	block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth,
+						  desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
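+
+/*
+ * Illustrative example (not part of the device headers): for
+ * SVGA3D_B5G6R5_UNORM (1x1 blocks, 2 pitch bytes per block) a 100 pixel
+ * wide image yields 100 * 2 = 200 bytes of pitch, while for
+ * SVGA3D_BC1_UNORM (4x4 blocks, 8 pitch bytes per block) the same width
+ * yields DIV_ROUND_UP(100, 4) = 25 blocks * 8 = 200 bytes.
+ */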
+
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
+ *
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe: if the result would overflow a u32, it
+ * returns MAX_UINT32 instead.
+ */
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
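+
+/*
+ * Illustrative sketch, not part of the device headers: a hypothetical
+ * helper showing how the lookup and size functions above are meant to be
+ * combined to get the tightly-packed size of a single 2D image (a pitch
+ * of zero requests tight row packing).
+ */
+static inline u32
+svga3dsurface_example_image_bytes(SVGA3dSurfaceFormat format,
+				  u32 width, u32 height)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	surf_size_struct size;
+
+	size.width = width;
+	size.height = height;
+	size.depth = 1;
+	size.pad64 = 0;
+
+	return svga3dsurface_get_image_buffer_size(desc, &size, 0);
+}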
+
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size of a surface,
+ * i.e. the total bytes needed for all mip levels of all array layers.
+ */
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  u32 num_layers)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	return total_size * num_layers;
+}
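+
+/*
+ * Worked example (illustrative only): a 256x256 SVGA3D_B8G8R8A8_UNORM
+ * surface with a full mip chain (9 levels) and a single layer serializes
+ * to 262144 + 65536 + 16384 + 4096 + 1024 + 256 + 64 + 16 + 4 = 349524
+ * bytes, i.e. the sum of the tightly-packed image sizes of all levels.
+ */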
+
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with the given parameters, taking the sample count
+ * into account.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+					   surf_size_struct base_level_size,
+					   u32 num_mip_levels,
+					   u32 num_layers,
+					   u32 num_samples)
+{
+	uint64_t total_size =
+		svga3dsurface_get_serialized_size(format,
+						  base_level_size,
+						  num_mip_levels,
+						  num_layers);
+	total_size *= max_t(u32, 1, num_samples);
+
+	return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) *
+			      desc->bytes_per_block;
+	const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
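+
+/*
+ * Worked example (illustrative only): for SVGA3D_BC1_UNORM (4x4 blocks,
+ * 8 bytes per block) in a 16 pixel wide image, rowstride is
+ * DIV_ROUND_UP(16, 4) * 8 = 32 bytes, so the pixel at (x=5, y=9, z=0)
+ * lands in block (1, 2) at offset 2 * 32 + 1 * 8 = 72 bytes.
+ */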
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
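+
+/*
+ * Illustrative note: the backing store lays out one full mip chain per
+ * face/layer, so e.g. for a cubemap the image at (face, mip) starts
+ * 'face' whole mip chains plus the first 'mip' images of the next chain
+ * into the buffer, which is what the loop above accumulates.
+ */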
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with just the GBObjects cap-bit
+ *                                             set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_X8R8G8B8 ||
+		format == SVGA3D_A8R8G8B8 ||
+		format == SVGA3D_R5G6B5   ||
+		format == SVGA3D_X1R5G5B5 ||
+		format == SVGA3D_A1R5G5B5 ||
+		format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_R8G8B8A8_UNORM ||
+		format == SVGA3D_B8G8R8A8_UNORM ||
+		format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ *                                         ScreenTarget?
+ *                                         (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	if (svga3dsurface_is_gb_screen_target_format(format)) {
+		return true;
+	}
+	return svga3dsurface_is_dx_screen_target_format(format);
+}
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 0000000..3083706
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1745 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ *       SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID         ((uint32)-1)
+
+typedef uint8 SVGABool8;   /* 8-bit Bool definition */
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 numerator;
+   uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+   uint32               srcx;
+   uint32               srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+   uint32               srcx;
+   uint32               srcy;
+   uint32               srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_INVALID               = 0,
+
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_FORMAT_MIN                   = 1,
+
+   SVGA3D_A8R8G8B8                     = 2,
+
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
+
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
+
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
+
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
+
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_FORMAT_DEAD1                 = 23,
+
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
+
+   SVGA3D_A2R10G10B10                  = 26,
+
+   /* signed formats */
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
+
+   /* mixed formats */
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
+
+   SVGA3D_ALPHA8                       = 32,
+
+   /* Single- and dual-component floating point formats */
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
+
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
+
+   SVGA3D_V16U16                       = 39,
+
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
+
+   /* Packed Video formats */
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24              = 62,
+   SVGA3D_X32_G8X24_UINT               = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8                 = 81,
+   SVGA3D_X24_G8_UINT                  = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_P8                           = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
+   SVGA3D_ATI1                         = 108,   /* DX9-specific BC4_UNORM */
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
+   SVGA3D_ATI2                         = 111,   /* DX9-specific BC5_UNORM */
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
+
+   /* Advanced depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
+
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   SVGA3D_R32G32B32A32_FLOAT           = 122,
+   SVGA3D_R16G16B16A16_FLOAT           = 123,
+   SVGA3D_R16G16B16A16_UNORM           = 124,
+   SVGA3D_R32G32_FLOAT                 = 125,
+   SVGA3D_R10G10B10A2_UNORM            = 126,
+   SVGA3D_R8G8B8A8_SNORM               = 127,
+   SVGA3D_R16G16_FLOAT                 = 128,
+   SVGA3D_R16G16_UNORM                 = 129,
+   SVGA3D_R16G16_SNORM                 = 130,
+   SVGA3D_R32_FLOAT                    = 131,
+   SVGA3D_R8G8_SNORM                   = 132,
+   SVGA3D_R16_FLOAT                    = 133,
+   SVGA3D_D16_UNORM                    = 134,
+   SVGA3D_A8_UNORM                     = 135,
+   SVGA3D_BC1_UNORM                    = 136,
+   SVGA3D_BC2_UNORM                    = 137,
+   SVGA3D_BC3_UNORM                    = 138,
+   SVGA3D_B5G6R5_UNORM                 = 139,
+   SVGA3D_B5G5R5A1_UNORM               = 140,
+   SVGA3D_B8G8R8A8_UNORM               = 141,
+   SVGA3D_B8G8R8X8_UNORM               = 142,
+   SVGA3D_BC4_UNORM                    = 143,
+   SVGA3D_BC5_UNORM                    = 144,
+
+   SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP                (1 << 0)
+
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC            (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC           (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER       (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER      (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE           (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET      (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL      (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY         (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS     (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS         (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET    (CONST64U(1) << 11)
+
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH              (CONST64U(1) << 12)
+
+#define SVGA3D_SURFACE_INACTIVE               (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE       (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME                 (CONST64U(1) << 15)
+
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET           (CONST64U(1) << 16)
+
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16                (CONST64U(1) << 17)
+
+#define SVGA3D_SURFACE_1D                     (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY                  (CONST64U(1) << 19)
+
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER     (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER      (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER   (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE   (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET     (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL     (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT     (CONST64U(1) << 26)
+
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces.  No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD         (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD       (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE   (CONST64U(1) << 29)
+
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   (CONST64U(1) << 30)
+
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1              (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE            (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX               (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags:  Lower 32-bits of flags.
+ * SVGA3dSurface2Flags:  Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
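+
+/*
+ * Illustrative note: a full 64-bit SVGA3dSurfaceAllFlags value splits
+ * into the two 32-bit halves defined above, e.g. (hypothetical names):
+ *
+ *   SVGA3dSurface1Flags lo = (SVGA3dSurface1Flags)
+ *                            (allFlags & SVGA3D_SURFACE_FLAGS1_MASK);
+ *   SVGA3dSurface2Flags hi = (SVGA3dSurface2Flags)(allFlags >> 32);
+ */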
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
+        (  SVGA3D_SURFACE_MOB_PITCH    |         \
+           SVGA3D_SURFACE_SCREENTARGET |         \
+           SVGA3D_SURFACE_ALIGN16 |              \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |   \
+           SVGA3D_SURFACE_STAGING_UPLOAD |       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+           SVGA3D_SURFACE_MULTISAMPLE            \
+        )
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK   \
+       (   SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK     \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_INACTIVE |                \
+           SVGA3D_SURFACE_STAGING_UPLOAD |          \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK       \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_ARRAY |                   \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_MOB_PITCH                 \
+        )
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK  \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_SCREENTARGET |            \
+           SVGA3D_SURFACE_MOB_PITCH                 \
+        )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK             \
+        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
+           SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+        )
+
+#define SVGA3D_SURFACE_STAGING_MASK             \
+        (  SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD      \
+        )
+
+#define SVGA3D_SURFACE_BIND_MASK                  \
+        (  SVGA3D_SURFACE_BIND_VERTEX_BUFFER   |  \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER    |  \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |  \
+           SVGA3D_SURFACE_BIND_SHADER_RESOURCE |  \
+           SVGA3D_SURFACE_BIND_RENDER_TARGET   |  \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL   |  \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT      \
+        )
+
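+/*
+ * Illustrative note: the *_DISALLOWED_MASK values above are meant to be
+ * tested against a surface's flags, e.g. a check of the form
+ * (surfaceFlags & SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK) != 0
+ * indicates a surface that cannot be used as a screen target.
+ */
+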
+typedef enum {
+   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * has the same depth when the alpha channel is ignored, e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer supports some level of Direct3D in this format, which
+ * implies that the driver can create a Context in this mode (for some
+ * render target format).  When this flag is set, the
+ * SVGA3DFORMAT_OP_DISPLAYMODE flag must also be set.
+ */
+   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
+ */
+   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked up data).
+ */
+   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions.
+ */
+   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler.
+ */
+   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering.
+ */
+   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will DE-linearize data on output to format)
+ */
+   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending.
+ */
+   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format.
+ */
+   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler.
+ */
+   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes, nor mipmapping.
+ */
+   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE                             \
+   (SVGA3DFORMAT_OP_TEXTURE                              | \
+    SVGA3DFORMAT_OP_VOLUMETEXTURE                        | \
+    SVGA3DFORMAT_OP_CUBETEXTURE                          | \
+    SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET               | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET             | \
+    SVGA3DFORMAT_OP_ZSTENCIL                             | \
+    SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH  | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+    SVGA3DFORMAT_OP_DISPLAYMODE                          | \
+    SVGA3DFORMAT_OP_3DACCELERATION                       | \
+    SVGA3DFORMAT_OP_PIXELSIZE                            | \
+    SVGA3DFORMAT_OP_CONVERT_TO_ARGB                      | \
+    SVGA3DFORMAT_OP_OFFSCREENPLAIN                       | \
+    SVGA3DFORMAT_OP_SRGBREAD                             | \
+    SVGA3DFORMAT_OP_BUMPMAP                              | \
+    SVGA3DFORMAT_OP_DMAP                                 | \
+    SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                   | \
+    SVGA3DFORMAT_OP_SRGBWRITE                            | \
+    SVGA3DFORMAT_OP_AUTOGENMIPMAP                        | \
+    SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE               \
+   (SVGA3DFORMAT_OP_NOFILTER               | \
+    SVGA3DFORMAT_OP_NOALPHABLEND           | \
+    SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*
+ * Entries must be located at the same position.
+ */
+typedef union {
+   uint32 value;
+   struct {
+      uint32 texture : 1;
+      uint32 volumeTexture : 1;
+      uint32 cubeTexture : 1;
+      uint32 offscreenRenderTarget : 1;
+      uint32 sameFormatRenderTarget : 1;
+      uint32 unknown1 : 1;
+      uint32 zStencil : 1;
+      uint32 zStencilArbitraryDepth : 1;
+      uint32 sameFormatUpToAlpha : 1;
+      uint32 unknown2 : 1;
+      uint32 displayMode : 1;
+      uint32 acceleration3d : 1;
+      uint32 pixelSize : 1;
+      uint32 convertToARGB : 1;
+      uint32 offscreenPlain : 1;
+      uint32 sRGBRead : 1;
+      uint32 bumpMap : 1;
+      uint32 dmap : 1;
+      uint32 noFilter : 1;
+      uint32 memberOfGroupARGB : 1;
+      uint32 sRGBWrite : 1;
+      uint32 noAlphaBlend : 1;
+      uint32 autoGenMipMap : 1;
+      uint32 vertexTexture : 1;
+      uint32 noTexCoordWrapNorMip : 1;
+   };
+} SVGA3dSurfaceFormatCaps;
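+
+/*
+ * Illustrative note: because the bit positions mirror SVGA3DFORMAT_OP_*,
+ * on the little-endian bitfield layouts this header assumes, setting e.g.
+ *
+ *   caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_SRGBREAD;
+ *
+ * is equivalent to setting caps.texture = 1 and caps.sRGBRead = 1.
+ */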
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_RS_INVALID                   = 0,
+   SVGA3D_RS_MIN                       = 1,
+   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
+   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
+   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
+   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
+   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
+   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
+   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
+   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
+   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
+   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
+   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
+   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
+   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
+   SVGA3D_RS_FOGSTART                  = 16,    /* float */
+   SVGA3D_RS_FOGEND                    = 17,    /* float */
+   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
+   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
+   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
+   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
+   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
+   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
+   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
+   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
+   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
+   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
+   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
+   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
+   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
+   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
+   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
+   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
+   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
+   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+   SVGA3D_RS_ZBIAS                     = 45,    /* float */
+   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+   /*
+    * Output Gamma Level
+    *
+    * Output gamma affects the gamma curve of colors that are output from the
+    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
+    * value is <= 0.0, gamma correction is ignored and linear color space is
+    * used.
+    */
+
+   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEWIDTH                 = 98,    /* float */
+   SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
+   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
+   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
+   SVGA3D_VERTEXMATERIAL_MAX      = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+   SVGA3D_FILLMODE_INVALID = 0,
+   SVGA3D_FILLMODE_MIN     = 1,
+   SVGA3D_FILLMODE_POINT   = 1,
+   SVGA3D_FILLMODE_LINE    = 2,
+   SVGA3D_FILLMODE_FILL    = 3,
+   SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16   mode;       /* SVGA3dFillModeType */
+      uint16   face;       /* SVGA3dFace */
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
+
+typedef enum {
+   SVGA3D_SHADEMODE_INVALID = 0,
+   SVGA3D_SHADEMODE_FLAT    = 1,
+   SVGA3D_SHADEMODE_SMOOTH  = 2,
+   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
+   SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16 repeat;
+      uint16 pattern;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+   SVGA3D_BLENDOP_INVALID             = 0,
+   SVGA3D_BLENDOP_MIN                 = 1,
+   SVGA3D_BLENDOP_ZERO                = 1,
+   SVGA3D_BLENDOP_ONE                 = 2,
+   SVGA3D_BLENDOP_SRCCOLOR            = 3,
+   SVGA3D_BLENDOP_INVSRCCOLOR         = 4,
+   SVGA3D_BLENDOP_SRCALPHA            = 5,
+   SVGA3D_BLENDOP_INVSRCALPHA         = 6,
+   SVGA3D_BLENDOP_DESTALPHA           = 7,
+   SVGA3D_BLENDOP_INVDESTALPHA        = 8,
+   SVGA3D_BLENDOP_DESTCOLOR           = 9,
+   SVGA3D_BLENDOP_INVDESTCOLOR        = 10,
+   SVGA3D_BLENDOP_SRCALPHASAT         = 11,
+   SVGA3D_BLENDOP_BLENDFACTOR         = 12,
+   SVGA3D_BLENDOP_INVBLENDFACTOR      = 13,
+   SVGA3D_BLENDOP_SRC1COLOR           = 14,
+   SVGA3D_BLENDOP_INVSRC1COLOR        = 15,
+   SVGA3D_BLENDOP_SRC1ALPHA           = 16,
+   SVGA3D_BLENDOP_INVSRC1ALPHA        = 17,
+   SVGA3D_BLENDOP_BLENDFACTORALPHA    = 18,
+   SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+   SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+   SVGA3D_BLENDEQ_INVALID            = 0,
+   SVGA3D_BLENDEQ_MIN                = 1,
+   SVGA3D_BLENDEQ_ADD                = 1,
+   SVGA3D_BLENDEQ_SUBTRACT           = 2,
+   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
+   SVGA3D_BLENDEQ_MINIMUM            = 4,
+   SVGA3D_BLENDEQ_MAXIMUM            = 5,
+   SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+   SVGA3D_DX11_LOGICOP_MIN           = 0,
+   SVGA3D_DX11_LOGICOP_CLEAR         = 0,
+   SVGA3D_DX11_LOGICOP_SET           = 1,
+   SVGA3D_DX11_LOGICOP_COPY          = 2,
+   SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+   SVGA3D_DX11_LOGICOP_NOOP          = 4,
+   SVGA3D_DX11_LOGICOP_INVERT        = 5,
+   SVGA3D_DX11_LOGICOP_AND           = 6,
+   SVGA3D_DX11_LOGICOP_NAND          = 7,
+   SVGA3D_DX11_LOGICOP_OR            = 8,
+   SVGA3D_DX11_LOGICOP_NOR           = 9,
+   SVGA3D_DX11_LOGICOP_XOR           = 10,
+   SVGA3D_DX11_LOGICOP_EQUIV         = 11,
+   SVGA3D_DX11_LOGICOP_AND_REVERSE   = 12,
+   SVGA3D_DX11_LOGICOP_AND_INVERTED  = 13,
+   SVGA3D_DX11_LOGICOP_OR_REVERSE    = 14,
+   SVGA3D_DX11_LOGICOP_OR_INVERTED   = 15,
+   SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+   SVGA3D_FRONTWINDING_INVALID = 0,
+   SVGA3D_FRONTWINDING_CW      = 1,
+   SVGA3D_FRONTWINDING_CCW     = 2,
+   SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+   SVGA3D_FACE_INVALID  = 0,
+   SVGA3D_FACE_NONE     = 1,
+   SVGA3D_FACE_MIN      = 1,
+   SVGA3D_FACE_FRONT    = 2,
+   SVGA3D_FACE_BACK     = 3,
+   SVGA3D_FACE_FRONT_BACK = 4,
+   SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+   SVGA3D_CMP_INVALID              = 0,
+   SVGA3D_CMP_NEVER                = 1,
+   SVGA3D_CMP_LESS                 = 2,
+   SVGA3D_CMP_EQUAL                = 3,
+   SVGA3D_CMP_LESSEQUAL            = 4,
+   SVGA3D_CMP_GREATER              = 5,
+   SVGA3D_CMP_NOTEQUAL             = 6,
+   SVGA3D_CMP_GREATEREQUAL         = 7,
+   SVGA3D_CMP_ALWAYS               = 8,
+   SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+   SVGA3D_FOGFUNC_INVALID          = 0,
+   SVGA3D_FOGFUNC_EXP              = 1,
+   SVGA3D_FOGFUNC_EXP2             = 2,
+   SVGA3D_FOGFUNC_LINEAR           = 3,
+   SVGA3D_FOGFUNC_PER_VERTEX       = 4
+} SVGA3dFogFunction;
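+
+/*
+ * For reference, these values mirror the classic fixed-function fog
+ * factors, where d is the fog distance selected by SVGA3dFogBase:
+ *
+ *   LINEAR:  f = (end - d) / (end - start)
+ *   EXP:     f = 1 / e^(d * density)
+ *   EXP2:    f = 1 / e^((d * density)^2)
+ */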
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+   SVGA3D_FOGTYPE_INVALID          = 0,
+   SVGA3D_FOGTYPE_VERTEX           = 1,
+   SVGA3D_FOGTYPE_PIXEL            = 2,
+   SVGA3D_FOGTYPE_MAX              = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+   SVGA3D_FOGBASE_INVALID          = 0,
+   SVGA3D_FOGBASE_DEPTHBASED       = 1,
+   SVGA3D_FOGBASE_RANGEBASED       = 2,
+   SVGA3D_FOGBASE_MAX              = 3
+} SVGA3dFogBase;
+
+typedef enum {
+   SVGA3D_STENCILOP_INVALID        = 0,
+   SVGA3D_STENCILOP_MIN            = 1,
+   SVGA3D_STENCILOP_KEEP           = 1,
+   SVGA3D_STENCILOP_ZERO           = 2,
+   SVGA3D_STENCILOP_REPLACE        = 3,
+   SVGA3D_STENCILOP_INCRSAT        = 4,
+   SVGA3D_STENCILOP_DECRSAT        = 5,
+   SVGA3D_STENCILOP_INVERT         = 6,
+   SVGA3D_STENCILOP_INCR           = 7,
+   SVGA3D_STENCILOP_DECR           = 8,
+   SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+   SVGA3D_CLIPPLANE_0              = (1 << 0),
+   SVGA3D_CLIPPLANE_1              = (1 << 1),
+   SVGA3D_CLIPPLANE_2              = (1 << 2),
+   SVGA3D_CLIPPLANE_3              = (1 << 3),
+   SVGA3D_CLIPPLANE_4              = (1 << 4),
+   SVGA3D_CLIPPLANE_5              = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+   SVGA3D_CLEAR_COLOR              = 0x1,
+   SVGA3D_CLEAR_DEPTH              = 0x2,
+   SVGA3D_CLEAR_STENCIL            = 0x4,
+
+   /*
+    * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
+    * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+    * bit will be ignored.
+    */
+   SVGA3D_CLEAR_COLORFILL          = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+   SVGA3D_RT_DEPTH                 = 0,
+   SVGA3D_RT_MIN                   = 0,
+   SVGA3D_RT_STENCIL               = 1,
+   SVGA3D_RT_COLOR0                = 2,
+   SVGA3D_RT_COLOR1                = 3,
+   SVGA3D_RT_COLOR2                = 4,
+   SVGA3D_RT_COLOR3                = 5,
+   SVGA3D_RT_COLOR4                = 6,
+   SVGA3D_RT_COLOR5                = 7,
+   SVGA3D_RT_COLOR6                = 8,
+   SVGA3D_RT_COLOR7                = 9,
+   SVGA3D_RT_MAX,
+   SVGA3D_RT_INVALID               = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint32  red   : 1;
+      uint32  green : 1;
+      uint32  blue  : 1;
+      uint32  alpha : 1;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+   SVGA3D_VBLEND_DISABLE            = 0,
+   SVGA3D_VBLEND_1WEIGHT            = 1,
+   SVGA3D_VBLEND_2WEIGHT            = 2,
+   SVGA3D_VBLEND_3WEIGHT            = 3,
+   SVGA3D_VBLEND_MAX                = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+   SVGA3D_WRAPCOORD_0   = 1 << 0,
+   SVGA3D_WRAPCOORD_1   = 1 << 1,
+   SVGA3D_WRAPCOORD_2   = 1 << 2,
+   SVGA3D_WRAPCOORD_3   = 1 << 3,
+   SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_TS_INVALID                    = 0,
+   SVGA3D_TS_MIN                        = 1,
+   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
+   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
+   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
+   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
+   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
+   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
+   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
+   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
+   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
+   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
+   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
+   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
+   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
+
+
+   /*
+    * Sampler Gamma Level
+    *
+    * Sampler gamma affects the color of samples taken from the sampler.  A
+    * value of 1.0 will produce linear samples.  If the value is <= 0.0 the
+    * gamma value is ignored and a linear space is used.
+    */
+
+   SVGA3D_TS_GAMMA                      = 25,   /* float */
+   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
+   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
+   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_PREGB_MAX                  = 30,   /* Max value before GBObjects */
+   SVGA3D_TS_CONSTANT                   = 30,   /* SVGA3dColor */
+   SVGA3D_TS_COLOR_KEY_ENABLE           = 31,   /* SVGA3dBool */
+   SVGA3D_TS_COLOR_KEY                  = 32,   /* SVGA3dColor */
+   SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+   SVGA3D_TC_INVALID                   = 0,
+   SVGA3D_TC_DISABLE                   = 1,
+   SVGA3D_TC_SELECTARG1                = 2,
+   SVGA3D_TC_SELECTARG2                = 3,
+   SVGA3D_TC_MODULATE                  = 4,
+   SVGA3D_TC_ADD                       = 5,
+   SVGA3D_TC_ADDSIGNED                 = 6,
+   SVGA3D_TC_SUBTRACT                  = 7,
+   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
+   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
+   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
+   SVGA3D_TC_BLENDFACTORALPHA          = 11,
+   SVGA3D_TC_MODULATE2X                = 12,
+   SVGA3D_TC_MODULATE4X                = 13,
+   SVGA3D_TC_DSDT                      = 14,
+   SVGA3D_TC_DOTPRODUCT3               = 15,
+   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
+   SVGA3D_TC_ADDSIGNED2X               = 17,
+   SVGA3D_TC_ADDSMOOTH                 = 18,
+   SVGA3D_TC_PREMODULATE               = 19,
+   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
+   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
+   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
+   SVGA3D_TC_MULTIPLYADD               = 25,
+   SVGA3D_TC_LERP                      = 26,
+   SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
+typedef enum {
+   SVGA3D_TEX_ADDRESS_INVALID    = 0,
+   SVGA3D_TEX_ADDRESS_MIN        = 1,
+   SVGA3D_TEX_ADDRESS_WRAP       = 1,
+   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
+   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
+   SVGA3D_TEX_ADDRESS_BORDER     = 4,
+   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+   SVGA3D_TEX_ADDRESS_EDGE       = 6,
+   SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+   SVGA3D_TEX_FILTER_NONE           = 0,
+   SVGA3D_TEX_FILTER_MIN            = 0,
+   SVGA3D_TEX_FILTER_NEAREST        = 1,
+   SVGA3D_TEX_FILTER_LINEAR         = 2,
+   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+   SVGA3D_TEX_TRANSFORM_OFF    = 0,
+   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
+   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
+   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
+   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
+   SVGA3D_TEX_PROJECTED        = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+   SVGA3D_TEXCOORD_GEN_OFF              = 0,
+   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
+   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
+   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
+   SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+   SVGA3D_TA_INVALID    = 0,
+   SVGA3D_TA_TFACTOR    = 1,
+   SVGA3D_TA_PREVIOUS   = 2,
+   SVGA3D_TA_DIFFUSE    = 3,
+   SVGA3D_TA_TEXTURE    = 4,
+   SVGA3D_TA_SPECULAR   = 5,
+   SVGA3D_TA_CONSTANT   = 6,
+   SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+   SVGA3D_TM_NONE       = 0,
+   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
+   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+   SVGA3D_DECLUSAGE_POSITION     = 0,
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,
+   SVGA3D_DECLUSAGE_BLENDINDICES,
+   SVGA3D_DECLUSAGE_NORMAL,
+   SVGA3D_DECLUSAGE_PSIZE,
+   SVGA3D_DECLUSAGE_TEXCOORD,
+   SVGA3D_DECLUSAGE_TANGENT,
+   SVGA3D_DECLUSAGE_BINORMAL,
+   SVGA3D_DECLUSAGE_TESSFACTOR,
+   SVGA3D_DECLUSAGE_POSITIONT,
+   SVGA3D_DECLUSAGE_COLOR,
+   SVGA3D_DECLUSAGE_FOG,
+   SVGA3D_DECLUSAGE_DEPTH,
+   SVGA3D_DECLUSAGE_SAMPLE,
+   SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+   SVGA3D_DECLMETHOD_DEFAULT     = 0,
+   SVGA3D_DECLMETHOD_PARTIALU,
+   SVGA3D_DECLMETHOD_PARTIALV,
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
+   SVGA3D_DECLMETHOD_UV,
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+                                       /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+   SVGA3D_DECLTYPE_FLOAT1        =  0,
+   SVGA3D_DECLTYPE_FLOAT2        =  1,
+   SVGA3D_DECLTYPE_FLOAT3        =  2,
+   SVGA3D_DECLTYPE_FLOAT4        =  3,
+   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
+   SVGA3D_DECLTYPE_UBYTE4        =  5,
+   SVGA3D_DECLTYPE_SHORT2        =  6,
+   SVGA3D_DECLTYPE_SHORT4        =  7,
+   SVGA3D_DECLTYPE_UBYTE4N       =  8,
+   SVGA3D_DECLTYPE_SHORT2N       =  9,
+   SVGA3D_DECLTYPE_SHORT4N       = 10,
+   SVGA3D_DECLTYPE_USHORT2N      = 11,
+   SVGA3D_DECLTYPE_USHORT4N      = 12,
+   SVGA3D_DECLTYPE_UDEC3         = 13,
+   SVGA3D_DECLTYPE_DEC3N         = 14,
+   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
+   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
+   SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+   struct {
+      /*
+       * For index data, this number represents the number of instances to draw.
+       * For instance data, this number represents the number of
+       * instances/vertex in this stream
+       */
+      uint32 count : 30;
+
+      /*
+       * This is 1 if this is supposed to be the data that is repeated for
+       * every instance.
+       */
+      uint32 indexedData : 1;
+
+      /*
+       * This is 1 if this is supposed to be the per-instance data.
+       */
+      uint32 instanceData : 1;
+   };
+
+   uint32 value;
+} SVGA3dVertexDivisor;
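+
+/*
+ * Illustrative sketch only (not part of the device headers): filling in
+ * SVGA3dVertexDivisor for a draw of 'numInstances' instances, assuming the
+ * Direct3D-style semantics described in the comment above.  The stream that
+ * holds the shared geometry uses indexedData with the instance count; the
+ * stream that holds per-instance data uses instanceData with a count of 1.
+ */
+static inline void
+SVGA3dExampleSetupInstancingDivisors(SVGA3dVertexDivisor *geomStream,
+                                     SVGA3dVertexDivisor *instStream,
+                                     uint32 numInstances)
+{
+   geomStream->value = 0;
+   geomStream->indexedData = 1;      /* Repeated for every instance. */
+   geomStream->count = numInstances; /* Number of instances to draw. */
+
+   instStream->value = 0;
+   instStream->instanceData = 1;     /* Per-instance data stream. */
+   instStream->count = 1;            /* One set of values per instance. */
+}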
+
+typedef enum {
+   /*
+    * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
+    *
+    * List MIN second so debuggers will think INVALID is
+    * the correct name.
+    */
+   SVGA3D_PRIMITIVE_INVALID                     = 0,
+   SVGA3D_PRIMITIVE_MIN                         = 0,
+   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
+   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
+   SVGA3D_PRIMITIVE_LINELIST                    = 3,
+   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
+   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
+   SVGA3D_PRIMITIVE_LINELIST_ADJ                = 7,
+   SVGA3D_PRIMITIVE_PREDX_MAX                   = 7,
+   SVGA3D_PRIMITIVE_LINESTRIP_ADJ               = 8,
+   SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ            = 9,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ           = 10,
+   SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+   SVGA3D_COORDINATE_INVALID                   = 0,
+   SVGA3D_COORDINATE_LEFTHANDED                = 1,
+   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
+   SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+   SVGA3D_TRANSFORM_INVALID                     = 0,
+   SVGA3D_TRANSFORM_WORLD                       = 1,
+   SVGA3D_TRANSFORM_MIN                         = 1,
+   SVGA3D_TRANSFORM_VIEW                        = 2,
+   SVGA3D_TRANSFORM_PROJECTION                  = 3,
+   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
+   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
+   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
+   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
+   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
+   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
+   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
+   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
+   SVGA3D_TRANSFORM_WORLD1                      = 12,
+   SVGA3D_TRANSFORM_WORLD2                      = 13,
+   SVGA3D_TRANSFORM_WORLD3                      = 14,
+   SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+   SVGA3D_LIGHTTYPE_INVALID                     = 0,
+   SVGA3D_LIGHTTYPE_MIN                         = 1,
+   SVGA3D_LIGHTTYPE_POINT                       = 1,
+   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
+   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
+   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
+   SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+   SVGA3D_CUBEFACE_POSX                         = 0,
+   SVGA3D_CUBEFACE_NEGX                         = 1,
+   SVGA3D_CUBEFACE_POSY                         = 2,
+   SVGA3D_CUBEFACE_NEGY                         = 3,
+   SVGA3D_CUBEFACE_POSZ                         = 4,
+   SVGA3D_CUBEFACE_NEGZ                         = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
+   SVGA3D_SHADERTYPE_VS                         = 1,
+   SVGA3D_SHADERTYPE_PS                         = 2,
+   SVGA3D_SHADERTYPE_PREDX_MAX                  = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
+   SVGA3D_SHADERTYPE_DX10_MAX                   = 4,
+   SVGA3D_SHADERTYPE_HS                         = 4,
+   SVGA3D_SHADERTYPE_DS                         = 5,
+   SVGA3D_SHADERTYPE_CS                         = 6,
+   SVGA3D_SHADERTYPE_MAX                        = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+   (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+   (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+   (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
+typedef enum {
+   SVGA3D_CONST_TYPE_MIN                        = 0,
+   SVGA3D_CONST_TYPE_FLOAT                      = 0,
+   SVGA3D_CONST_TYPE_INT                        = 1,
+   SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX                        = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX            256
+#define SVGA3D_CONSTINTREG_MAX         16
+#define SVGA3D_CONSTBOOLREG_MAX        16
+
+typedef enum {
+   SVGA3D_STRETCH_BLT_POINT                     = 0,
+   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
+   SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+   SVGA3D_QUERYTYPE_INVALID                     = ((uint8)-1),
+   SVGA3D_QUERYTYPE_MIN                         = 0,
+   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
+   SVGA3D_QUERYTYPE_TIMESTAMP                   = 1,
+   SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT           = 2,
+   SVGA3D_QUERYTYPE_PIPELINESTATS               = 3,
+   SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE          = 4,
+   SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS           = 5,
+   SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE     = 6,
+   SVGA3D_QUERYTYPE_OCCLUSION64                 = 7,
+   SVGA3D_QUERYTYPE_EVENT                       = 8,
+   SVGA3D_QUERYTYPE_DX10_MAX                    = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 10,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 11,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 12,
+   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 13,
+   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 14,
+   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 15,
+   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 16,
+   SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE  (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 realFrequency;
+   uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 inputAssemblyVertices;
+   uint64 inputAssemblyPrimitives;
+   uint64 vertexShaderInvocations;
+   uint64 geometryShaderInvocations;
+   uint64 geometryShaderPrimitives;
+   uint64 clipperInvocations;
+   uint64 clipperPrimitives;
+   uint64 pixelShaderInvocations;
+   uint64 hullShaderInvocations;
+   uint64 domainShaderInvocations;
+   uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 numPrimitivesWritten;
+   uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+   SVGADXOcclusionQueryResult occ;
+   SVGADXEventQueryResult event;
+   SVGADXTimestampQueryResult ts;
+   SVGADXTimestampDisjointQueryResult tsDisjoint;
+   SVGADXPipelineStatisticsQueryResult pipelineStats;
+   SVGADXOcclusionPredicateQueryResult occPred;
+   SVGADXStreamOutStatisticsQueryResult soStats;
+   SVGADXStreamOutPredicateQueryResult soPred;
+   SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
+
+typedef enum {
+   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Query is not finished yet */
+   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully */
+   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully */
+   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+   SVGA3D_WRITE_HOST_VRAM        = 1,
+   SVGA3D_READ_HOST_VRAM         = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+   SVGA3D_LOGICOP_INVALID   = 0,
+   SVGA3D_LOGICOP_MIN       = 1,
+   SVGA3D_LOGICOP_COPY      = 1,
+   SVGA3D_LOGICOP_NOT       = 2,
+   SVGA3D_LOGICOP_AND       = 3,
+   SVGA3D_LOGICOP_OR        = 4,
+   SVGA3D_LOGICOP_XOR       = 5,
+   SVGA3D_LOGICOP_NXOR      = 6,
+   SVGA3D_LOGICOP_ROP3MIN   = 30,   /* 7-29 are reserved for future logic ops. */
+   SVGA3D_LOGICOP_ROP3MAX   = (SVGA3D_LOGICOP_ROP3MIN + 255),
+   SVGA3D_LOGICOP_MAX       = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
+      };
+      uint32     uintValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+   uint32 sid;
+   uint32 face;
+   uint32 mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+   uint32 sid;
+   uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               width;
+   uint32               height;
+   uint32               depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+   SVGA_OTABLE_MOB             = 0,
+   SVGA_OTABLE_MIN             = 0,
+   SVGA_OTABLE_SURFACE         = 1,
+   SVGA_OTABLE_CONTEXT         = 2,
+   SVGA_OTABLE_SHADER          = 3,
+   SVGA_OTABLE_SCREENTARGET    = 4,
+
+   SVGA_OTABLE_DX9_MAX         = 5,
+
+   SVGA_OTABLE_DXCONTEXT       = 5,
+   SVGA_OTABLE_DX_MAX          = 6,
+
+   SVGA_OTABLE_RESERVED1       = 6,
+   SVGA_OTABLE_RESERVED2       = 7,
+
+   /*
+    * Additions to this table need to be tied to HW-version features and
+    * checkpointed accordingly.
+    */
+   SVGA_OTABLE_DEVEL_MAX       = 8,
+   SVGA_OTABLE_MAX             = 8
+} SVGAOTableType;
+
+typedef enum {
+   SVGA_COTABLE_MIN             = 0,
+   SVGA_COTABLE_RTVIEW          = 0,
+   SVGA_COTABLE_DSVIEW          = 1,
+   SVGA_COTABLE_SRVIEW          = 2,
+   SVGA_COTABLE_ELEMENTLAYOUT   = 3,
+   SVGA_COTABLE_BLENDSTATE      = 4,
+   SVGA_COTABLE_DEPTHSTENCIL    = 5,
+   SVGA_COTABLE_RASTERIZERSTATE = 6,
+   SVGA_COTABLE_SAMPLER         = 7,
+   SVGA_COTABLE_STREAMOUTPUT    = 8,
+   SVGA_COTABLE_DXQUERY         = 9,
+   SVGA_COTABLE_DXSHADER        = 10,
+   SVGA_COTABLE_DX10_MAX        = 11,
+   SVGA_COTABLE_UAVIEW          = 11,
+   SVGA_COTABLE_MAX             = 12,
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID     = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0   = 0,
+   SVGA3D_MOBFMT_MIN         = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1   = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2   = 2,
+   SVGA3D_MOBFMT_RANGE       = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_PREDX_MAX   = 7,
+   SVGA3D_MOBFMT_EMPTY       = 7,
+   SVGA3D_MOBFMT_MAX,
+
+   /*
+    * This isn't actually used by the guest, but is a mob-format used
+    * internally by the SVGA device (and is therefore not binary compatible).
+    */
+   SVGA3D_MOBFMT_HB,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+   SVGA3D_MS_PATTERN_NONE     = 0,
+   SVGA3D_MS_PATTERN_MIN      = 0,
+   SVGA3D_MS_PATTERN_STANDARD = 1,
+   SVGA3D_MS_PATTERN_CENTER   = 2,
+   SVGA3D_MS_PATTERN_MAX      = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+   SVGA3D_MS_QUALITY_NONE = 0,
+   SVGA3D_MS_QUALITY_MIN  = 0,
+   SVGA3D_MS_QUALITY_FULL = 1,
+   SVGA3D_MS_QUALITY_MAX  = 2,
+} SVGA3dMSQualityLevel;
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
new file mode 100644
index 0000000..acb41e2
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ *    Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ *   0000: Reserved
+ *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
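+
+/*
+ * Illustrative sketch only (not part of the device headers): extracting the
+ * major command number from the first dword of escape data, per the
+ * high-word/low-word convention described above.
+ */
+static inline uint32
+SVGAExampleEscapeMajor(uint32 command)
+{
+   return command & SVGA_ESCAPE_VMWARE_MAJOR_MASK;
+}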
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * he host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ *     that use the SVGA Screen Object extension. Instead of sending
+ *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ *     Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
+
+typedef
+struct {
+   uint32 command;
+   uint32 fullscreen;
+   struct {
+      int32 x, y;
+   } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
new file mode 100644
index 0000000..e538514
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ *    Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
+
+typedef enum {
+   SVGA_OVERLAY_FORMAT_INVALID = 0,
+   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
+        /* FIFO escape layout:
+         * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
+        /* FIFO escape layout:
+         * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+   struct {
+      uint32 cmdType;
+      uint32 streamId;
+   } header;
+
+   /* May include zero or more items. */
+   struct {
+      uint32 registerId;
+      uint32 value;
+   } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+   uint32 cmdType;
+   uint32 streamId;
+} SVGAEscapeVideoFlush;
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+   uint32 command;
+   uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
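+
+/*
+ * Illustrative sketch only (not part of the device headers): filling in the
+ * escape payloads described above.  A flush payload is just "Type, Stream
+ * Id"; a single-register update is "Type, Stream Id, (Register Id, Value)".
+ * The per-stream register IDs themselves (SVGA_VIDEO_*) come from svga_reg.h.
+ */
+static inline void
+SVGAExampleBuildVideoFlush(SVGAEscapeVideoFlush *flush, uint32 streamId)
+{
+   flush->cmdType  = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+   flush->streamId = streamId;
+}
+
+static inline void
+SVGAExampleBuildVideoSetReg(SVGAEscapeVideoSetRegs *cmd, uint32 streamId,
+                            uint32 regId, uint32 value)
+{
+   cmd->header.cmdType      = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+   cmd->header.streamId     = streamId;
+   cmd->items[0].registerId = regId;
+   cmd->items[0].value      = value;
+}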
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ *      Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ *      TRUE on success; FALSE on failure.
+ *
+ * Side effects:
+ *      Pitches and offsets for the given YUV frame are put in 'pitches'
+ *      and 'offsets' respectively. They are both optional though.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
+{
+    int tmp;
+
+    *width = (*width + 1) & ~1;
+
+    if (offsets) {
+        offsets[0] = 0;
+    }
+
+    switch (format) {
+    case VMWARE_FOURCC_YV12:
+       *height = (*height + 1) & ~1;
+       *size = (*width) * (*height);
+
+       if (pitches) {
+          pitches[0] = *width;
+       }
+
+       if (offsets) {
+          offsets[1] = *size;
+       }
+
+       tmp = *width >> 1;
+
+       if (pitches) {
+          pitches[1] = pitches[2] = tmp;
+       }
+
+       tmp *= (*height >> 1);
+       *size += tmp;
+
+       if (offsets) {
+          offsets[2] = *size;
+       }
+
+       *size += tmp;
+       break;
+
+    case VMWARE_FOURCC_YUY2:
+    case VMWARE_FOURCC_UYVY:
+       *size = *width * 2;
+
+       if (pitches) {
+          pitches[0] = *size;
+       }
+
+       *size *= *height;
+       break;
+
+    default:
+       return false;
+    }
+
+    return true;
+}
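+
+/*
+ * Illustrative usage sketch only: computing the buffer size and per-plane
+ * pitches/offsets for a 320x240 YV12 frame.  Width and height are IN/OUT
+ * parameters because the helper rounds them up to even values first.
+ */
+static inline uint32
+VMwareVideoExampleYV12Size(void)
+{
+   uint32 width = 320, height = 240, size = 0;
+   uint32 pitches[3], offsets[3];
+
+   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &width, &height,
+                                 &size, pitches, offsets))
+      return 0;
+
+   /* 'size' covers the Y plane plus the two half-resolution chroma planes. */
+   return size;
+}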
+
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
new file mode 100644
index 0000000..056f54b
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -0,0 +1,2089 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ *    Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
+
+/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+typedef enum {
+   SVGA_REG_ENABLE_DISABLE = 0,
+   SVGA_REG_ENABLE_ENABLE = (1 << 0),
+   SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
+
+/*
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
+ */
+#define SVGA_MAX_WIDTH                  2560
+#define SVGA_MAX_HEIGHT                 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL         32
+#define SVGA_MAX_DEPTH                  24
+#define SVGA_MAX_DISPLAYS               10
+#define SVGA_MAX_SCREEN_SIZE            8192
+#define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE            0x0
+#define SVGA_CURSOR_ON_SHOW            0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3
+
+/*
+ * The maximum framebuffer size that can be traced for guests unless the
+ * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES.  In that case
+ * the full framebuffer can be traced independent of this limit.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
+#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC         0x900000UL
+#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2     2
+#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+   PALETTE_BASE has moved */
+#define SVGA_VERSION_1     1
+#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0     0
+#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
+#define SVGA_ID_INVALID    0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT         0x0
+#define SVGA_VALUE_PORT         0x1
+#define SVGA_BIOS_PORT          0x2
+#define SVGA_IRQSTATUS_PORT     0x8
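+
+/*
+ * Illustrative sketch only, assuming the conventional index/value access
+ * scheme for these ports: the register index is written to SVGA_INDEX_PORT
+ * and the register contents are then read or written through
+ * SVGA_VALUE_PORT.  'ioBase' is a hypothetical I/O base taken from BAR0;
+ * outl()/inl() are the usual port accessors from <asm/io.h>.
+ */
+static inline uint32
+SVGAExampleReadReg(unsigned long ioBase, uint32 index)
+{
+   outl(index, ioBase + SVGA_INDEX_PORT);
+   return inl(ioBase + SVGA_VALUE_PORT);
+}
+
+static inline void
+SVGAExampleWriteReg(unsigned long ioBase, uint32 index, uint32 value)
+{
+   outl(index, ioBase + SVGA_INDEX_PORT);
+   outl(value, ioBase + SVGA_VALUE_PORT);
+}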
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
+
+/*
+ * Registers
+ */
+
+enum {
+   SVGA_REG_ID = 0,
+   SVGA_REG_ENABLE = 1,
+   SVGA_REG_WIDTH = 2,
+   SVGA_REG_HEIGHT = 3,
+   SVGA_REG_MAX_WIDTH = 4,
+   SVGA_REG_MAX_HEIGHT = 5,
+   SVGA_REG_DEPTH = 6,
+   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
+   SVGA_REG_PSEUDOCOLOR = 8,
+   SVGA_REG_RED_MASK = 9,
+   SVGA_REG_GREEN_MASK = 10,
+   SVGA_REG_BLUE_MASK = 11,
+   SVGA_REG_BYTES_PER_LINE = 12,
+   SVGA_REG_FB_START = 13,            /* (Deprecated) */
+   SVGA_REG_FB_OFFSET = 14,
+   SVGA_REG_VRAM_SIZE = 15,
+   SVGA_REG_FB_SIZE = 16,
+
+   /* ID 0 implementation only had the above registers, then the palette */
+   SVGA_REG_ID_0_TOP = 17,
+
+   SVGA_REG_CAPABILITIES = 17,
+   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
+   SVGA_REG_MEM_SIZE = 19,
+   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
+   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_GUEST_ID = 23,            /* (Deprecated) */
+   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
+   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
+   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
+   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
+   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
+   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
+   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
+   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
+   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
+
+   /* Legacy multi-monitor support */
+   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
+   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
+   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
+
+   /* See "Guest memory regions" below. */
+   SVGA_REG_GMR_ID = 41,
+   SVGA_REG_GMR_DESCRIPTOR = 42,
+   SVGA_REG_GMR_MAX_IDS = 43,
+   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
+   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
+   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
+   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
+   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
+
+   /*
+    * Max primary memory.
+    * See SVGA_CAP_NO_BB_RESTRICTION.
+    */
+   SVGA_REG_MAX_PRIMARY_MEM = 50,
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_CMD_PREPEND_LOW = 53,
+   SVGA_REG_CMD_PREPEND_HIGH = 54,
+   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+   SVGA_REG_MOB_MAX_SIZE = 57,
+   SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+   SVGA_REG_CAP2 = 59,
+   SVGA_REG_DEVEL_CAP = 60,
+   SVGA_REG_TOP = 61,               /* Must be 1 more than the last register */
+
+   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
+   /* Next 768 (== 256*3) registers exist for colormap */
+   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+                                    /* Base of scratch registers */
+   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+      First 4 are reserved for VESA BIOS Extension; any remaining are for
+      the use of the current SVGA driver. */
+};
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ *   SVGA_REG_GMR_ID --
+ *
+ *     Read/write.
+ *     This register holds the 32-bit ID (a small positive integer)
+ *     of a GMR to create, delete, or redefine. Writing this register
+ *     has no side-effects.
+ *
+ *   SVGA_REG_GMR_DESCRIPTOR --
+ *
+ *     Write-only.
+ *     Writing this register will create, delete, or redefine the GMR
+ *     specified by the above ID register. If this register is zero,
+ *     the GMR is deleted. Any pointers into this GMR (including those
+ *     currently being processed by FIFO commands) will be
+ *     synchronously invalidated.
+ *
+ *     If this register is nonzero, it must be the physical page
+ *     number (PPN) of a data structure which describes the physical
+ *     layout of the memory region this GMR should describe. The
+ *     descriptor structure will be read synchronously by the SVGA
+ *     device when this register is written. The descriptor need not
+ *     remain allocated for the lifetime of the GMR.
+ *
+ *     The guest driver should write SVGA_REG_GMR_ID first, then
+ *     SVGA_REG_GMR_DESCRIPTOR.
+ *
+ *   SVGA_REG_GMR_MAX_IDS --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to support a maximum number of
+ *     user-defined GMR IDs. This register holds the number of supported
+ *     IDs. (The maximum supported ID plus 1)
+ *
+ *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to put a limit on the total number
+ *     of SVGAGuestMemDescriptor structures it will read when defining
+ *     a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ *   - Terminate the GMR descriptor list.
+ *     (ppn==0, numPages==0)
+ *
+ *   - Add a PPN or range of PPNs to the GMR's virtual address space.
+ *     (ppn != 0, numPages != 0)
+ *
+ *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ *     support multi-page GMR descriptor tables without forcing the
+ *     driver to allocate physically contiguous memory.
+ *     (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page.  The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well.  In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL         ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestMemDescriptor {
+   uint32 ppn;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestPtr {
+   uint32 gmrId;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
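+
+/*
+ * Illustrative sketch only: defining a GMR over a physically contiguous run
+ * of pages, following the SVGA_REG_GMR_ID / SVGA_REG_GMR_DESCRIPTOR protocol
+ * described above.  'descPage' is a guest page that will hold the descriptor
+ * list, 'descPpn' is that page's physical page number, and 'writeReg' is a
+ * hypothetical register-write helper (e.g. vmw_write in the vmwgfx driver).
+ */
+static inline void
+SVGAExampleDefineGmr(void (*writeReg)(uint32 index, uint32 value),
+                     SVGAGuestMemDescriptor *descPage, uint32 descPpn,
+                     uint32 gmrId, uint32 firstPpn, uint32 numPages)
+{
+   descPage[0].ppn      = firstPpn;   /* Pages backing the GMR. */
+   descPage[0].numPages = numPages;
+   descPage[1].ppn      = 0;          /* Terminator entry. */
+   descPage[1].numPages = 0;
+
+   writeReg(SVGA_REG_GMR_ID, gmrId);           /* Select the GMR ID... */
+   writeReg(SVGA_REG_GMR_DESCRIPTOR, descPpn); /* ...then define it. */
+}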
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register.  The
+ * fifo must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted when the guest writes the 64 byte
+ * aligned physical address into the SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH registers.  SVGA_REG_COMMAND_HIGH contains the
+ * upper 32 bits of the physical address.  SVGA_REG_COMMAND_LOW contains
+ * the lower 32 bits of the physical address; since the command buffer
+ * headers are required to be 64 byte aligned, the lower 6 bits are
+ * used for the SVGACBContext value.  Writing to SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution.  The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT queued command buffers per context;
+ * if that limit is reached the device will synchronously write the
+ * status SVGA_CB_STATUS_QUEUE_FULL to the status field of the command
+ * buffer header and will not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported.  If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is
+ * enough to fit a 64x64 color-cursor definition.  If the command is
+ * too large the device is allowed to process the command or return an
+ * error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register like accesses with the flexibility of
+ * commands.  There is a different command set defined by
+ * SVGADeviceContextCmdId.  The commands in each command buffer are not
+ * allowed to straddle physical pages.
+ *
+ * The offset field which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit can be set by the guest to bias the
+ * start of command processing into the buffer.  If an error is
+ * encountered the errorOffset will still be relative to the specific
+ * PA, not biased by the offset.  When the command buffer is finished
+ * the guest should not read the offset field as there is no guarantee
+ * what it will be set to.
+ *
+ * When the SVGA_CAP_HP_CMD_QUEUE cap bit is set a new command queue
+ * SVGA_CB_CONTEXT_1 is available.  Commands submitted to this queue
+ * will be executed as quickly as possible by the SVGA device
+ * potentially before already queued commands on SVGA_CB_CONTEXT_0.
+ * The SVGA device guarantees that any command buffers submitted to
+ * SVGA_CB_CONTEXT_0 will be executed after any _already_ submitted
+ * command buffers to SVGA_CB_CONTEXT_1.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024)  /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+   SVGA_CB_CONTEXT_DEVICE = 0x3f,
+   SVGA_CB_CONTEXT_0      = 0x0,
+   SVGA_CB_CONTEXT_1      = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_CB_CONTEXT_MAX    = 0x2,
+   SVGA_CB_CONTEXT_HP_MAX = 0x2,
+} SVGACBContext;
+
+
+typedef enum {
+   /*
+    * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+    * field before submitting the command buffer header; the host will
+    * change the value when it is done with the command buffer.
+    */
+   SVGA_CB_STATUS_NONE             = 0,
+
+   /*
+    * Written by the host when a command buffer completes successfully.
+    * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+    * the SVGA_CB_FLAG_NO_IRQ flag is set.
+    */
+   SVGA_CB_STATUS_COMPLETED        = 1,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate the command buffer was not submitted.  No
+    * IRQ is raised.
+    */
+   SVGA_CB_STATUS_QUEUE_FULL       = 2,
+
+   /*
+    * Written by the host when an error was detected parsing a command
+    * in the command buffer, errorOffset is written to contain the
+    * offset to the first byte of the failing command.  The device
+    * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+    * SVGA_IRQFLAG_COMMAND_BUFFER.  Some of the commands may have been
+    * processed.
+    */
+   SVGA_CB_STATUS_COMMAND_ERROR    = 3,
+
+   /*
+    * Written by the host if there is an error parsing the command
+    * buffer header.  The device raises the IRQ with both
+    * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER.  The device
+    * did not process any of the command buffer.
+    */
+   SVGA_CB_STATUS_CB_HEADER_ERROR  = 4,
+
+   /*
+    * Written by the host if the guest requested the host to preempt
+    * the command buffer.  The device will not raise any IRQs and the
+    * command buffer was not processed.
+    */
+   SVGA_CB_STATUS_PREEMPTED        = 5,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate that the command buffer was not submitted
+    * due to an error.  No IRQ is raised.
+    */
+   SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+   /*
+    * Written by the host when the host finished a
+    * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+    * queue.  The offset of the first byte not processed is stored in
+    * the errorOffset field of the command buffer header.  All guest
+    * visible side effects of commands till that point are guaranteed
+    * to be finished before this is written.  The
+    * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the
+    * SVGA_CB_FLAG_NO_IRQ is not set.
+    */
+   SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
+} SVGACBStatus;
+
+typedef enum {
+   SVGA_CB_FLAG_NONE       = 0,
+   SVGA_CB_FLAG_NO_IRQ     = 1 << 0,
+   SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+   SVGA_CB_FLAG_MOB        = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   volatile SVGACBStatus status; /* Modified by device. */
+   volatile uint32 errorOffset;  /* Modified by device. */
+   uint64 id;
+   SVGACBFlags flags;
+   uint32 length;
+   union {
+      PA pa;
+      struct {
+         SVGAMobId mobid;
+         uint32 mobOffset;
+      } mob;
+   } ptr;
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+                   * modified by device.
+                   */
+   uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+   uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
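+
+/*
+ * Illustrative sketch only: submitting a command buffer as described in the
+ * "Register based command buffers" comment above.  'headerPa' is the
+ * 64-byte-aligned physical address of an initialized SVGACBHeader and
+ * 'writeReg' is a hypothetical register-write helper (e.g. vmw_write in the
+ * vmwgfx driver).  The context is carried in the low bits of the low word,
+ * and the write to SVGA_REG_COMMAND_LOW is what submits the buffer.
+ */
+static inline void
+SVGAExampleSubmitCommandBuffer(void (*writeReg)(uint32 index, uint32 value),
+                               uint64 headerPa, SVGACBContext context)
+{
+   writeReg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPa >> 32));
+   writeReg(SVGA_REG_COMMAND_LOW,
+            (uint32)headerPa | ((uint32)context & SVGA_CB_CONTEXT_MASK));
+}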
+
+typedef enum {
+   SVGA_DC_CMD_NOP                   = 0,
+   SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
+   SVGA_DC_CMD_PREEMPT               = 2,
+   SVGA_DC_CMD_START_QUEUE           = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_ASYNC_STOP_QUEUE      = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE   = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_MAX                   = 6,
+} SVGADeviceContextCmdId;
+
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
+   uint32 enable;
+   SVGACBContext context; /* Must be zero */
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers on
+ * SVGA_CB_CONTEXT_0 that can be preempted are preempted.  After execution
+ * of this command all command buffers that were preempted will
+ * already have SVGA_CB_STATUS_PREEMPTED written into the status
+ * field.  The device might still be processing a command buffer,
+ * assuming execution of it started before the preemption request was
+ * received.  Setting the ignoreIDZero flag to TRUE will cause the
+ * device to not preempt command buffers with the id field in the
+ * command buffer header set to zero.
+ */
+
+typedef struct SVGADCCmdPreempt {
+   SVGACBContext context; /* Must be zero */
+   uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
+
+/*
+ * Starts the requested command buffer processing queue.  Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled.  But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+   SVGACBContext context;
+} SVGADCCmdStartQueue;
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible.  The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned
+ * 2) A command buffer error is encountered which would stop the queue
+ *    regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ *    active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish.  For instance if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be reenabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used
+ * to implement mid command buffer preemption.
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+   SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started.  For a
+ * driver to know which command buffers were thrown away a driver
+ * should only issue this command when the queue is stopped, for
+ * whatever reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+   SVGACBContext context;
+} SVGADCCmdEmptyQueue;
+
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ *    This is a packed representation of the source 2D image format
+ *    for a GMR-to-screen blit. Currently it is defined as an encoding
+ *    of the screen's color depth and bits-per-pixel, however, 16 bits
+ *    are reserved for future use to identify other encodings (such as
+ *    RGBA or higher-precision images).
+ *
+ *    Currently supported formats:
+ *
+ *       bpp depth  Format Name
+ *       --- -----  -----------
+ *        32    24  32-bit BGRX
+ *        24    24  24-bit BGR
+ *        16    16  RGB 5-6-5
+ *        16    15  RGB 5-5-5
+ *
+ */
+
+typedef struct SVGAGMRImageFormat {
+   union {
+      struct {
+         uint32 bitsPerPixel : 8;
+         uint32 colorDepth   : 8;
+         uint32 reserved     : 16;  /* Must be zero */
+      };
+
+      uint32 value;
+   };
+} SVGAGMRImageFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
+
+/*
+ * SVGAColorBGRX --
+ *
+ *    A 24-bit color format (BGRX), which does not depend on the
+ *    format of the legacy guest framebuffer (GFB) or the current
+ *    GMRFB state.
+ */
+
+typedef struct SVGAColorBGRX {
+   union {
+      struct {
+         uint32 b : 8;
+         uint32 g : 8;
+         uint32 r : 8;
+         uint32 x : 8;  /* Unused */
+      };
+
+      uint32 value;
+   };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ *    Signed rectangle and point primitives. These are used by the new
+ *    2D primitives for drawing to Screen Objects, which can occupy a
+ *    signed virtual coordinate space.
+ *
+ *    SVGASignedRect specifies a half-open interval: the (left, top)
+ *    pixel is part of the rectangle, but the (right, bottom) pixel is
+ *    not.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  left;
+   int32  top;
+   int32  right;
+   int32  bottom;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  x;
+   int32  y;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
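+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch:
+ * because SVGASignedRect is a half-open interval, width and height are
+ * plain differences with no +1 adjustment.
+ */
+static inline int32 svga_rect_width(const SVGASignedRect *rect)
+{
+   return rect->right - rect->left;
+}
+
+static inline int32 svga_rect_height(const SVGASignedRect *rect)
+{
+   return rect->bottom - rect->top;
+}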
+
+
+/*
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_CAP_IRQMASK --
+ *    Provides device interrupts.  Adds device register SVGA_REG_IRQMASK
+ *    to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ *    set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ *    Provides synchronous mapping of guest memory regions (GMR).
+ *    Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ *    SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ *    Allows framebuffer trace-based updates even when FIFO is enabled.
+ *    Adds device register SVGA_REG_TRACES.
+ *
+ * SVGA_CAP_GMR2 --
+ *    Provides asynchronous commands to define and remap guest memory
+ *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ *    SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ *    Allow screen object support, and require backing stores from the
+ *    guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ *    Enable register based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ *    This cap was incorrectly used by old drivers and should not be
+ *    reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ *    Enable support for the prepend command buffer submission
+ *    registers: SVGA_REG_CMD_PREPEND_LOW and
+ *    SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ *    Enable guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_DX --
+ *    Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ *    Enable support for the high priority command queue, and the
+ *    ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ *    Allow ScreenTargets to be defined without regard to the 32-bpp
+ *    bounding-box memory restrictions, i.e.:
+ *
+ *    The summed memory usage of all screens (assuming they were defined as
+ *    32-bpp) must always be less than the value of the
+ *    SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ *    If this cap is not present, the 32-bpp bounding box around all screens
+ *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ *    register.
+ *
+ *    If the cap is present, the bounding box restriction is lifted (and only
+ *    the screen-sum limit applies).
+ *
+ *    (Note that this is a slight lie... there is still a sanity limit on any
+ *     dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ *     when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ *     large enough to express any possible topology without holes between
+ *     monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ *    If this cap is present, the SVGA_REG_CAP2 register is supported.
+ */
+
+#define SVGA_CAP_NONE               0x00000000
+#define SVGA_CAP_RECT_COPY          0x00000002
+#define SVGA_CAP_CURSOR             0x00000020
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080
+#define SVGA_CAP_8BIT_EMULATION     0x00000100
+#define SVGA_CAP_ALPHA_CURSOR       0x00000200
+#define SVGA_CAP_3D                 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO      0x00008000
+#define SVGA_CAP_MULTIMON           0x00010000
+#define SVGA_CAP_PITCHLOCK          0x00020000
+#define SVGA_CAP_IRQMASK            0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000
+#define SVGA_CAP_GMR                0x00100000
+#define SVGA_CAP_TRACES             0x00200000
+#define SVGA_CAP_GMR2               0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
+#define SVGA_CAP_DX                 0x10000000
+#define SVGA_CAP_HP_CMD_QUEUE       0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION  0x40000000
+#define SVGA_CAP_CAP2_REGISTER      0x80000000
+
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ *      Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ *      Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ *      Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ *      Reserve the last bit for extending the SVGA capabilities to some
+ *      future mechanisms.
+ */
+#define SVGA_CAP2_NONE               0x00000000
+#define SVGA_CAP2_GROW_OTABLE        0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2                0x00000004
+#define SVGA_CAP2_RESERVED           0x80000000
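+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * Given capability words already read from the device, SVGA_REG_CAP2
+ * may only be consulted when SVGA_CAP_CAP2_REGISTER is set in the
+ * primary capability word.
+ */
+static inline bool svga_has_intra_surface_copy(uint32 caps, uint32 caps2)
+{
+   if (!(caps & SVGA_CAP_CAP2_REGISTER))
+      return false;   /* SVGA_REG_CAP2 is not implemented. */
+
+   return (caps2 & SVGA_CAP2_INTRA_SURFACE_COPY) != 0;
+}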
+
+
+/*
+ * The Guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized.  The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX).  On success the
+ * value of EBX will be set to BDOOR_MAGIC and EAX will be set to the
+ * requested capability.  If the command is not supported, EBX will be
+ * left unchanged and EAX will be set to -1.  Because -1 may be a
+ * legitimate value for the requested cap, the correct way to check
+ * whether the command succeeded is to initialize EBX to something
+ * other than BDOOR_MAGIC first and then verify that it was changed to
+ * BDOOR_MAGIC.
+ */
+
+typedef enum {
+   SVGABackdoorCapDeviceCaps = 0,
+   SVGABackdoorCapFifoCaps = 1,
+   SVGABackdoorCap3dHWVersion = 2,
+   SVGABackdoorCapDeviceCaps2 = 3,
+   SVGABackdoorCapMax = 4,
+} SVGABackdoorCapType;
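+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * The backdoor call itself is hidden behind a hypothetical helper, and
+ * the BDOOR_* constants are assumed to come from the backdoor headers.
+ * The point is the success check described above: seed EBX with a
+ * value other than BDOOR_MAGIC and trust EAX only if EBX was changed.
+ */
+static bool svga_backdoor_get_cap(SVGABackdoorCapType type, uint32 *out)
+{
+   uint32 eax = 0;
+   uint32 ebx = 0;   /* Anything other than BDOOR_MAGIC. */
+   uint32 ecx = BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16);
+
+   hypothetical_backdoor_call(ecx, &eax, &ebx);   /* hypothetical helper */
+
+   if (ebx != BDOOR_MAGIC)
+      return false;   /* Command not supported by the host. */
+
+   *out = eax;        /* May legitimately be (uint32)-1. */
+   return true;
+}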
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem.  It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ *   following data area is and how much data it contains; there may be
+ *   more registers following these, depending on the FIFO protocol
+ *   version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+   /*
+    * Block 1 (basic registers): The originally defined FIFO registers.
+    * These exist and are valid for all versions of the FIFO protocol.
+    */
+
+   SVGA_FIFO_MIN = 0,
+   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
+   SVGA_FIFO_NEXT_CMD,
+   SVGA_FIFO_STOP,
+
+   /*
+    * Block 2 (extended registers): Mandatory registers for the extended
+    * FIFO.  These exist if the SVGA caps register includes
+    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+    * associated capability bit is enabled.
+    *
+    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+    * This means that the guest has to test individually (in most cases
+    * using FIFO caps) for the presence of registers after this; the VMX
+    * can define "extended FIFO" to mean whatever it wants, and currently
+    * won't enable it unless there's room for that set and much more.
+    */
+
+   SVGA_FIFO_CAPABILITIES = 4,
+   SVGA_FIFO_FLAGS,
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
+   SVGA_FIFO_FENCE,
+
+   /*
+    * Block 3a (optional extended registers): Additional registers for the
+    * extended FIFO, whose presence isn't actually implied by
+    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+    * leave room for them.
+    *
+    * The registers in block 3a are currently considered mandatory by
+    * the VMX for the extended FIFO.
+    */
+
+   /* Valid if exists (i.e. if extended FIFO enabled): */
+   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
+   SVGA_FIFO_PITCHLOCK,
+
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
+   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
+   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
+   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
+   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
+   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
+   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
+
+   /*
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
+    *
+    * By default this is SVGA_ID_INVALID, to indicate that the cursor
+    * coordinates are specified relative to the virtual root. If this
+    * is set to a specific screen ID, cursor position is reinterpreted
+    * as a signed offset relative to that screen's origin.
+    */
+   SVGA_FIFO_CURSOR_SCREEN_ID,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host; drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
+   /*
+    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+    * registers, but this must be done carefully and with judicious use of
+    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+    * enough to tell you whether the register exists: we've shipped drivers
+    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+    * the earlier ones.  The actual order of introduction was:
+    * - PITCHLOCK
+    * - 3D_CAPS
+    * - CURSOR_* (cursor bypass 3)
+    * - RESERVED
+    * So, code that wants to know whether it can use any of the
+    * aforementioned registers, or anything else added after PITCHLOCK and
+    * before 3D_CAPS, needs to reason about something other than
+    * SVGA_FIFO_MIN.
+    */
+
+   /*
+    * 3D caps block space; valid with 3D hardware version >=
+    * SVGA3D_HWVERSION_WS6_B1.
+    */
+   SVGA_FIFO_3D_CAPS      = 32,
+   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+   /*
+    * End of VMX's current definition of "extended-FIFO registers".
+    * Registers before here are always enabled/disabled as a block; either
+    * the extended FIFO is enabled and includes all preceding registers, or
+    * it's disabled entirely.
+    *
+    * Block 3b (truly optional extended registers): Additional registers for
+    * the extended FIFO, which the VMX already knows how to enable and
+    * disable with correct granularity.
+    *
+    * Registers after here exist if and only if the guest SVGA driver
+    * sets SVGA_FIFO_MIN high enough to leave room for them.
+    */
+
+   /* Valid if register exists: */
+   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
+
+   /*
+    * Always keep this last.  This defines the maximum number of
+    * registers we know about.  At power-on, this value is placed in
+    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+    * to allocate this much space in FIFO memory for registers.
+    */
+   SVGA_FIFO_NUM_REGS
+};
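+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * For block 3b registers (after SVGA_FIFO_3D_CAPS_LAST), existence is
+ * determined purely by whether the guest-chosen SVGA_FIFO_MIN leaves
+ * room for the register.  Registers in the gap before SVGA_FIFO_3D_CAPS
+ * additionally need their capability bits checked, per the note above.
+ * 'fifo' is the mapped FIFO, indexed in 32-bit words.
+ */
+static inline bool svga_fifo_has_reg(const volatile uint32 *fifo, uint32 reg)
+{
+   return fifo[SVGA_FIFO_MIN] > reg * sizeof(uint32);
+}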
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data.  It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; keep it that way?
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ *  This explains the relationship between the various FIFO
+ *  sync-related registers in IOSpace and in FIFO space.
+ *
+ *  SVGA_REG_SYNC --
+ *
+ *       The SYNC register can be used in two different ways by the guest:
+ *
+ *         1. If the guest wishes to fully sync (drain) the FIFO,
+ *            it will write once to SYNC then poll on the BUSY
+ *            register. The FIFO is sync'ed once BUSY is zero.
+ *
+ *         2. If the guest wants to asynchronously wake up the host,
+ *            it will write once to SYNC without polling on BUSY.
+ *            Ideally it will do this after some new commands have
+ *            been placed in the FIFO, and after reading a zero
+ *            from SVGA_FIFO_BUSY.
+ *
+ *       (1) is the original behaviour that SYNC was designed to
+ *       support.  Originally, a write to SYNC would implicitly
+ *       trigger a read from BUSY. This causes us to synchronously
+ *       process the FIFO.
+ *
+ *       This behaviour has since been changed so that writing SYNC
+ *       will *not* implicitly cause a read from BUSY. Instead, it
+ *       makes a channel call which asynchronously wakes up the MKS
+ *       thread.
+ *
+ *       New guests can use this new behaviour to implement (2)
+ *       efficiently. This lets guests get the host's attention
+ *       without waiting for the MKS to poll, which gives us much
+ *       better CPU utilization on SMP hosts and on UP hosts while
+ *       we're blocked on the host GPU.
+ *
+ *       Old guests shouldn't notice the behaviour change. SYNC was
+ *       never guaranteed to process the entire FIFO, since it was
+ *       bounded to a particular number of CPU cycles. Old guests will
+ *       still loop on the BUSY register until the FIFO is empty.
+ *
+ *       Writing to SYNC currently has the following side-effects:
+ *
+ *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ *         - Asynchronously wakes up the MKS thread for FIFO processing
+ *         - The value written to SYNC is recorded as a "reason", for
+ *           stats purposes.
+ *
+ *       If SVGA_FIFO_BUSY is available, drivers are advised to only
+ *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ *       eventually set SVGA_FIFO_BUSY on its own, but this approach
+ *       lets the driver avoid sending multiple asynchronous wakeup
+ *       messages to the MKS thread.
+ *
+ *  SVGA_REG_BUSY --
+ *
+ *       This register is set to TRUE when SVGA_REG_SYNC is written,
+ *       and it reads as FALSE when the FIFO has been completely
+ *       drained.
+ *
+ *       Every read from this register causes us to synchronously
+ *       process FIFO commands. There is no guarantee as to how many
+ *       commands each read will process.
+ *
+ *       CPU time spent processing FIFO commands will be billed to
+ *       the guest.
+ *
+ *       New drivers should avoid using this register unless they
+ *       need to guarantee that the FIFO is completely drained. It
+ *       is overkill for performing a sync-to-fence. Older drivers
+ *       will use this register for any type of synchronization.
+ *
+ *  SVGA_FIFO_BUSY --
+ *
+ *       This register is a fast way for the guest driver to check
+ *       whether the FIFO is already being processed. It reads and
+ *       writes at normal RAM speeds, with no monitor intervention.
+ *
+ *       If this register reads as TRUE, the host is guaranteeing that
+ *       any new commands written into the FIFO will be noticed before
+ *       the MKS goes back to sleep.
+ *
+ *       If this register reads as FALSE, no such guarantee can be
+ *       made.
+ *
+ *       The guest should use this register to quickly determine
+ *       whether or not it needs to wake up the host. If the guest
+ *       just wrote a command or group of commands that it would like
+ *       the host to begin processing, it should:
+ *
+ *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ *            action is necessary.
+ *
+ *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ *            code that we've already sent a SYNC to the host and we
+ *            don't need to send a duplicate.
+ *
+ *         3. Write a reason to SVGA_REG_SYNC. This will send an
+ *            asynchronous wakeup to the MKS thread.
+ */
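+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * The three-step wakeup described above, assuming SVGA_FIFO_BUSY is
+ * present.  'fifo' is the mapped FIFO and svga_write_reg() is a
+ * hypothetical stand-in for the driver's SVGA_REG_SYNC register write.
+ */
+static void svga_ring_doorbell(volatile uint32 *fifo, uint32 reason)
+{
+   if (fifo[SVGA_FIFO_BUSY])
+      return;                   /* Host already awake; nothing to do. */
+
+   fifo[SVGA_FIFO_BUSY] = 1;    /* Tell later callers a SYNC is pending. */
+   svga_write_reg(SVGA_REG_SYNC, reason);   /* Asynchronously wake the MKS. */
+}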
+
+
+/*
+ * FIFO Capabilities
+ *
+ *      Fence -- Fence register and command are supported
+ *      Accel Front -- Front buffer only commands are supported
+ *      Pitch Lock -- Pitch lock register is supported
+ *      Video -- SVGA Video overlay units are supported
+ *      Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ *    Provides dynamic multi-screen rendering, for improved Unity and
+ *    multi-monitor modes. With Screen Object, the guest can
+ *    dynamically create and destroy 'screens', which can represent
+ *    Unity windows or virtual monitors. Screen Object also provides
+ *    strong guarantees that DMA operations happen only when
+ *    guest-initiated. Screen Object deprecates the BAR1 guest
+ *    framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ *    New registers:
+ *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ *    New 2D commands:
+ *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ *    New 3D commands:
+ *       BLIT_SURFACE_TO_SCREEN
+ *
+ *    New guarantees:
+ *
+ *       - The host will not read or write guest memory, including the GFB,
+ *         except when explicitly initiated by a DMA command.
+ *
+ *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ *         is guaranteed to complete before any subsequent FENCEs.
+ *
+ *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ *         PRESENT_READBACK) as well as new Screen blit commands will
+ *         all behave consistently as blits, and memory will be read
+ *         or written in FIFO order.
+ *
+ *         For example, if you PRESENT from one SVGA3D surface to multiple
+ *         places on the screen, the data copied will always be from the
+ *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ *         This was not necessarily true on devices without Screen Object.
+ *
+ *         This means that on devices that support Screen Object, the
+ *         PRESENT_READBACK command should not be necessary unless you
+ *         actually want to read back the results of 3D rendering into
+ *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ *         command provides a strict superset of functionality.)
+ *
+ *       - When a screen is resized, either using Screen Object commands or
+ *         legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits.  This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store.  This capability requires the use of a backing store when
+ *    creating screen objects.  However, if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present, backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit.  This cap bit can not be
+ *    reused since some hosts already expose it.
+ */
+
+#define SVGA_FIFO_CAP_NONE                  0
+#define SVGA_FIFO_CAP_FENCE             (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
+#define SVGA_FIFO_CAP_VIDEO             (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
+#define SVGA_FIFO_CAP_RESERVE           (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_GMR2              (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
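+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch:
+ * FIFO capability bits live in the SVGA_FIFO_CAPABILITIES word of the
+ * mapped FIFO, so a cap test is a simple mask of that word.
+ */
+static inline bool svga_fifo_has_cap(const volatile uint32 *fifo, uint32 cap)
+{
+   return (fifo[SVGA_FIFO_CAPABILITIES] & cap) != 0;
+}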
+
+
+/*
+ * FIFO Flags
+ *
+ *      Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE                 0
+#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+   SVGA_VIDEO_ENABLED = 0,
+   SVGA_VIDEO_FLAGS,
+   SVGA_VIDEO_DATA_OFFSET,
+   SVGA_VIDEO_FORMAT,
+   SVGA_VIDEO_COLORKEY,
+   SVGA_VIDEO_SIZE,          /* Deprecated */
+   SVGA_VIDEO_WIDTH,
+   SVGA_VIDEO_HEIGHT,
+   SVGA_VIDEO_SRC_X,
+   SVGA_VIDEO_SRC_Y,
+   SVGA_VIDEO_SRC_WIDTH,
+   SVGA_VIDEO_SRC_HEIGHT,
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
+   SVGA_VIDEO_DST_WIDTH,
+   SVGA_VIDEO_DST_HEIGHT,
+   SVGA_VIDEO_PITCH_1,
+   SVGA_VIDEO_PITCH_2,
+   SVGA_VIDEO_PITCH_3,
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+                             /* (SVGA_ID_INVALID) */
+   SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ *      width and height relate to the entire source video frame.
+ *      srcX, srcY, srcWidth and srcHeight represent subset of the source
+ *      video frame to be displayed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
+   uint32 enabled;
+   uint32 flags;
+   uint32 dataOffset;
+   uint32 format;
+   uint32 colorKey;
+   uint32 size;
+   uint32 width;
+   uint32 height;
+   uint32 srcX;
+   uint32 srcY;
+   uint32 srcWidth;
+   uint32 srcHeight;
+   int32  dstX;
+   int32  dstY;
+   uint32 dstWidth;
+   uint32 dstHeight;
+   uint32 pitches[3];
+   uint32 dataGMRId;
+   uint32 dstScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+   uint16 displayId;
+   uint16 isPrimary;
+   uint32 width;
+   uint32 height;
+   uint32 positionX;
+   uint32 positionY;
+} SVGADisplayTopology;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ *    This is a new way to represent a guest's multi-monitor screen or
+ *    Unity window. Screen objects are only supported if the
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ *    If Screen Objects are supported, they can be used to fully
+ *    replace the functionality provided by the framebuffer registers
+ *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ *    The screen object is a struct with guaranteed binary
+ *    compatibility. New flags can be added, and the struct may grow,
+ *    but existing fields must retain their meaning.
+ *
+ *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ *    a SVGAGuestPtr that is used to back the screen contents.  This
+ *    memory must come from the GFB.  The guest is not allowed to
+ *    access the memory and doing so will have undefined results.  The
+ *    backing store is required to be page aligned and the size is
+ *    padded to the next page boundary.  The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ *    The pitch in the backingStore is required to be at least large
+ *    enough to hold a 32bpp scanline.  It is recommended that the
+ *    driver pad bytesPerLine for a potential performance win.
+ *
+ *    The cloneCount field is treated as a hint from the guest that
+ *    the user wants this display to be cloned, cloneCount times.  A
+ *    value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0)
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
+ * deactivated, the base layer is defined to lose all contents and
+ * become black.  When a screen is deactivated, the backing store is
+ * optional.  When this flag is set, backingPtr and bytesPerLine will
+ * be ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set
+ * the screen contents will be output as all black to the user,
+ * though the base layer contents are preserved.  The screen base
+ * layer can still be read and written to as normal, though no visible
+ * effect will be seen by the user.  When the flag is changed, the
+ * screen will be blanked or redrawn to the current contents as needed,
+ * without any extra commands from the driver.  This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
+   uint32 id;
+   uint32 flags;
+   struct {
+      uint32 width;
+      uint32 height;
+   } size;
+   struct {
+      int32 x;
+      int32 y;
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+
+   /*
+    * The cloneCount field is treated as a hint from the guest that
+    * the user wants this display to be cloned, cloneCount times.
+    *
+    * A value of zero means no cloning should happen.
+    */
+   uint32 cloneCount;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
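+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch:
+ * filling in a minimal 640x480 primary screen at the virtual origin.
+ * The backingStore fields would additionally need to be populated when
+ * only SVGA_FIFO_CAP_SCREEN_OBJECT_2 is available.
+ */
+static void svga_init_screen(SVGAScreenObject *screen)
+{
+   memset(screen, 0, sizeof(*screen));
+   screen->structSize  = sizeof(*screen);
+   screen->id          = 0;
+   screen->flags       = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
+   screen->size.width  = 640;
+   screen->size.height = 480;
+   screen->root.x      = 0;
+   screen->root.y      = 0;
+}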
+
+
+/*
+ *  Commands in the command FIFO:
+ *
+ *  Command IDs defined below are used for the traditional 2D FIFO
+ *  communication (not all commands are available for all versions of the
+ *  SVGA FIFO protocol).
+ *
+ *  Note the holes in the command ID numbers: These commands have been
+ *  deprecated, and the old IDs must not be reused.
+ *
+ *  Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
+ *  protocol.
+ *
+ *  Each command's parameters are described by the comments and
+ *  structs below.
+ */
+
+typedef enum {
+   SVGA_CMD_INVALID_CMD           = 0,
+   SVGA_CMD_UPDATE                = 1,
+   SVGA_CMD_RECT_COPY             = 3,
+   SVGA_CMD_RECT_ROP_COPY         = 14,
+   SVGA_CMD_DEFINE_CURSOR         = 19,
+   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
+   SVGA_CMD_UPDATE_VERBOSE        = 25,
+   SVGA_CMD_FRONT_ROP_FILL        = 29,
+   SVGA_CMD_FENCE                 = 30,
+   SVGA_CMD_ESCAPE                = 33,
+   SVGA_CMD_DEFINE_SCREEN         = 34,
+   SVGA_CMD_DESTROY_SCREEN        = 35,
+   SVGA_CMD_DEFINE_GMRFB          = 36,
+   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
+   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
+   SVGA_CMD_ANNOTATION_FILL       = 39,
+   SVGA_CMD_ANNOTATION_COPY       = 40,
+   SVGA_CMD_DEFINE_GMR2           = 41,
+   SVGA_CMD_REMAP_GMR2            = 42,
+   SVGA_CMD_DEAD                  = 43,
+   SVGA_CMD_DEAD_2                = 44,
+   SVGA_CMD_NOP                   = 45,
+   SVGA_CMD_NOP_ERROR             = 46,
+   SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_DATASIZE       (256 * 1024)
+#define SVGA_CMD_MAX_ARGS           64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ *    This is a DMA transfer which copies from the Guest Framebuffer
+ *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ *    intersect with the provided virtual rectangle.
+ *
+ *    This command does not support using arbitrary guest memory as a
+ *    data source; it only works with the pre-defined GFB memory.
+ *    This command also does not support signed virtual coordinates.
+ *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ *    negative root x/y coordinates, the negative portion of those
+ *    screens will not be reachable by this command.
+ *
+ *    This command is not necessary when using framebuffer
+ *    traces. Traces are automatically enabled if the SVGA FIFO is
+ *    disabled, and you may explicitly enable/disable traces using
+ *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ *    pseudocolor screen updates. The newer Screen Object commands
+ *    only support true color formats.
+ *
+ * Availability:
+ *    Always available.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
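+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch.
+ * Submitting an UPDATE through the FIFO, assuming the driver's FIFO
+ * helpers vmw_fifo_reserve()/vmw_fifo_commit() (declared elsewhere in
+ * this driver) and a struct vmw_private device handle.
+ */
+static int svga_send_update(struct vmw_private *dev_priv,
+                            uint32 x, uint32 y, uint32 w, uint32 h)
+{
+   struct {
+      uint32 cmd;
+      SVGAFifoCmdUpdate body;
+   } *cmd;
+
+   cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+   if (!cmd)
+      return -ENOMEM;
+
+   cmd->cmd         = SVGA_CMD_UPDATE;
+   cmd->body.x      = x;
+   cmd->body.y      = y;
+   cmd->body.width  = w;
+   cmd->body.height = h;
+   vmw_fifo_commit(dev_priv, sizeof(*cmd));
+   return 0;
+}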
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *    The value of ROP may only be SVGA_ROP_COPY, and this command is
+ *    only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+   uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ *    Provide a new cursor image, as an AND/XOR mask.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   /*
+    * Followed by scanline data for AND mask, then XOR mask.
+    * Each scanline is padded to a 32-bit boundary.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ *    Provide a new cursor image, in 32-bit BGRA format.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ *    'reason' value, an opaque cookie which is used by internal
+ *    debugging tools. Third party drivers should not use this
+ *    command.
+ *
+ * Availability:
+ *    SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 reason;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ *    This is a hint which tells the SVGA device that the driver has
+ *    just filled a rectangular region of the GFB with a solid
+ *    color. Instead of reading these pixels from the GFB, the device
+ *    can assume that they all equal 'color'. This is primarily used
+ *    for remote desktop protocols.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define  SVGA_ROP_COPY                    0x03
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 color;     /* In the same format as the GFB */
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ *    Insert a synchronization fence.  When the SVGA device reaches
+ *    this command, it will copy the 'fence' value into the
+ *    SVGA_FIFO_FENCE register. It will also compare the fence against
+ *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ *    raise this interrupt.
+ *
+ * Availability:
+ *    SVGA_FIFO_FENCE for this command,
+ *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 fence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ *    Send an extended or vendor-specific variable length command.
+ *    This is used for video overlay, third party plugins, and
+ *    internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 nsid;
+   uint32 size;
+   /* followed by 'size' bytes of data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ *    Define or redefine an SVGAScreenObject. See the description of
+ *    SVGAScreenObject above.  The video driver is responsible for
+ *    generating new screen IDs. They should be small positive
+ *    integers. The virtual device will have an implementation
+ *    specific upper limit on the number of screen IDs
+ *    supported. Drivers are responsible for recycling IDs. The first
+ *    valid ID is zero.
+ *
+ *    - Interaction with other registers:
+ *
+ *    For backwards compatibility, when the GFB mode registers (WIDTH,
+ *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ *    deletes all screens other than screen #0, and redefines screen
+ *    #0 according to the specified mode. Drivers that use
+ *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ *    If you use screen objects, do not use the legacy multi-mon
+ *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAScreenObject screen;   /* Variable-length according to version */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ *    Destroy an SVGAScreenObject. Its ID is immediately available for
+ *    re-use.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 screenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ *    This command sets a piece of SVGA device state called the
+ *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ *    piece of light-weight state which identifies the location and
+ *    format of an image in guest memory or in BAR1. The GMRFB has
+ *    an arbitrary size, and it doesn't need to match the geometry
+ *    of the GFB or any screen object.
+ *
+ *    The GMRFB can be redefined as often as you like. You could
+ *    always use the same GMRFB, you could redefine it before
+ *    rendering from a different guest screen, or you could even
+ *    redefine it before every blit.
+ *
+ *    There are multiple ways to use this command. The simplest way is
+ *    to use it to move the framebuffer either to elsewhere in the GFB
+ *    (BAR1) memory region, or to a user-defined GMR. This lets a
+ *    driver use a framebuffer allocated entirely out of normal system
+ *    memory, which we encourage.
+ *
+ *    Another way to use this command is to set up a ring buffer of
+ *    updates in GFB memory. If a driver wants to ensure that no
+ *    frames are skipped by the SVGA device, it is important that the
+ *    driver not modify the source data for a blit until the device is
+ *    done processing the command. One efficient way to accomplish
+ *    this is to use a ring of small DMA buffers. Each buffer is used
+ *    for one blit, then we move on to the next buffer in the
+ *    ring. The FENCE mechanism is used to protect each buffer from
+ *    re-use until the device is finished with that buffer's
+ *    corresponding blit.
+ *
+ *    This command does not affect the meaning of SVGA_CMD_UPDATE.
+ *    UPDATEs always occur from the legacy GFB memory area. This
+ *    command has no support for pseudocolor GMRFBs. Currently only
+ *    true-color 15, 16, and 24-bit depths are supported. Future
+ *    devices may expose capabilities for additional framebuffer
+ *    formats.
+ *
+ *    The default GMRFB value is undefined. Drivers must always send
+ *    this command at least once before performing any blit from the
+ *    GMRFB.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestPtr        ptr;
+   uint32              bytesPerLine;
+   SVGAGMRImageFormat  format;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ *    This is a guest-to-host blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from the current GMRFB to
+ *    a ScreenObject.
+ *
+ *    The destination coordinate may be specified relative to a
+ *    screen's origin.  The provided screen ID must be valid.
+ *
+ *    The SVGA device is guaranteed to finish reading from the GMRFB
+ *    by the time any subsequent FENCE commands are reached.
+ *
+ *    This command consumes an annotation. See the
+ *    SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   SVGASignedRect   destRect;
+   uint32           destScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ *    This is a host-to-guest blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from a single ScreenObject
+ *    back to the current GMRFB.
+ *
+ *    The source coordinate is specified relative to a screen's
+ *    origin.  The provided screen ID must be valid. If any parameters
+ *    are invalid, the resulting pixel values are undefined.
+ *
+ *    The SVGA device is guaranteed to finish writing to the GMRFB by
+ *    the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  destOrigin;
+   SVGASignedRect   srcRect;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAColorBGRX  color;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ *    Define guest memory region v2.  See the description of GMRs above.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ *    Remap guest memory region v2.  See the description of GMRs above.
+ *
+ *    This command allows the guest to modify a portion of an existing GMR by
+ *    invalidating it or reassigning it to different guest physical pages.
+ *    The pages are identified by physical page number (PPN).  The pages
+ *    are assumed to be pinned and valid for DMA operations.
+ *
+ *    Description of command flags:
+ *
+ *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ *       The PPN list must not overlap with the remap region (this can be
+ *       handled trivially by referencing a separate GMR).  If flag is
+ *       disabled, PPN list is appended to SVGARemapGMR command.
+ *
+ *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ *       it is in PPN32 format.
+ *
+ *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ *       A single PPN can be used to invalidate a portion of a GMR or
+ *       map it to a single guest scratch page.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef enum {
+   SVGA_REMAP_GMR2_PPN32         = 0,
+   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
+   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
+   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   SVGARemapGMR2Flags flags;
+   uint32 offsetPages; /* offset in pages to begin remap */
+   uint32 numPages; /* number of pages to remap */
+   /*
+    * Followed by additional data depending on SVGARemapGMR2Flags.
+    *
+    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+    * Otherwise an array of page descriptors in PPN32 or PPN64 format
+    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
+    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
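+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch:
+ * computing the number of bytes that follow SVGAFifoCmdRemapGMR2,
+ * based on the flag rules described above.
+ */
+static uint32 svga_remap_gmr2_data_size(SVGARemapGMR2Flags flags,
+                                        uint32 numPages)
+{
+   uint32 entries;
+
+   if (flags & SVGA_REMAP_GMR2_VIA_GMR)
+      return sizeof(SVGAGuestPtr);   /* One pointer into a separate GMR. */
+
+   entries = (flags & SVGA_REMAP_GMR2_SINGLE_PPN) ? 1 : numPages;
+   return entries * ((flags & SVGA_REMAP_GMR2_PPN64) ?
+                     sizeof(PPN64) : sizeof(PPN));
+}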
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE             (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D       (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE         (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX      (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX           (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN       (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX       (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT   (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE                    (256 * 1024)
+#define SVGA_FIFO_SIZE_3D                 (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT   (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                  FALSE
+#else
+#define SVGA_VRAM_SIZE              (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE               (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D               SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT   (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                   TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS          (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS     (4 * 1024 * 1024)
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 0000000..350bbc6
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**********************************************************
+ * Copyright 2015 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT64 U64_MAX
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#define CONST64U(x) x##ULL
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 0000000..3a195e8
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 0000000..75308bd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 0000000..e93d6f2
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+__packed
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.c b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.c
new file mode 100644
index 0000000..5971c72
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+#include "ttm_lock.h"
+#include "ttm_object.h"
+
+#define TTM_WRITE_LOCK_PENDING    (1 << 0)
+#define TTM_VT_LOCK_PENDING       (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
+#define TTM_VT_LOCK               (1 << 3)
+#define TTM_SUSPEND_LOCK          (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+	spin_lock_init(&lock->lock);
+	init_waitqueue_head(&lock->queue);
+	lock->rw = 0;
+	lock->flags = 0;
+}
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	if (--lock->rw == 0)
+		wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		locked = true;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_read_lock(lock));
+	else
+		wait_event(lock->queue, __ttm_read_lock(lock));
+	return ret;
+}
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+	bool block = true;
+
+	*locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		block = false;
+		*locked = true;
+	} else if (lock->flags == 0) {
+		block = false;
+	}
+	spin_unlock(&lock->lock);
+
+	return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+	bool locked;
+
+	if (interruptible)
+		ret = wait_event_interruptible
+			(lock->queue, __ttm_read_trylock(lock, &locked));
+	else
+		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+	if (unlikely(ret != 0)) {
+		BUG_ON(locked);
+		return ret;
+	}
+
+	return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->rw = 0;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+		lock->rw = -1;
+		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+		locked = true;
+	} else {
+		lock->flags |= TTM_WRITE_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible) {
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_write_lock(lock));
+		if (unlikely(ret != 0)) {
+			spin_lock(&lock->lock);
+			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+			wake_up_all(&lock->queue);
+			spin_unlock(&lock->lock);
+		}
+	} else
+		wait_event(lock->queue, __ttm_write_lock(lock));
+
+	return ret;
+}
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->flags &= ~TTM_SUSPEND_LOCK;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+		lock->flags |= TTM_SUSPEND_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+	wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
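+
+/*
+ * Editor's note -- illustrative sketch, not part of the original patch:
+ * typical read-lock usage around a buffer validation path.  A caller in
+ * process context takes the lock interruptibly and must check for
+ * -ERESTARTSYS before proceeding.
+ */
+static int example_validate_buffers(struct ttm_lock *lock)
+{
+	int ret;
+
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0))
+		return ret;   /* Typically -ERESTARTSYS. */
+
+	/* ... validate buffers while other readers are still allowed ... */
+
+	ttm_read_unlock(lock);
+	return 0;
+}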
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.h b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.h
new file mode 100644
index 0000000..af8b28c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -0,0 +1,218 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while holding the vt lock,
+ * the lock is released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+#include "ttm_object.h"
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ */
+
+struct ttm_lock {
+	struct ttm_base_object base;
+	wait_queue_head_t queue;
+	spinlock_t lock;
+	int32_t rw;
+	uint32_t flags;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+		       struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+#endif
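A short sketch of the suspend-mode pairing declared above, as it might appear in a hypothetical driver suspend path; ttm_suspend_lock() only succeeds once rw reaches 0, so no read or write holders can race with the suspend work:

static void example_suspend(struct ttm_lock *lock)
{
	ttm_suspend_lock(lock);		/* excludes read and write mode */
	/* ... quiesce the device, evict buffers ... */
	ttm_suspend_unlock(lock);
}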
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.c b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.c
new file mode 100644
index 0000000..1607778
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <drm/ttm/ttm_module.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include "ttm_object.h"
+
+struct ttm_object_file {
+	struct ttm_object_device *tdev;
+	spinlock_t lock;
+	struct list_head ref_list;
+	struct drm_open_hash ref_hash[TTM_REF_NUM];
+	struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+	spinlock_t object_lock;
+	struct drm_open_hash object_hash;
+	atomic_t object_count;
+	struct ttm_mem_global *mem_glob;
+	struct dma_buf_ops ops;
+	void (*dmabuf_release)(struct dma_buf *dma_buf);
+	size_t dma_buf_size;
+	struct idr idr;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+	struct rcu_head rcu_head;
+	struct drm_hash_item hash;
+	struct list_head head;
+	struct kref kref;
+	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
+	struct ttm_object_file *tfile;
+};
+
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+	kref_get(&tfile->refcount);
+	return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+	struct ttm_object_file *tfile =
+		container_of(kref, struct ttm_object_file, refcount);
+
+	kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+			 struct ttm_base_object *base,
+			 bool shareable,
+			 enum ttm_object_type object_type,
+			 void (*refcount_release) (struct ttm_base_object **),
+			 void (*ref_obj_release) (struct ttm_base_object *,
+						  enum ttm_ref_type ref_type))
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	int ret;
+
+	base->shareable = shareable;
+	base->tfile = ttm_object_file_ref(tfile);
+	base->refcount_release = refcount_release;
+	base->ref_obj_release = ref_obj_release;
+	base->object_type = object_type;
+	kref_init(&base->refcount);
+	idr_preload(GFP_KERNEL);
+	spin_lock(&tdev->object_lock);
+	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
+	spin_unlock(&tdev->object_lock);
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
+
+	base->handle = ret;
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+out_err1:
+	spin_lock(&tdev->object_lock);
+	idr_remove(&tdev->idr, base->handle);
+	spin_unlock(&tdev->object_lock);
+	return ret;
+}
+
+static void ttm_release_base(struct kref *kref)
+{
+	struct ttm_base_object *base =
+	    container_of(kref, struct ttm_base_object, refcount);
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	spin_lock(&tdev->object_lock);
+	idr_remove(&tdev->idr, base->handle);
+	spin_unlock(&tdev->object_lock);
+
+	/*
+	 * Note: We don't use synchronize_rcu() here because it's far
+	 * too slow. It's up to the user to free the object using
+	 * call_rcu() or ttm_base_object_kfree().
+	 */
+
+	ttm_object_file_unref(&base->tfile);
+	if (base->refcount_release)
+		base->refcount_release(&base);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+
+	*p_base = NULL;
+
+	kref_put(&base->refcount, ttm_release_base);
+}
+
+/**
+ * ttm_base_object_noref_lookup - look up a base object without reference
+ * @tfile: The struct ttm_object_file the object is registered with.
+ * @key: The object handle.
+ *
+ * This function looks up a ttm base object and returns a pointer to it
+ * without refcounting the pointer. The returned pointer is only valid
+ * until ttm_base_object_noref_release() is called, and the object
+ * pointed to by the returned pointer may be doomed. Any persistent usage
+ * of the object requires a refcount to be taken using kref_get_unless_zero().
+ * If this function returns successfully it needs to be paired with
+ * ttm_base_object_noref_release(), and no sleeping or scheduling functions
+ * may be called in between these two calls.
+ *
+ * Return: A pointer to the object if successful or NULL otherwise.
+ */
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+{
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+	if (ret) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
+	__release(RCU);
+	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+}
+EXPORT_SYMBOL(ttm_base_object_noref_lookup);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+					       uint32_t key)
+{
+	struct ttm_base_object *base = NULL;
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
+	}
+	rcu_read_unlock();
+
+	return base;
+}
+
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+	struct ttm_base_object *base;
+
+	rcu_read_lock();
+	base = idr_find(&tdev->idr, key);
+
+	if (base && !kref_get_unless_zero(&base->refcount))
+		base = NULL;
+	rcu_read_unlock();
+
+	return base;
+}
+
+/**
+ * ttm_ref_object_exists - Check whether a caller has a valid ref object
+ * (has opened) a base object.
+ *
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller.
+ * @base: Pointer to a struct base object.
+ *
+ * Checks whether the caller identified by @tfile has put a valid USAGE
+ * reference object on the base object identified by @base.
+ */
+bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+			   struct ttm_base_object *base)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	struct drm_hash_item *hash;
+	struct ttm_ref_object *ref;
+
+	rcu_read_lock();
+	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
+		goto out_false;
+
+	/*
+	 * Verify that the ref object is really pointing to our base object.
+	 * Our base object could actually be dead, and the ref object pointing
+	 * to another base object with the same handle.
+	 */
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	if (unlikely(base != ref->obj))
+		goto out_false;
+
+	/*
+	 * Verify that the ref->obj pointer was actually valid!
+	 */
+	rmb();
+	if (unlikely(kref_read(&ref->kref) == 0))
+		goto out_false;
+
+	rcu_read_unlock();
+	return true;
+
+ out_false:
+	rcu_read_unlock();
+	return false;
+}
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+		       struct ttm_base_object *base,
+		       enum ttm_ref_type ref_type, bool *existed,
+		       bool require_existed)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	int ret = -EINVAL;
+
+	if (base->tfile != tfile && !base->shareable)
+		return -EPERM;
+
+	if (existed != NULL)
+		*existed = true;
+
+	while (ret == -EINVAL) {
+		rcu_read_lock();
+		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
+
+		if (ret == 0) {
+			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+			if (kref_get_unless_zero(&ref->kref)) {
+				rcu_read_unlock();
+				break;
+			}
+		}
+
+		rcu_read_unlock();
+		if (require_existed)
+			return -EPERM;
+
+		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+					   &ctx);
+		if (unlikely(ret != 0))
+			return ret;
+		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+		if (unlikely(ref == NULL)) {
+			ttm_mem_global_free(mem_glob, sizeof(*ref));
+			return -ENOMEM;
+		}
+
+		ref->hash.key = base->handle;
+		ref->obj = base;
+		ref->tfile = tfile;
+		ref->ref_type = ref_type;
+		kref_init(&ref->kref);
+
+		spin_lock(&tfile->lock);
+		ret = drm_ht_insert_item_rcu(ht, &ref->hash);
+
+		if (likely(ret == 0)) {
+			list_add_tail(&ref->head, &tfile->ref_list);
+			kref_get(&base->refcount);
+			spin_unlock(&tfile->lock);
+			if (existed != NULL)
+				*existed = false;
+			break;
+		}
+
+		spin_unlock(&tfile->lock);
+		BUG_ON(ret != -EINVAL);
+
+		ttm_mem_global_free(mem_glob, sizeof(*ref));
+		kfree(ref);
+	}
+
+	return ret;
+}
+
+static void __releases(tfile->lock) __acquires(tfile->lock)
+ttm_ref_object_release(struct kref *kref)
+{
+	struct ttm_ref_object *ref =
+	    container_of(kref, struct ttm_ref_object, kref);
+	struct ttm_base_object *base = ref->obj;
+	struct ttm_object_file *tfile = ref->tfile;
+	struct drm_open_hash *ht;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+	ht = &tfile->ref_hash[ref->ref_type];
+	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
+	list_del(&ref->head);
+	spin_unlock(&tfile->lock);
+
+	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+		base->ref_obj_release(base, ref->ref_type);
+
+	ttm_base_object_unref(&ref->obj);
+	ttm_mem_global_free(mem_glob, sizeof(*ref));
+	kfree_rcu(ref, rcu_head);
+	spin_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+			      unsigned long key, enum ttm_ref_type ref_type)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	int ret;
+
+	spin_lock(&tfile->lock);
+	ret = drm_ht_find_item(ht, key, &hash);
+	if (unlikely(ret != 0)) {
+		spin_unlock(&tfile->lock);
+		return -EINVAL;
+	}
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	kref_put(&ref->kref, ttm_ref_object_release);
+	spin_unlock(&tfile->lock);
+	return 0;
+}
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+	struct ttm_ref_object *ref;
+	struct list_head *list;
+	unsigned int i;
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	spin_lock(&tfile->lock);
+
+	/*
+	 * Since we release the lock within the loop, we have to
+	 * restart it from the beginning each time.
+	 */
+
+	while (!list_empty(&tfile->ref_list)) {
+		list = tfile->ref_list.next;
+		ref = list_entry(list, struct ttm_ref_object, head);
+		ttm_ref_object_release(&ref->kref);
+	}
+
+	spin_unlock(&tfile->lock);
+	for (i = 0; i < TTM_REF_NUM; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	ttm_object_file_unref(&tfile);
+}
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+					     unsigned int hash_order)
+{
+	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+	unsigned int i;
+	unsigned int j = 0;
+	int ret;
+
+	if (unlikely(tfile == NULL))
+		return NULL;
+
+	spin_lock_init(&tfile->lock);
+	tfile->tdev = tdev;
+	kref_init(&tfile->refcount);
+	INIT_LIST_HEAD(&tfile->ref_list);
+
+	for (i = 0; i < TTM_REF_NUM; ++i) {
+		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+		if (ret) {
+			j = i;
+			goto out_err;
+		}
+	}
+
+	return tfile;
+out_err:
+	for (i = 0; i < j; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	kfree(tfile);
+
+	return NULL;
+}
+
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops)
+{
+	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(tdev == NULL))
+		return NULL;
+
+	tdev->mem_glob = mem_glob;
+	spin_lock_init(&tdev->object_lock);
+	atomic_set(&tdev->object_count, 0);
+	ret = drm_ht_create(&tdev->object_hash, hash_order);
+	if (ret != 0)
+		goto out_no_object_hash;
+
+	idr_init(&tdev->idr);
+	tdev->ops = *ops;
+	tdev->dmabuf_release = tdev->ops.release;
+	tdev->ops.release = ttm_prime_dmabuf_release;
+	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+		ttm_round_pot(sizeof(struct file));
+	return tdev;
+
+out_no_object_hash:
+	kfree(tdev);
+	return NULL;
+}
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+	struct ttm_object_device *tdev = *p_tdev;
+
+	*p_tdev = NULL;
+
+	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+	idr_destroy(&tdev->idr);
+	drm_ht_remove(&tdev->object_hash);
+
+	kfree(tdev);
+}
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_prime_object *prime;
+
+	*p_base = NULL;
+	prime = container_of(base, struct ttm_prime_object, base);
+	BUG_ON(prime->dma_buf != NULL);
+	mutex_destroy(&prime->mutex);
+	if (prime->refcount_release)
+		prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf: The dma-buf being released.
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct ttm_prime_object *prime =
+		(struct ttm_prime_object *) dma_buf->priv;
+	struct ttm_base_object *base = &prime->base;
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	if (tdev->dmabuf_release)
+		tdev->dmabuf_release(dma_buf);
+	mutex_lock(&prime->mutex);
+	if (prime->dma_buf == dma_buf)
+		prime->dma_buf = NULL;
+	mutex_unlock(&prime->mutex);
+	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+	ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	struct ttm_base_object *base;
+	int ret;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	if (dma_buf->ops != &tdev->ops) {
+		/* Drop the reference taken by dma_buf_get() before bailing out. */
+		dma_buf_put(dma_buf);
+		return -ENOSYS;
+	}
+
+	prime = (struct ttm_prime_object *) dma_buf->priv;
+	base = &prime->base;
+	*handle = base->handle;
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+
+	dma_buf_put(dma_buf);
+
+	return ret;
+}
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	int ret;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL ||
+		     base->object_type != ttm_prime_type)) {
+		ret = -ENOENT;
+		goto out_unref;
+	}
+
+	prime = container_of(base, struct ttm_prime_object, base);
+	if (unlikely(!base->shareable)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = mutex_lock_interruptible(&prime->mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_unref;
+	}
+
+	dma_buf = prime->dma_buf;
+	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+		struct ttm_operation_ctx ctx = {
+			.interruptible = true,
+			.no_wait_gpu = false
+		};
+		exp_info.ops = &tdev->ops;
+		exp_info.size = prime->size;
+		exp_info.flags = flags;
+		exp_info.priv = prime;
+
+		/*
+		 * Need to create a new dma_buf, with memory accounting.
+		 */
+		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+					   &ctx);
+		if (unlikely(ret != 0)) {
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		dma_buf = dma_buf_export(&exp_info);
+		if (IS_ERR(dma_buf)) {
+			ret = PTR_ERR(dma_buf);
+			ttm_mem_global_free(tdev->mem_glob,
+					    tdev->dma_buf_size);
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		/*
+		 * dma_buf has taken the base object reference
+		 */
+		base = NULL;
+		prime->dma_buf = dma_buf;
+	}
+	mutex_unlock(&prime->mutex);
+
+	ret = dma_buf_fd(dma_buf, flags);
+	if (ret >= 0) {
+		*prime_fd = ret;
+		ret = 0;
+	} else
+		dma_buf_put(dma_buf);
+
+out_unref:
+	if (base)
+		ttm_base_object_unref(&base);
+	return ret;
+}
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+			  struct ttm_prime_object *prime, bool shareable,
+			  enum ttm_object_type type,
+			  void (*refcount_release) (struct ttm_base_object **),
+			  void (*ref_obj_release) (struct ttm_base_object *,
+						   enum ttm_ref_type ref_type))
+{
+	mutex_init(&prime->mutex);
+	prime->size = PAGE_ALIGN(size);
+	prime->real_type = type;
+	prime->dma_buf = NULL;
+	prime->refcount_release = refcount_release;
+	return ttm_base_object_init(tfile, &prime->base, shareable,
+				    ttm_prime_type,
+				    ttm_prime_refcount_release,
+				    ref_obj_release);
+}
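A condensed sketch of the base-object lifecycle implemented above, with error handling elided; struct example_obj and its callbacks are illustrative, not part of this file:

struct example_obj {
	struct ttm_base_object base;
	/* ... driver payload ... */
};

static void example_release(struct ttm_base_object **p_base)
{
	struct example_obj *obj =
		container_of(*p_base, struct example_obj, base);

	*p_base = NULL;
	ttm_base_object_kfree(obj, base);	/* frees via kfree_rcu() */
}

/* @tfile typically comes from ttm_object_file_init() at open time. */
static int example_create(struct ttm_object_file *tfile,
			  struct example_obj *obj)
{
	return ttm_base_object_init(tfile, &obj->base, true /* shareable */,
				    ttm_driver_type0, example_release, NULL);
}

/* Handle-based lookup takes a reference that must be dropped again. */
static void example_use(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base)
		return;
	/* ... use the object ... */
	ttm_base_object_unref(&base);
}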
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.h b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.h
new file mode 100644
index 0000000..ede26df
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -0,0 +1,376 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/dma-buf.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+#include <drm/drm_hashtab.h>
+#include <drm/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+	TTM_REF_USAGE,
+	TTM_REF_SYNCCPU_READ,
+	TTM_REF_SYNCCPU_WRITE,
+	TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typeX types.
+ */
+
+enum ttm_object_type {
+	ttm_fence_type,
+	ttm_buffer_type,
+	ttm_lock_type,
+	ttm_prime_type,
+	ttm_driver_type0 = 256,
+	ttm_driver_type1,
+	ttm_driver_type2,
+	ttm_driver_type3,
+	ttm_driver_type4,
+	ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+	struct rcu_head rhead;
+	struct ttm_object_file *tfile;
+	struct kref refcount;
+	void (*refcount_release) (struct ttm_base_object **base);
+	void (*ref_obj_release) (struct ttm_base_object *base,
+				 enum ttm_ref_type ref_type);
+	u32 handle;
+	enum ttm_object_type object_type;
+	u32 shareable;
+};
+
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non-refcounted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+	struct ttm_base_object base;
+	struct mutex mutex;
+	size_t size;
+	enum ttm_object_type real_type;
+	struct dma_buf *dma_buf;
+	void (*refcount_release) (struct ttm_base_object **);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+				struct ttm_base_object *base,
+				bool shareable,
+				enum ttm_object_type type,
+				void (*refcount_release) (struct ttm_base_object
+							  **),
+				void (*ref_obj_release) (struct ttm_base_object
+							 *,
+							 enum ttm_ref_type
+							 ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+						      *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
+ *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be used purely for refcounting to prevent object
+ * destruction. When user-space takes a lock, it can add a ref object to that
+ * lock to make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+			      struct ttm_base_object *base,
+			      enum ttm_ref_type ref_type, bool *existed,
+			      bool require_existed);
+
+extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+				  struct ttm_base_object *base);
+
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+				     unsigned long key,
+				     enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+						    *tdev,
+						    unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#define ttm_base_object_kfree(__object, __base)\
+	kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+				 size_t size,
+				 struct ttm_prime_object *prime,
+				 bool shareable,
+				 enum ttm_object_type type,
+				 void (*refcount_release)
+				 (struct ttm_base_object **),
+				 void (*ref_obj_release)
+				 (struct ttm_base_object *,
+				  enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+	return (base->object_type == ttm_prime_type) ?
+		container_of(base, struct ttm_prime_object, base)->real_type :
+		base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+				  int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime)		\
+	kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per idr.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
+
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_noref_release - release a base object pointer looked up
+ * without reference
+ *
+ * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+ */
+static inline void ttm_base_object_noref_release(void)
+{
+	__acquire(RCU);
+	rcu_read_unlock();
+}
+#endif
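A sketch of the noref-lookup pairing documented above; the lookup leaves the RCU read section open on success, so nothing that sleeps may run before ttm_base_object_noref_release(). The example_peek() wrapper is hypothetical:

static int example_peek(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base =
		ttm_base_object_noref_lookup(tfile, handle);

	if (!base)
		return -ESRCH;	/* lookup already dropped the RCU lock */

	/* Non-sleeping use only; take a kref for anything persistent. */
	ttm_base_object_noref_release();
	return 0;
}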
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 0000000..66e14e3
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that vmware guest
+ * backed contexts can be swapped out to their backing mobs by the device
+ * at any time, also swapped in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (that can happen at any time),
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context we can't swap it in again to kill bindings because
+ * of backing mob reservation lockdep violations, so as part of
+ * context swapout, also kill all bindings of a context, so that they are
+ * already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts.
+ * Therefore we can't easily protect this data by a per-context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT     0
+#define VMW_BINDING_PS_BIT     1
+#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_VB_BIT     3
+#define VMW_BINDING_NUM_BITS   4
+
+#define VMW_BINDING_PS_SR_BIT  0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct vmw_private *dev_priv;
+	struct list_head list;
+	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_bindinfo_view ds_view;
+	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+	struct vmw_ctx_bindinfo_ib index_buffer;
+	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+	unsigned long dirty;
+	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+	u32 bind_cmd_count;
+	u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+				       bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+	size_t size;
+	const size_t *offsets;
+	vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+	[vmw_ctx_binding_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_shader},
+	[vmw_ctx_binding_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_render_target},
+	[vmw_ctx_binding_tex] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_tex),
+		.offsets = vmw_binding_tex_offsets,
+		.scrub_func = vmw_binding_scrub_texture},
+	[vmw_ctx_binding_cb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_cb),
+		.offsets = vmw_binding_cb_offsets,
+		.scrub_func = vmw_binding_scrub_cb},
+	[vmw_ctx_binding_dx_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_dx_shader},
+	[vmw_ctx_binding_dx_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_sr] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_sr_offsets,
+		.scrub_func = vmw_binding_scrub_sr},
+	[vmw_ctx_binding_ds] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_dx_ds_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_so] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so),
+		.offsets = vmw_binding_so_offsets,
+		.scrub_func = vmw_binding_scrub_so},
+	[vmw_ctx_binding_vb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_vb),
+		.offsets = vmw_binding_vb_offsets,
+		.scrub_func = vmw_binding_scrub_vb},
+	[vmw_ctx_binding_ib] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_ib),
+		.offsets = vmw_binding_ib_offsets,
+		.scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+	if (list_empty(&cbs->list))
+		return NULL;
+
+	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+				ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+	size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
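+
+/*
+ * Example of the vmw_binding_loc() arithmetic above: a constant-buffer
+ * binding (vmw_ctx_binding_cb) with shader_slot == 1 and slot == 3 lives at
+ * offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers) +
+ * 3 * sizeof(struct vmw_ctx_bindinfo_cb) bytes from the start of @cbs.
+ */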
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+	list_del(&bi->ctx_list);
+	if (!list_empty(&bi->res_list))
+		list_del(&bi->res_list);
+	bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding. If none, set to 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+		    const struct vmw_ctx_bindinfo *bi,
+		    u32 shader_slot, u32 slot)
+{
+	struct vmw_ctx_bindinfo *loc =
+		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+	if (loc->ctx != NULL)
+		vmw_binding_drop(loc);
+
+	memcpy(loc, bi, b->size);
+	loc->scrubbed = false;
+	list_add(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged context binding state tracker the binding is transferred from.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_binding_state *from,
+				 const struct vmw_ctx_bindinfo *bi)
+{
+	size_t offset = (unsigned long)bi - (unsigned long)from;
+	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+		((unsigned long) cbs + offset);
+
+	if (loc->ctx != NULL) {
+		WARN_ON(bi->scrubbed);
+
+		vmw_binding_drop(loc);
+	}
+
+	if (bi->res != NULL) {
+		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+		list_add_tail(&loc->ctx_list, &cbs->list);
+		list_add_tail(&loc->res_list, &loc->res->binding_head);
+	}
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_state_scrub(cbs);
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	(void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_res_list_scrub(head);
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	list_for_each_entry(entry, head, res_list) {
+		struct vmw_ctx_binding_state *cbs =
+			vmw_context_binding_state(entry->ctx);
+
+		(void) vmw_binding_emit_dirty(cbs);
+	}
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent context binding state to commit to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			      struct vmw_ctx_binding_state *from)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+		vmw_binding_transfer(to, from, entry);
+		vmw_binding_drop(entry);
+	}
+}
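+
+/*
+ * Lifecycle sketch (illustrative only, error handling omitted; the real
+ * staging and commit flow lives in vmwgfx_execbuf.c and vmwgfx_context.c):
+ *
+ *	struct vmw_ctx_binding_state *staged;
+ *
+ *	staged = vmw_binding_state_alloc(dev_priv);
+ *	// ... stage bindings found while validating the command stream:
+ *	//     vmw_binding_add(staged, &bi, shader_slot, slot);
+ *	// ... submit the command stream to the device ...
+ *	vmw_binding_state_commit(vmw_context_binding_state(ctx), staged);
+ *	vmw_binding_state_free(staged);
+ */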
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->scrubbed))
+			continue;
+
+		if ((entry->res == NULL || entry->res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->scrubbed = false;
+	}
+
+	return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->slot;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
+{
+	struct vmw_ctx_bindinfo_tex *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = binding->texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_cb *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.slot = binding->slot;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	if (rebind) {
+		cmd->body.offsetInBytes = binding->offset;
+		cmd->body.sizeInBytes = binding->size;
+		cmd->body.sid = bi->res->id;
+	} else {
+		cmd->body.offsetInBytes = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.sid = SVGA3D_INVALID_ID;
+	}
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_bindinfo *bi,
+				 u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biv) {
+		if (!biv->bi.ctx)
+			break;
+
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+	}
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+				       const struct vmw_ctx_bindinfo *bi,
+				       unsigned long *dirty,
+				       u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i, next_bit;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv) {
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((!biv->bi.ctx || biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot for which to emit shader resource bindings.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+			   int shader_slot)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->per_shader[shader_slot].shader_res[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_view_ids(cbs, loc,
+				   cbs->per_shader[shader_slot].dirty_sr,
+				   SVGA3D_DX_MAX_SRVIEWS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.startView = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+	else
+		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *bi,
+				   u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_so *biso =
+		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+	unsigned long i;
+	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+		    ++cbs->bind_cmd_count) {
+		if (!biso->bi.ctx)
+			break;
+
+		if (!biso->bi.scrubbed) {
+			so_buffer->sid = biso->bi.res->id;
+			so_buffer->offset = biso->offset;
+			so_buffer->sizeInBytes = biso->size;
+		} else {
+			so_buffer->sid = SVGA3D_INVALID_ID;
+			so_buffer->offset = 0;
+			so_buffer->sizeInBytes = 0;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+	} *cmd;
+	size_t cmd_size, so_target_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+	cmd_size = sizeof(*cmd) + so_target_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+	cmd->header.size = sizeof(cmd->body) + so_target_size;
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per-shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+	u32 i;
+	int ret;
+
+	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+			continue;
+
+		ret = vmw_emit_set_sr(cbs, i);
+		if (ret)
+			break;
+
+		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+				  const struct vmw_ctx_bindinfo *bi,
+				  unsigned long *dirty,
+				  u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_vb *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	unsigned long i, next_bit;
+	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv, ++vbs) {
+		if (!biv->bi.ctx || biv->bi.scrubbed) {
+			vbs->sid = SVGA3D_INVALID_ID;
+			vbs->stride = 0;
+			vbs->offset = 0;
+		} else {
+			vbs->sid = biv->bi.res->id;
+			vbs->stride = biv->stride;
+			vbs->offset = biv->offset;
+		}
+		cbs->bind_cmd_count++;
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->vertex_buffers[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+	} *cmd;
+	size_t cmd_size, set_vb_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+			     SVGA3D_DX_MAX_VERTEXBUFFERS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+	cmd_size = sizeof(*cmd) + set_vb_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+	cmd->header.size = sizeof(cmd->body) + set_vb_size;
+	cmd->body.startBuffer = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->dirty_vb,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+	int ret = 0;
+	unsigned long hit = 0;
+
+	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+	      < VMW_BINDING_NUM_BITS) {
+
+		switch (hit) {
+		case VMW_BINDING_RT_BIT:
+			ret = vmw_emit_set_rt(cbs);
+			break;
+		case VMW_BINDING_PS_BIT:
+			ret = vmw_binding_emit_dirty_ps(cbs);
+			break;
+		case VMW_BINDING_SO_BIT:
+			ret = vmw_emit_set_so(cbs);
+			break;
+		case VMW_BINDING_VB_BIT:
+			ret = vmw_emit_set_vb(cbs);
+			break;
+		default:
+			BUG();
+		}
+		if (ret)
+			return ret;
+
+		__clear_bit(hit, &cbs->dirty);
+		hit++;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+	__set_bit(VMW_BINDING_PS_SR_BIT,
+		  &cbs->per_shader[biv->shader_slot].dirty);
+	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_vb *bivb =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(bivb->slot, cbs->dirty_vb);
+	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_ib *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	if (rebind) {
+		cmd->body.sid = bi->res->id;
+		cmd->body.format = binding->format;
+		cmd->body.offset = binding->offset;
+	} else {
+		cmd->body.sid = SVGA3D_INVALID_ID;
+		cmd->body.format = 0;
+		cmd->body.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_ctx_binding_state *cbs;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+				&ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	cbs = vzalloc(sizeof(*cbs));
+	if (!cbs) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cbs->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&cbs->list);
+
+	return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_private *dev_priv = cbs->dev_priv;
+
+	vfree(cbs);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+	return &cbs->list;
+}
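+
+/*
+ * Traversal sketch (illustrative; bindings are linked through their
+ * ctx_list member, as set up by vmw_binding_add()):
+ *
+ *	struct vmw_ctx_bindinfo *entry;
+ *
+ *	list_for_each_entry(entry, vmw_binding_state_list(cbs), ctx_list) {
+ *		// entry->res is the bound resource, entry->bt its type
+ *	}
+ */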
+
+/**
+ * vmw_binding_state_reset - Clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
+ * @binding_type: The binding type
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * context binding referencing it, we need to determine whether that resource
+ * will be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently rendertarget, depth-stencil, and
+ * stream-output-target bindings are capable of dirtying their resources.
+ *
+ * Return: Whether the binding type dirties the resource its binding points to.
+ */
+u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
+{
+	static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
+		[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+	};
+
+	/* Review this function as new bindings are added. */
+	BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+	return is_binding_dirtying[binding_type];
+}
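+
+/*
+ * Illustrative use (hedged sketch; the real caller is the resource
+ * validation code invoked from vmwgfx_execbuf.c):
+ *
+ *	u32 dirty = vmw_binding_dirtying(bi->bt);
+ *
+ *	// dirty is either VMW_RES_DIRTY_SET or 0 (no dirtying) and is
+ *	// forwarded to the validation code when bi->res is added to the
+ *	// validation list, so that GPU-written resources get marked dirty.
+ */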
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+	 * view id arrays.
+	 */
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+	 */
+	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 0000000..cd9805c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include <linux/list.h>
+
+#include "device_include/svga3d_reg.h"
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_cb,
+	vmw_ctx_binding_dx_shader,
+	vmw_ctx_binding_dx_rt,
+	vmw_ctx_binding_sr,
+	vmw_ctx_binding_ds,
+	vmw_ctx_binding_so,
+	vmw_ctx_binding_vb,
+	vmw_ctx_binding_ib,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+	struct vmw_ctx_bindinfo bi;
+	uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 stride;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource binding changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+	struct vmw_ctx_bindinfo_shader shader;
+	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+	unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *ci,
+			    u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			 struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+extern u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type);
+
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
new file mode 100644
index 0000000..bb46ca0
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Template that implements find_first_diff() for a generic
+ * unsigned integer type. @size and return value are in bytes.
+ */
+#define VMW_FIND_FIRST_DIFF(_type)			 \
+static size_t vmw_find_first_diff_ ## _type		 \
+	(const _type * dst, const _type * src, size_t size)\
+{							 \
+	size_t i;					 \
+							 \
+	for (i = 0; i < size; i += sizeof(_type)) {	 \
+		if (*dst++ != *src++)			 \
+			break;				 \
+	}						 \
+							 \
+	return i;					 \
+}
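+
+/*
+ * For reference, VMW_FIND_FIRST_DIFF(u32) below expands to roughly the
+ * following (a sketch of the generated function, not extra code):
+ *
+ *	static size_t vmw_find_first_diff_u32(const u32 *dst, const u32 *src,
+ *					      size_t size)
+ *	{
+ *		size_t i;
+ *
+ *		for (i = 0; i < size; i += sizeof(u32)) {
+ *			if (*dst++ != *src++)
+ *				break;
+ *		}
+ *		return i;
+ *	}
+ */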
+
+
+/*
+ * Template that implements find_last_diff() for a generic
+ * unsigned integer type. Pointers point to the item following the
+ * *end* of the area to be examined. @size and return value are in
+ * bytes.
+ */
+#define VMW_FIND_LAST_DIFF(_type)					\
+static ssize_t vmw_find_last_diff_ ## _type(				\
+	const _type * dst, const _type * src, size_t size)		\
+{									\
+	while (size) {							\
+		if (*--dst != *--src)					\
+			break;						\
+									\
+		size -= sizeof(_type);					\
+	}								\
+	return size;							\
+}
+
+
+/*
+ * Instantiate find diff functions for relevant unsigned integer sizes,
+ * assuming that wider integers are faster (including aligning) up to the
+ * architecture native width, which is assumed to be 32 bit unless
+ * CONFIG_64BIT is defined.
+ */
+VMW_FIND_FIRST_DIFF(u8);
+VMW_FIND_LAST_DIFF(u8);
+
+VMW_FIND_FIRST_DIFF(u16);
+VMW_FIND_LAST_DIFF(u16);
+
+VMW_FIND_FIRST_DIFF(u32);
+VMW_FIND_LAST_DIFF(u32);
+
+#ifdef CONFIG_64BIT
+VMW_FIND_FIRST_DIFF(u64);
+VMW_FIND_LAST_DIFF(u64);
+#endif
+
+
+/* We use size aligned copies. This computes (addr - align(addr)) */
+#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
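+
+/*
+ * Example: for a u32 copy, SPILL(0x1003, u32) == 3 (the address is three
+ * bytes past the previous u32 boundary at 0x1000), while
+ * SPILL(0x1004, u32) == 0 (already aligned).
+ */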
+
+
+/*
+ * Template to compute find_first_diff() for a certain integer type
+ * including a head copy for alignment, and adjustment of parameters
+ * for tail find or increased resolution find using an unsigned integer find
+ * of smaller width. If finding is complete, and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_FIRST_DIFF(_type)					\
+do {									\
+	unsigned int spill = SPILL(dst, _type);				\
+	size_t diff_offs;						\
+									\
+	if (spill && spill == SPILL(src, _type) &&			\
+	    sizeof(_type) - spill <= size) {				\
+		spill = sizeof(_type) - spill;				\
+		diff_offs = vmw_find_first_diff_u8(dst, src, spill);	\
+		if (diff_offs < spill)					\
+			return round_down(offset + diff_offs, granularity); \
+									\
+		dst += spill;						\
+		src += spill;						\
+		size -= spill;						\
+		offset += spill;					\
+		spill = 0;						\
+	}								\
+	if (!spill && !SPILL(src, _type)) {				\
+		size_t to_copy = size &	 ~(sizeof(_type) - 1);		\
+									\
+		diff_offs = vmw_find_first_diff_ ## _type		\
+			((_type *) dst, (_type *) src, to_copy);	\
+		if (diff_offs >= size || granularity == sizeof(_type))	\
+			return (offset + diff_offs);			\
+									\
+		dst += diff_offs;					\
+		src += diff_offs;					\
+		size -= diff_offs;					\
+		offset += diff_offs;					\
+	}								\
+} while (0)								\
+
+
+/**
+ * vmw_find_first_diff - find the first difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the first difference was
+ * encountered in bytes. If no difference was found, the function returns
+ * a value >= @size.
+ */
+static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
+				  size_t granularity)
+{
+	size_t offset = 0;
+
+	/*
+	 * Try finding with large integers if alignment allows, or we can
+	 * fix it. Fall through if we need better resolution or alignment
+	 * was bad.
+	 */
+#ifdef CONFIG_64BIT
+	VMW_TRY_FIND_FIRST_DIFF(u64);
+#endif
+	VMW_TRY_FIND_FIRST_DIFF(u32);
+	VMW_TRY_FIND_FIRST_DIFF(u16);
+
+	return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
+			  granularity);
+}
+
+
+/*
+ * Template to compute find_last_diff() for a certain integer type
+ * including a tail copy for alignment, and adjustment of parameters
+ * for head find or increased resolution find using an unsigned integer find
+ * of smaller width. If finding is complete, and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_LAST_DIFF(_type)					\
+do {									\
+	unsigned int spill = SPILL(dst, _type);				\
+	ssize_t location;						\
+	ssize_t diff_offs;						\
+									\
+	if (spill && spill <= size && spill == SPILL(src, _type)) {	\
+		diff_offs = vmw_find_last_diff_u8(dst, src, spill);	\
+		if (diff_offs) {					\
+			location = size - spill + diff_offs - 1;	\
+			return round_down(location, granularity);	\
+		}							\
+									\
+		dst -= spill;						\
+		src -= spill;						\
+		size -= spill;						\
+		spill = 0;						\
+	}								\
+	if (!spill && !SPILL(src, _type)) {				\
+		size_t to_copy = round_down(size, sizeof(_type));	\
+									\
+		diff_offs = vmw_find_last_diff_ ## _type		\
+			((_type *) dst, (_type *) src, to_copy);	\
+		location = size - to_copy + diff_offs - sizeof(_type);	\
+		if (location < 0 || granularity == sizeof(_type))	\
+			return location;				\
+									\
+		dst -= to_copy - diff_offs;				\
+		src -= to_copy - diff_offs;				\
+		size -= to_copy - diff_offs;				\
+	}								\
+} while (0)
+
+
+/**
+ * vmw_find_last_diff - find the last difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the last difference was
+ * encountered in bytes, or a negative value if no difference was found.
+ */
+static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
+				  size_t granularity)
+{
+	dst += size;
+	src += size;
+
+#ifdef CONFIG_64BIT
+	VMW_TRY_FIND_LAST_DIFF(u64);
+#endif
+	VMW_TRY_FIND_LAST_DIFF(u32);
+	VMW_TRY_FIND_LAST_DIFF(u16);
+
+	return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
+			  granularity);
+}
+
+
+/**
+ * vmw_memcpy - A wrapper around kernel memcpy that allows it to be plugged
+ * into a struct vmw_diff_cpy.
+ *
+ * @diff: The struct vmw_diff_cpy closure argument (unused).
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ */
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
+{
+	memcpy(dest, src, n);
+}
+
+
+/**
+ * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @diff_offs: The offset from @diff->line_offset where the difference was
+ * found.
+ */
+static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
+{
+	size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
+	struct drm_rect *rect = &diff->rect;
+
+	rect->x1 = min_t(int, rect->x1, offs);
+	rect->x2 = max_t(int, rect->x2, offs + 1);
+	rect->y1 = min_t(int, rect->y1, diff->line);
+	rect->y2 = max_t(int, rect->y2, diff->line + 1);
+}
+
+/**
+ * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ *
+ * In order to correctly track the modified content, the field @diff->line must
+ * be preloaded with the current line number, the field @diff->line_offset must
+ * be preloaded with the line offset in bytes where the copy starts, and
+ * finally the field @diff->cpp needs to be preloaded with the number of bytes
+ * per unit in the horizontal direction of the area we're examining,
+ * typically bytes per pixel.
+ * This determines the granularity of the difference computing
+ * operations. A higher cpp generally leads to faster execution at the cost of
+ * bounding box width precision.
+ */
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+		     size_t n)
+{
+	ssize_t csize, byte_len;
+
+	if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
+		return;
+
+	/* TODO: Possibly use a single vmw_find_first_diff per line? */
+	csize = vmw_find_first_diff(dest, src, n, diff->cpp);
+	if (csize < n) {
+		vmw_adjust_rect(diff, csize);
+		byte_len = diff->cpp;
+
+		/*
+		 * Starting from where first difference was found, find
+		 * location of last difference, and then copy.
+		 */
+		diff->line_offset += csize;
+		dest += csize;
+		src += csize;
+		n -= csize;
+		csize = vmw_find_last_diff(dest, src, n, diff->cpp);
+		if (csize >= 0) {
+			byte_len += csize;
+			vmw_adjust_rect(diff, csize);
+		}
+		memcpy(dest, src, byte_len);
+	}
+	diff->line_offset += n;
+}
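+
+/*
+ * Minimal calling sketch (illustrative; in this file the preloading is done
+ * by vmw_bo_cpu_blit_line() below, and "current_line" / "start_offset" are
+ * hypothetical caller state):
+ *
+ *	diff->line = current_line;
+ *	diff->line_offset = start_offset;	// bytes into the line
+ *	diff->cpp = 4;				// e.g. 32 bpp content
+ *	vmw_diff_memcpy(diff, dst, src, n);	// n must be a multiple of cpp
+ */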
+
+/**
+ * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
+ *
+ * @mapped_dst: Already mapped destination page index in @dst_pages.
+ * @dst_addr: Kernel virtual address of mapped destination page.
+ * @dst_pages: Array of destination bo pages.
+ * @dst_num_pages: Number of destination bo pages.
+ * @dst_prot: Destination bo page protection.
+ * @mapped_src: Already mapped source page index in @src_pages.
+ * @src_addr: Kernel virtual address of mapped source page.
+ * @src_pages: Array of source bo pages.
+ * @src_num_pages: Number of source bo pages.
+ * @src_prot: Source bo page protection.
+ * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
+ */
+struct vmw_bo_blit_line_data {
+	u32 mapped_dst;
+	u8 *dst_addr;
+	struct page **dst_pages;
+	u32 dst_num_pages;
+	pgprot_t dst_prot;
+	u32 mapped_src;
+	u8 *src_addr;
+	struct page **src_pages;
+	u32 src_num_pages;
+	pgprot_t src_prot;
+	struct vmw_diff_cpy *diff;
+};
+
+/**
+ * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
+ *
+ * @d: Blit data as described above.
+ * @dst_offset: Destination copy start offset from start of bo.
+ * @src_offset: Source copy start offset from start of bo.
+ * @bytes_to_copy: Number of bytes to copy in this line.
+ */
+static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+				u32 dst_offset,
+				u32 src_offset,
+				u32 bytes_to_copy)
+{
+	struct vmw_diff_cpy *diff = d->diff;
+
+	while (bytes_to_copy) {
+		u32 copy_size = bytes_to_copy;
+		u32 dst_page = dst_offset >> PAGE_SHIFT;
+		u32 src_page = src_offset >> PAGE_SHIFT;
+		u32 dst_page_offset = dst_offset & ~PAGE_MASK;
+		u32 src_page_offset = src_offset & ~PAGE_MASK;
+		bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
+		bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
+						 unmap_dst);
+
+		copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
+		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
+
+		if (unmap_src) {
+			ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+			d->src_addr = NULL;
+		}
+
+		if (unmap_dst) {
+			ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+			d->dst_addr = NULL;
+		}
+
+		if (!d->dst_addr) {
+			if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
+				return -EINVAL;
+
+			d->dst_addr =
+				ttm_kmap_atomic_prot(d->dst_pages[dst_page],
+						     d->dst_prot);
+			if (!d->dst_addr)
+				return -ENOMEM;
+
+			d->mapped_dst = dst_page;
+		}
+
+		if (!d->src_addr) {
+			if (WARN_ON_ONCE(src_page >= d->src_num_pages))
+				return -EINVAL;
+
+			d->src_addr =
+				ttm_kmap_atomic_prot(d->src_pages[src_page],
+						     d->src_prot);
+			if (!d->src_addr)
+				return -ENOMEM;
+
+			d->mapped_src = src_page;
+		}
+		diff->do_cpy(diff, d->dst_addr + dst_page_offset,
+			     d->src_addr + src_page_offset, copy_size);
+
+		bytes_to_copy -= copy_size;
+		dst_offset += copy_size;
+		src_offset += copy_size;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
+ *
+ * @dst: Destination buffer object.
+ * @dst_offset: Destination offset of blit start in bytes.
+ * @dst_stride: Destination stride in bytes.
+ * @src: Source buffer object.
+ * @src_offset: Source offset of blit start in bytes.
+ * @src_stride: Source stride in bytes.
+ * @w: Width of blit in bytes.
+ * @h: Height of blit.
+ * @diff: The struct vmw_diff_cpy providing the copy routine and, optionally,
+ * modified-area tracking.
+ * return: Zero on success. Negative error value on failure. Will print out
+ * kernel warnings on caller bugs.
+ *
+ * Performs a CPU blit from one buffer object to another, avoiding a full
+ * bo vmap which may exhaust or fragment vmalloc space.
+ * On supported architectures (x86), we're using kmap_atomic, which avoids
+ * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
+ * reference already set-up mappings.
+ *
+ * Neither of the buffer objects may be placed in PCI memory
+ * (Fixed memory in TTM terminology) when using this function.
+ */
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+		    u32 dst_offset, u32 dst_stride,
+		    struct ttm_buffer_object *src,
+		    u32 src_offset, u32 src_stride,
+		    u32 w, u32 h,
+		    struct vmw_diff_cpy *diff)
+{
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	u32 j, initial_line = dst_offset / dst_stride;
+	struct vmw_bo_blit_line_data d;
+	int ret = 0;
+
+	/* Buffer objects need to be either pinned or reserved: */
+	if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+		dma_resv_assert_held(dst->base.resv);
+	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+		dma_resv_assert_held(src->base.resv);
+
+	if (dst->ttm->state == tt_unpopulated) {
+		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+		if (ret)
+			return ret;
+	}
+
+	if (src->ttm->state == tt_unpopulated) {
+		ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
+		if (ret)
+			return ret;
+	}
+
+	d.mapped_dst = 0;
+	d.mapped_src = 0;
+	d.dst_addr = NULL;
+	d.src_addr = NULL;
+	d.dst_pages = dst->ttm->pages;
+	d.src_pages = src->ttm->pages;
+	d.dst_num_pages = dst->num_pages;
+	d.src_num_pages = src->num_pages;
+	d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
+	d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+	d.diff = diff;
+
+	for (j = 0; j < h; ++j) {
+		diff->line = j + initial_line;
+		diff->line_offset = dst_offset % dst_stride;
+		ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
+		if (ret)
+			goto out;
+
+		dst_offset += dst_stride;
+		src_offset += src_stride;
+	}
+out:
+	if (d.src_addr)
+		ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+	if (d.dst_addr)
+		ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+
+	return ret;
+}
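+
+/*
+ * Typical call sketch (illustrative only; assumes the
+ * VMW_CPU_BLIT_DIFF_INITIALIZER helper from vmwgfx_drv.h, 32 bpp content,
+ * and that both buffer objects are already pinned or reserved as required):
+ *
+ *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
+ *
+ *	ret = vmw_bo_cpu_blit(dst_bo, dst_off, dst_stride,
+ *			      src_bo, src_off, src_stride,
+ *			      width * 4, height, &diff);
+ *	// on success, diff.rect holds the modified bounding box
+ *	// (x in pixels, y in lines)
+ */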
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 0000000..aad8d81
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+	struct ttm_prime_object prime;
+	struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
+ * object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @placement:  The placement to pin it.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+			    struct vmw_buffer_object *buf,
+			    struct ttm_placement *placement,
+			    bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+	uint32_t new_flags;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	if (buf->pin_count > 0)
+		ret = ttm_bo_mem_compat(placement, &bo->mem,
+					&new_flags) == true ? 0 : -EINVAL;
+	else
+		ret = ttm_bo_validate(bo, placement, &ctx);
+
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+	uint32_t new_flags;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	if (buf->pin_count > 0) {
+		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+					&new_flags) == true ? 0 : -EINVAL;
+		goto out_unreserve;
+	}
+
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+	if (likely(ret == 0) || ret == -ERESTARTSYS)
+		goto out_unreserve;
+
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+err:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *buf,
+		       bool interruptible)
+{
+	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+				       interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to pin.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
+				bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement placement;
+	struct ttm_place place;
+	int ret = 0;
+	uint32_t new_flags;
+
+	place = vmw_vram_placement.placement[0];
+	place.lpfn = bo->num_pages;
+	placement.num_placement = 1;
+	placement.placement = &place;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &place;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err_unlock;
+
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0 &&
+	    buf->pin_count == 0) {
+		ctx.interruptible = false;
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+	}
+
+	if (buf->pin_count > 0)
+		ret = ttm_bo_mem_compat(&placement, &bo->mem,
+					&new_flags) == true ? 0 : -EINVAL;
+	else
+		ret = ttm_bo_validate(bo, &placement, &ctx);
+
+	/* For some reason we didn't end up at the start of vram */
+	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+err_unlock:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin a given buffer object without moving it.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to unpin.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+		 struct vmw_buffer_object *buf,
+		 bool interruptible)
+{
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
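+
+/*
+ * Minimal usage sketch (hypothetical driver-internal caller; @dev_priv and
+ * @buf are assumed to be a valid device private and an already initialized
+ * buffer object): pin a buffer into VRAM and drop the pin when done.
+ *
+ *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	... let the device access the buffer ...
+ *	(void) vmw_bo_unpin(dev_priv, buf, false);
+ */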
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+			  SVGAGuestPtr *ptr)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+		ptr->offset = bo->offset;
+	} else {
+		ptr->gmrId = bo->mem.start;
+		ptr->offset = 0;
+	}
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+	struct ttm_operation_ctx ctx = { false, true };
+	struct ttm_place pl;
+	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
+	uint32_t old_mem_type = bo->mem.mem_type;
+	int ret;
+
+	dma_resv_assert_held(bo->base.resv);
+
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
+	pl.fpfn = 0;
+	pl.lpfn = 0;
+	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+	if (pin)
+		pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+	memset(&placement, 0, sizeof(placement));
+	placement.num_placement = 1;
+	placement.placement = &pl;
+
+	ret = ttm_bo_validate(bo, &placement, &ctx);
+
+	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+	struct ttm_buffer_object *bo = &vbo->base;
+	bool not_used;
+	void *virtual;
+	int ret;
+
+	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+	if (virtual)
+		return virtual;
+
+	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+	if (ret)
+		DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+	if (vbo->map.bo == NULL)
+		return;
+
+	ttm_bo_kunmap(&vbo->map);
+}
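+
+/*
+ * Minimal usage sketch (hypothetical caller; @vbo is assumed to be pinned or
+ * reserved as required above, and @data/@data_size are placeholders): map the
+ * buffer for CPU access and tear the cached map down again.
+ *
+ *	virtual = vmw_bo_map_and_cache(vbo);
+ *	if (!virtual)
+ *		return -ENOMEM;
+ *	memcpy(virtual, data, data_size);
+ *	vmw_bo_unmap(vbo);
+ */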
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+			      bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_buffer_object));
+		user_struct_size = backend_size +
+		  ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+				      TTM_OBJ_EXTRA_SIZE;
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	vmw_bo_unmap(vmw_bo);
+	kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw user buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+	vmw_bo_unmap(&vmw_user_bo->vbo);
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptible.
+ * @bo_free: The buffer object destructor.
+ * Returns: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+		struct vmw_buffer_object *vmw_bo,
+		size_t size, struct ttm_placement *placement,
+		bool interruptible,
+		void (*bo_free)(struct ttm_buffer_object *bo))
+{
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	size_t acc_size;
+	int ret;
+	bool user = (bo_free == &vmw_user_bo_destroy);
+
+	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+
+	acc_size = vmw_bo_acc_size(dev_priv, size, user);
+	memset(vmw_bo, 0, sizeof(*vmw_bo));
+	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+	vmw_bo->base.priority = 3;
+
+	INIT_LIST_HEAD(&vmw_bo->res_list);
+
+	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+			  ttm_bo_type_device, placement,
+			  0, interruptible, acc_size,
+			  NULL, NULL, bo_free);
+	return ret;
+}
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base = *p_base;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	ttm_bo_put(&vmw_user_bo->vbo.base);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of a file close cleanup.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+					enum ttm_ref_type ref_type)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		break;
+	default:
+		WARN_ONCE(true, "Undefined buffer object reference release.\n");
+	}
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+		      struct ttm_object_file *tfile,
+		      uint32_t size,
+		      bool shareable,
+		      uint32_t *handle,
+		      struct vmw_buffer_object **p_vbo,
+		      struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *user_bo;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(!user_bo)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+			  (dev_priv->has_mob) ?
+			  &vmw_sys_placement :
+			  &vmw_vram_sys_placement, true,
+			  &vmw_user_bo_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ttm_bo_get(&user_bo->vbo.base);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_bo_release,
+				    &vmw_user_bo_ref_obj_release);
+	if (unlikely(ret != 0)) {
+		ttm_bo_put(&user_bo->vbo.base);
+		goto out_no_base_object;
+	}
+
+	*p_vbo = &user_bo->vbo;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
+	*handle = user_bo->prime.base.handle;
+
+out_no_base_object:
+	return ret;
+}
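+
+/*
+ * Minimal usage sketch (hypothetical caller; @dev_priv and @tfile are assumed
+ * valid): allocate a 64 KiB user buffer object, hand the handle to user-space
+ * and drop the local buffer reference.
+ *
+ *	ret = vmw_user_bo_alloc(dev_priv, tfile, 65536, false,
+ *				&handle, &vbo, NULL);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	vmw_bo_unreference(&vbo);
+ */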
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+			      struct ttm_object_file *tfile)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+
+	if (unlikely(bo->destroy != vmw_user_bo_destroy))
+		return -EPERM;
+
+	vmw_user_bo = vmw_user_buffer_object(bo);
+
+	/* Check that the caller has opened the object. */
+	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+		return 0;
+
+	DRM_ERROR("Could not grant buffer access.\n");
+	return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, Negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+				    struct ttm_object_file *tfile,
+				    uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->vbo.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+		long lret;
+
+		lret = dma_resv_wait_timeout_rcu
+			(bo->base.resv, true, true,
+			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		if (!lret)
+			return -EBUSY;
+		else if (lret < 0)
+			return lret;
+		return 0;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed, false);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_buffer_object *vbo;
+	struct vmw_user_buffer_object *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+					     &buffer_base);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(vbo, struct vmw_user_buffer_object,
+				       vbo);
+		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_bo_unreference(&vbo);
+		ttm_base_object_unref(&buffer_base);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+						  arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
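+
+/*
+ * User-space side sketch (illustrative only; assumes libdrm's
+ * drmCommandWrite() and the DRM_VMW_SYNCCPU command index from the vmwgfx
+ * uapi header): grab a buffer for CPU read/write access, do the CPU work,
+ * then release the grab.
+ *
+ *	struct drm_vmw_synccpu_arg arg = { 0 };
+ *
+ *	arg.handle = bo_handle;
+ *	arg.op = drm_vmw_synccpu_grab;
+ *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
+ *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
+ *	... CPU access ...
+ *	arg.op = drm_vmw_synccpu_release;
+ *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
+ */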
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_alloc_dmabuf_arg *arg =
+	    (union drm_vmw_alloc_dmabuf_arg *)data;
+	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+	struct vmw_buffer_object *vbo;
+	uint32_t handle;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				req->size, false, &handle, &vbo,
+				NULL);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	rep->handle = handle;
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+
+	vmw_bo_unreference(&vbo);
+
+out_no_bo:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_vmw_unref_dmabuf_arg *arg =
+	    (struct drm_vmw_unref_dmabuf_arg *)data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, Negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+		       uint32_t handle, struct vmw_buffer_object **out,
+		       struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -ESRCH;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_unref(&base);
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -EINVAL;
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	ttm_bo_get(&vmw_user_bo->vbo.base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
+	*out = &vmw_user_bo->vbo;
+
+	return 0;
+}
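+
+/*
+ * Minimal usage sketch (hypothetical caller that does not need the TTM base
+ * object): look up the buffer behind a user-space handle, use it and drop
+ * the reference taken by the lookup.
+ *
+ *	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	... use vbo ...
+ *	vmw_bo_unreference(&vbo);
+ */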
+
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). If this function returns successfully, it
+ * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
+ * or scheduling functions may be called in between these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_noref_lookup(tfile, handle);
+	if (!base) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-ESRCH);
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_noref_release();
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	return &vmw_user_bo->vbo;
+}
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, Negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+			  struct vmw_buffer_object *vbo,
+			  uint32_t *handle)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	if (vbo->base.destroy != vmw_user_bo_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+	*handle = user_bo->prime.base.handle;
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL) {
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+		dma_fence_put(&fence->base);
+	} else
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+}
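+
+/*
+ * Minimal usage sketch (hypothetical caller; @bo is assumed to be a valid,
+ * unreserved buffer object and @fence may be NULL): fence the buffer after
+ * GPU work has been queued on it.
+ *
+ *	ret = ttm_bo_reserve(bo, false, false, NULL);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	vmw_bo_fence_single(bo, fence);
+ *	ttm_bo_unreserve(bo);
+ */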
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_buffer_object *vbo;
+	int ret;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
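+	/*
+	 * Example of the computation above (illustrative numbers only):
+	 * a 1920x1080 request at 32 bpp gives pitch = 1920 * 4 = 7680
+	 * bytes and size = 7680 * 1080 = 8294400 bytes.
+	 */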
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &vbo, NULL);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	vmw_bo_unreference(&vbo);
+out_no_bo:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_buffer_object *out_buf;
+	int ret;
+
+	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+	if (ret != 0)
+		return -EINVAL;
+
+	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
+	vmw_bo_unreference(&out_buf);
+	return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+	/* Is @bo embedded in a struct vmw_buffer_object? */
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
+		return;
+
+	/* Kill any cached kernel maps before swapout */
+	vmw_bo_unmap(vmw_buffer_object(bo));
+}
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+			struct ttm_mem_reg *mem)
+{
+	struct vmw_buffer_object *vbo;
+
+	if (mem == NULL)
+		return;
+
+	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
+		return;
+
+	vbo = container_of(bo, struct vmw_buffer_object, base);
+
+	/*
+	 * Kill any cached kernel maps before move to or from VRAM.
+	 * With other types of moves, the underlying pages stay the same,
+	 * and the map can be kept.
+	 */
+	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+		vmw_bo_unmap(vbo);
+
+	/*
+	 * If we're moving a backup MOB out of MOB placement, then make sure we
+	 * read back all resource content first, and unbind the MOB from
+	 * the resource.
+	 */
+	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+		vmw_resource_unbind_list(vbo);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 0000000..065015d
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1418 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that a page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware.
+ * @block_submission: Whether command buffer submission to hardware is
+ * currently blocked for this context.
+ */
+struct vmw_cmdbuf_context {
+	struct list_head submitted;
+	struct list_head hw_submitted;
+	struct list_head preempted;
+	unsigned num_hw_submitted;
+	bool block_submission;
+};
+
+/**
+ * struct vmw_cmdbuf_man: - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data is protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @header: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ * @num_contexts: Number of contexts actually enabled.
+ */
+struct vmw_cmdbuf_man {
+	struct mutex cur_mutex;
+	struct mutex space_mutex;
+	struct mutex error_mutex;
+	struct work_struct work;
+	struct vmw_private *dev_priv;
+	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+	struct list_head error;
+	struct drm_mm mm;
+	struct ttm_buffer_object *cmd_space;
+	struct ttm_bo_kmap_obj map_obj;
+	u8 *map;
+	struct vmw_cmdbuf_header *cur;
+	size_t cur_pos;
+	size_t default_size;
+	unsigned max_hw_submitted;
+	spinlock_t lock;
+	struct dma_pool *headers;
+	struct dma_pool *dheaders;
+	wait_queue_head_t alloc_queue;
+	wait_queue_head_t idle_queue;
+	bool irq_on;
+	bool using_mob;
+	bool has_pool;
+	dma_addr_t handle;
+	size_t size;
+	u32 num_contexts;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+	struct vmw_cmdbuf_man *man;
+	SVGACBHeader *cb_header;
+	SVGACBContext cb_context;
+	struct list_head list;
+	struct drm_mm_node node;
+	dma_addr_t handle;
+	u8 *cmd;
+	size_t size;
+	size_t reserved;
+	bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+	SVGACBHeader cb_header;
+	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+	size_t page_size;
+	struct drm_mm_node *node;
+	bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
+	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
+	     ++(_i), ++(_ctx))
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+				bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->cur_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->cur_mutex);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+	mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+
+	if (WARN_ON_ONCE(!header->inline_space))
+		return;
+
+	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+			       cb_header);
+	dma_pool_free(header->man->dheaders, dheader, header->handle);
+	kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	lockdep_assert_held_once(&man->lock);
+
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+
+	drm_mm_remove_node(&header->node);
+	wake_up_all(&man->alloc_queue);
+	if (header->cb_header)
+		dma_pool_free(man->headers, header->cb_header,
+			      header->handle);
+	kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	/* Avoid locking if inline_space */
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+	spin_lock(&man->lock);
+	__vmw_cmdbuf_header_free(header);
+	spin_unlock(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+	u32 val;
+
+	val = upper_32_bits(header->handle);
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+	val = lower_32_bits(header->handle);
+	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+	return header->cb_header->status;
+}
+
+/**
+ * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+	INIT_LIST_HEAD(&ctx->hw_submitted);
+	INIT_LIST_HEAD(&ctx->submitted);
+	INIT_LIST_HEAD(&ctx->preempted);
+	ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+				  struct vmw_cmdbuf_context *ctx)
+{
+	while (ctx->num_hw_submitted < man->max_hw_submitted &&
+	       !list_empty(&ctx->submitted) &&
+	       !ctx->block_submission) {
+		struct vmw_cmdbuf_header *entry;
+		SVGACBStatus status;
+
+		entry = list_first_entry(&ctx->submitted,
+					 struct vmw_cmdbuf_header,
+					 list);
+
+		status = vmw_cmdbuf_header_submit(entry);
+
+		/* This should never happen */
+		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			break;
+		}
+
+		list_del(&entry->list);
+		list_add_tail(&entry->list, &ctx->hw_submitted);
+		ctx->num_hw_submitted++;
+	}
+
+}
+
+/**
+ * vmw_cmdbuf_ctx_process: Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Pointer to a counter that is incremented if the context still
+ * has queued command buffers after processing.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers. Typically freeing them, but on preemption or error take
+ * appropriate action. Wake up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_context *ctx,
+				   int *notempty)
+{
+	struct vmw_cmdbuf_header *entry, *next;
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+
+	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+		SVGACBStatus status = entry->cb_header->status;
+
+		if (status == SVGA_CB_STATUS_NONE)
+			break;
+
+		list_del(&entry->list);
+		wake_up_all(&man->idle_queue);
+		ctx->num_hw_submitted--;
+		switch (status) {
+		case SVGA_CB_STATUS_COMPLETED:
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		case SVGA_CB_STATUS_COMMAND_ERROR:
+			WARN_ONCE(true, "Command buffer error.\n");
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			list_add_tail(&entry->list, &man->error);
+			schedule_work(&man->work);
+			break;
+		case SVGA_CB_STATUS_PREEMPTED:
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			list_add_tail(&entry->list, &ctx->preempted);
+			break;
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			WARN_ONCE(true, "Command buffer header error.\n");
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		default:
+			WARN_ONCE(true, "Undefined command buffer status.\n");
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		}
+	}
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+	if (!list_empty(&ctx->submitted))
+		(*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
+ */
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+	int notempty;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+
+retry:
+	notempty = 0;
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+	if (man->irq_on && !notempty) {
+		vmw_generic_waiter_remove(man->dev_priv,
+					  SVGA_IRQFLAG_COMMAND_BUFFER,
+					  &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = false;
+	} else if (!man->irq_on && notempty) {
+		vmw_generic_waiter_add(man->dev_priv,
+				       SVGA_IRQFLAG_COMMAND_BUFFER,
+				       &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = true;
+
+		/* Rerun in case we just missed an irq. */
+		goto retry;
+	}
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+			       struct vmw_cmdbuf_header *header,
+			       SVGACBContext cb_context)
+{
+	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+		header->cb_header->dxContext = 0;
+	header->cb_context = cb_context;
+	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+	vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
+ *
+ * @man: Pointer to the command buffer manager.
+ *
+ * The bottom half of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
+{
+	spin_lock(&man->lock);
+	vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+	struct vmw_cmdbuf_man *man =
+		container_of(work, struct vmw_cmdbuf_man, work);
+	struct vmw_cmdbuf_header *entry, *next;
+	uint32_t dummy;
+	bool send_fence = false;
+	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+	int i;
+	struct vmw_cmdbuf_context *ctx;
+	bool global_block = false;
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		INIT_LIST_HEAD(&restart_head[i]);
+
+	mutex_lock(&man->error_mutex);
+	spin_lock(&man->lock);
+	list_for_each_entry_safe(entry, next, &man->error, list) {
+		SVGACBHeader *cb_hdr = entry->cb_header;
+		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+			(entry->cmd + cb_hdr->errorOffset);
+		u32 error_cmd_size, new_start_offset;
+		const char *cmd_name;
+
+		list_del_init(&entry->list);
+		global_block = true;
+
+		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+			VMW_DEBUG_USER("Unknown command causing device error.\n");
+			VMW_DEBUG_USER("Command buffer offset is %lu\n",
+				       (unsigned long) cb_hdr->errorOffset);
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
+
+		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+			       cmd_name);
+		VMW_DEBUG_USER("Command buffer offset is %lu\n",
+			       (unsigned long) cb_hdr->errorOffset);
+		VMW_DEBUG_USER("Command size is %lu\n",
+			       (unsigned long) error_cmd_size);
+
+		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+		if (new_start_offset >= cb_hdr->length) {
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
+
+		if (man->using_mob)
+			cb_hdr->ptr.mob.mobOffset += new_start_offset;
+		else
+			cb_hdr->ptr.pa += (u64) new_start_offset;
+
+		entry->cmd += new_start_offset;
+		cb_hdr->length -= new_start_offset;
+		cb_hdr->errorOffset = 0;
+		cb_hdr->offset = 0;
+
+		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		man->ctx[i].block_submission = true;
+
+	spin_unlock(&man->lock);
+
+	/* Preempt all contexts */
+	if (global_block && vmw_cmdbuf_preempt(man, 0))
+		DRM_ERROR("Failed preempting command buffer contexts\n");
+
+	spin_lock(&man->lock);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		/* Move preempted command buffers to the preempted queue. */
+		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+		/*
+		 * Add the preempted queue after the command buffer
+		 * that caused an error.
+		 */
+		list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+		/*
+		 * Finally add all command buffers first in the submitted
+		 * queue, to rerun them.
+		 */
+
+		ctx->block_submission = false;
+		list_splice_init(&restart_head[i], &ctx->submitted);
+	}
+
+	vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+
+	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
+		DRM_ERROR("Failed restarting command buffer contexts\n");
+
+	/* Send a new fence in case one was removed */
+	if (send_fence) {
+		vmw_fifo_send_fence(man->dev_priv, &dummy);
+		wake_up_all(&man->idle_queue);
+	}
+
+	mutex_unlock(&man->error_mutex);
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Check also the preempted queue for pending command buffers.
+ *
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+				bool check_preempted)
+{
+	struct vmw_cmdbuf_context *ctx;
+	bool idle = false;
+	int i;
+
+	spin_lock(&man->lock);
+	vmw_cmdbuf_man_process(man);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (!list_empty(&ctx->submitted) ||
+		    !list_empty(&ctx->hw_submitted) ||
+		    (check_preempted && !list_empty(&ctx->preempted)))
+			goto out_unlock;
+	}
+
+	idle = list_empty(&man->error);
+
+out_unlock:
+	spin_unlock(&man->lock);
+
+	return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	lockdep_assert_held_once(&man->cur_mutex);
+
+	if (!cur)
+		return;
+
+	spin_lock(&man->lock);
+	if (man->cur_pos == 0) {
+		__vmw_cmdbuf_header_free(cur);
+		goto out_unlock;
+	}
+
+	man->cur->cb_header->length = man->cur_pos;
+	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+	spin_unlock(&man->lock);
+	man->cur = NULL;
+	man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptible when sleeping.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+			 bool interruptible)
+{
+	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+	if (ret)
+		return ret;
+
+	__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptible while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+		    unsigned long timeout)
+{
+	int ret;
+
+	ret = vmw_cmdbuf_cur_flush(man, interruptible);
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		ret = wait_event_interruptible_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	} else {
+		ret = wait_event_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+	if (ret == 0) {
+		if (!vmw_cmdbuf_man_idle(man, true))
+			ret = -EBUSY;
+		else
+			ret = 0;
+	}
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
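+
+/*
+ * Usage sketch (hypothetical caller): flush pending commands and wait up to
+ * ten seconds, uninterruptible, for the command buffer manager to idle.
+ *
+ *	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
+ *		DRM_ERROR("Command buffer manager failed to idle.\n");
+ */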
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Will hold the size on entry and allocated mm node
+ * on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true if succeeded.
+ * If a fatal error was hit, the error code is returned in @info->ret.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_alloc_info *info)
+{
+	int ret;
+
+	if (info->done)
+		return true;
+
+	memset(info->node, 0, sizeof(*info->node));
+	spin_lock(&man->lock);
+	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
+	if (ret) {
+		vmw_cmdbuf_man_process(man);
+		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
+	}
+
+	spin_unlock(&man->lock);
+	info->done = !ret;
+
+	return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space currently available, it turns on IRQ handling and sleeps waiting for it to
+ * become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
+{
+	struct vmw_cmdbuf_alloc_info info;
+
+	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.node = node;
+	info.done = false;
+
+	/*
+	 * To prevent starvation of large requests, only one allocating call
+	 * at a time waiting for space.
+	 */
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->space_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->space_mutex);
+	}
+
+	/* Try to allocate space without waiting. */
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
+
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		int ret;
+
+		ret = wait_event_interruptible
+			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		if (ret) {
+			vmw_generic_waiter_remove
+				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+				 &man->dev_priv->cmdbuf_waiters);
+			mutex_unlock(&man->space_mutex);
+			return ret;
+		}
+	} else {
+		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+	mutex_unlock(&man->space_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_header *header,
+				 size_t size,
+				 bool interruptible)
+{
+	SVGACBHeader *cb_hdr;
+	size_t offset;
+	int ret;
+
+	if (!man->has_pool)
+		return -ENOMEM;
+
+	ret = vmw_cmdbuf_alloc_space(man, &header->node,  size, interruptible);
+
+	if (ret)
+		return ret;
+
+	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+					    &header->handle);
+	if (!header->cb_header) {
+		ret = -ENOMEM;
+		goto out_no_cb_header;
+	}
+
+	header->size = header->node.size << PAGE_SHIFT;
+	cb_hdr = header->cb_header;
+	offset = header->node.start << PAGE_SHIFT;
+	header->cmd = man->map + offset;
+	if (man->using_mob) {
+		cb_hdr->flags = SVGA_CB_FLAG_MOB;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobOffset = offset;
+	} else {
+		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+	}
+
+	return 0;
+
+out_no_cb_header:
+	spin_lock(&man->lock);
+	drm_mm_remove_node(&header->node);
+	spin_unlock(&man->lock);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_header *header,
+				   int size)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+	SVGACBHeader *cb_hdr;
+
+	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+		return -ENOMEM;
+
+	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+				  &header->handle);
+	if (!dheader)
+		return -ENOMEM;
+
+	header->inline_space = true;
+	header->size = VMW_CMDBUF_INLINE_SIZE;
+	cb_hdr = &dheader->cb_header;
+	header->cb_header = cb_hdr;
+	header->cmd = dheader->cmd;
+	cb_hdr->status = SVGA_CB_STATUS_NONE;
+	cb_hdr->flags = SVGA_CB_FLAG_NONE;
+	cb_hdr->ptr.pa = (u64)header->handle +
+		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @p_header: points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+		       size_t size, bool interruptible,
+		       struct vmw_cmdbuf_header **p_header)
+{
+	struct vmw_cmdbuf_header *header;
+	int ret = 0;
+
+	*p_header = NULL;
+
+	header = kzalloc(sizeof(*header), GFP_KERNEL);
+	if (!header)
+		return ERR_PTR(-ENOMEM);
+
+	if (size <= VMW_CMDBUF_INLINE_SIZE)
+		ret = vmw_cmdbuf_space_inline(man, header, size);
+	else
+		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+	if (ret) {
+		kfree(header);
+		return ERR_PTR(ret);
+	}
+
+	header->man = man;
+	INIT_LIST_HEAD(&header->list);
+	header->cb_header->status = SVGA_CB_STATUS_NONE;
+	*p_header = header;
+
+	return header->cmd;
+}
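+
+/*
+ * Usage sketch (hypothetical caller submitting @bytes of commands through a
+ * dedicated header instead of the manager's current buffer; error handling
+ * of the reserve step is omitted):
+ *
+ *	cmd = vmw_cmdbuf_alloc(man, bytes, true, &header);
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	cmd = vmw_cmdbuf_reserve(man, bytes, SVGA3D_INVALID_ID, true, header);
+ *	... write commands to cmd ...
+ *	vmw_cmdbuf_commit(man, bytes, header, true);
+ */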
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+				    size_t size,
+				    int ctx_id,
+				    bool interruptible)
+{
+	struct vmw_cmdbuf_header *cur;
+	void *ret;
+
+	if (vmw_cmdbuf_cur_lock(man, interruptible))
+		return ERR_PTR(-ERESTARTSYS);
+
+	cur = man->cur;
+	if (cur && (size + man->cur_pos > cur->size ||
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		     ctx_id != cur->cb_header->dxContext)))
+		__vmw_cmdbuf_cur_flush(man);
+
+	if (!man->cur) {
+		ret = vmw_cmdbuf_alloc(man,
+				       max_t(size_t, size, man->default_size),
+				       interruptible, &man->cur);
+		if (IS_ERR(ret)) {
+			vmw_cmdbuf_cur_unlock(man);
+			return ret;
+		}
+
+		cur = man->cur;
+	}
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		cur->cb_header->dxContext = ctx_id;
+	}
+
+	cur->reserved = size;
+
+	return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+				  size_t size, bool flush)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	lockdep_assert_held_once(&man->cur_mutex);
+
+	WARN_ON(size > cur->reserved);
+	man->cur_pos += size;
+	if (!size)
+		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+			 int ctx_id, bool interruptible,
+			 struct vmw_cmdbuf_header *header)
+{
+	if (!header)
+		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+	if (size > header->size)
+		return ERR_PTR(-EINVAL);
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		header->cb_header->dxContext = ctx_id;
+	}
+
+	header->reserved = size;
+	return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+		       struct vmw_cmdbuf_header *header, bool flush)
+{
+	if (!header) {
+		vmw_cmdbuf_commit_cur(man, size, flush);
+		return;
+	}
+
+	(void) vmw_cmdbuf_cur_lock(man, false);
+	__vmw_cmdbuf_cur_flush(man);
+	WARN_ON(size > header->reserved);
+	man->cur = header;
+	man->cur_pos = size;
+	if (!size)
+		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
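+
+/*
+ * Sketch of the header-less path above (header == NULL), which appends to
+ * the manager's current command buffer; the command struct written into
+ * the reserved space is a hypothetical placeholder.
+ *
+ *	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
+ *				 false, NULL);
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	... fill in *cmd ...
+ *	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
+ */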
+
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+					  const void *command,
+					  size_t size)
+{
+	struct vmw_cmdbuf_header *header;
+	int status;
+	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+
+	memcpy(cmd, command, size);
+	header->cb_header->length = size;
+	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+	spin_lock(&man->lock);
+	status = vmw_cmdbuf_header_submit(header);
+	spin_unlock(&man->lock);
+	vmw_cmdbuf_header_free(header);
+
+	if (status != SVGA_CB_STATUS_COMPLETED) {
+		DRM_ERROR("Device context command failed with status %d\n",
+			  status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @context: Device command buffer context to preempt.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdPreempt body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_PREEMPT;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+	cmd.body.ignoreIDZero = 0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @context: Device command buffer context to start or stop.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+				bool enable)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdStartStop body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+	cmd.body.enable = (enable) ? 1 : 0;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Set the size and allocate the main command buffer space pool,
+ * as well as the default size of the command buffer for
+ * small kernel submissions. If successful, this enables large command
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+			     size_t size, size_t default_size)
+{
+	struct vmw_private *dev_priv = man->dev_priv;
+	bool dummy;
+	int ret;
+
+	if (man->has_pool)
+		return -EINVAL;
+
+	/* First, try to allocate a huge chunk of DMA memory */
+	size = PAGE_ALIGN(size);
+	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+				      &man->handle, GFP_KERNEL);
+	if (man->map) {
+		man->using_mob = false;
+	} else {
+		/*
+		 * DMA memory failed. If we can have command buffers in a
+		 * MOB, try to use that instead. Note that this will
+		 * actually call into the already enabled manager, when
+		 * binding the MOB.
+		 */
+		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+			return -ENOMEM;
+
+		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+				    &vmw_mob_ne_placement, 0, false,
+				    &man->cmd_space);
+		if (ret)
+			return ret;
+
+		man->using_mob = true;
+		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+				  &man->map_obj);
+		if (ret)
+			goto out_no_map;
+
+		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+	}
+
+	man->size = size;
+	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+	man->has_pool = true;
+
+	/*
+	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+	 * needs to wait for space and we block on further command
+	 * submissions to be able to free up space.
+	 */
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	DRM_INFO("Using command buffers with %s pool.\n",
+		 (man->using_mob) ? "MOB" : "DMA");
+
+	return 0;
+
+out_no_map:
+	if (man->using_mob) {
+		ttm_bo_put(man->cmd_space);
+		man->cmd_space = NULL;
+	}
+
+	return ret;
+}
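+
+/*
+ * Call-site sketch: the pool-size policy lives in the driver init code,
+ * not here, and the 2 MiB figure below is only an example value.
+ *
+ *	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024,
+ *				       VMW_CMDBUF_INLINE_SIZE);
+ *	if (ret)
+ *		... fall back to inline-only submissions ...
+ */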
+
+/**
+ * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success or an error pointer
+ * on failure. The command buffer manager will be enabled for submissions of
+ * size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_man *man;
+	struct vmw_cmdbuf_context *ctx;
+	unsigned int i;
+	int ret;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+		return ERR_PTR(-ENOSYS);
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return ERR_PTR(-ENOMEM);
+
+	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
+		2 : 1;
+	man->headers = dma_pool_create("vmwgfx cmdbuf",
+				       &dev_priv->dev->pdev->dev,
+				       sizeof(SVGACBHeader),
+				       64, PAGE_SIZE);
+	if (!man->headers) {
+		ret = -ENOMEM;
+		goto out_no_pool;
+	}
+
+	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+					&dev_priv->dev->pdev->dev,
+					sizeof(struct vmw_cmdbuf_dheader),
+					64, PAGE_SIZE);
+	if (!man->dheaders) {
+		ret = -ENOMEM;
+		goto out_no_dpool;
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_init(ctx);
+
+	INIT_LIST_HEAD(&man->error);
+	spin_lock_init(&man->lock);
+	mutex_init(&man->cur_mutex);
+	mutex_init(&man->space_mutex);
+	mutex_init(&man->error_mutex);
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	init_waitqueue_head(&man->alloc_queue);
+	init_waitqueue_head(&man->idle_queue);
+	man->dev_priv = dev_priv;
+	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+			       &dev_priv->error_waiters);
+	ret = vmw_cmdbuf_startstop(man, 0, true);
+	if (ret) {
+		DRM_ERROR("Failed starting command buffer contexts\n");
+		vmw_cmdbuf_man_destroy(man);
+		return ERR_PTR(ret);
+	}
+
+	return man;
+
+out_no_dpool:
+	dma_pool_destroy(man->headers);
+out_no_pool:
+	kfree(man);
+
+	return ERR_PTR(ret);
+}
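+
+/*
+ * Pairing sketch (driver init / teardown, simplified; see
+ * vmw_cmdbuf_remove_pool() and vmw_cmdbuf_man_destroy() further below):
+ *
+ *	man = vmw_cmdbuf_man_create(dev_priv);
+ *	if (IS_ERR(man))
+ *		return PTR_ERR(man);
+ *	...
+ *	vmw_cmdbuf_remove_pool(man);
+ *	vmw_cmdbuf_man_destroy(man);
+ */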
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+	if (!man->has_pool)
+		return;
+
+	man->has_pool = false;
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (man->using_mob) {
+		(void) ttm_bo_kunmap(&man->map_obj);
+		ttm_bo_put(man->cmd_space);
+		man->cmd_space = NULL;
+	} else {
+		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+				  man->size, man->map, man->handle);
+	}
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+	WARN_ON_ONCE(man->has_pool);
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+
+	if (vmw_cmdbuf_startstop(man, 0, false))
+		DRM_ERROR("Failed stopping command buffer contexts.\n");
+
+	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+				  &man->dev_priv->error_waiters);
+	(void) cancel_work_sync(&man->work);
+	dma_pool_destroy(man->dheaders);
+	dma_pool_destroy(man->headers);
+	mutex_destroy(&man->cur_mutex);
+	mutex_destroy(&man->space_mutex);
+	mutex_destroy(&man->error_mutex);
+	kfree(man);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 0000000..44d858c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+	struct vmw_resource *res;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_cmdbuf_res_state state;
+	struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+	struct drm_open_hash resources;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @resource_type: The resource type, that combined with the user key
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = user_key | (res_type << 24);
+
+	ret = drm_ht_find_item(&man->resources, key, &hash);
+	if (unlikely(ret != 0))
+		return ERR_PTR(ret);
+
+	return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
+}
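+
+/*
+ * Usage sketch (hypothetical caller, assuming the shader entry of
+ * enum vmw_cmdbuf_res_type): the hash key is the user key with the
+ * resource type packed into bits 24 and up, as above.
+ *
+ *	res = vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, user_key);
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ */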
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+				struct vmw_cmdbuf_res *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+	vmw_resource_unreference(&entry->res);
+	kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
+		switch (entry->state) {
+		case VMW_CMDBUF_RES_ADD:
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
+			list_add_tail(&entry->head, &entry->man->list);
+			break;
+		case VMW_CMDBUF_RES_DEL:
+			vmw_resource_unreference(&entry->res);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @list: Caller's list of command buffer resource actions
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+	int ret;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_CMDBUF_RES_ADD:
+			vmw_cmdbuf_res_free(entry->man, entry);
+			break;
+		case VMW_CMDBUF_RES_DEL:
+			ret = drm_ht_insert_item(&entry->man->resources,
+						 &entry->hash);
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &entry->man->list);
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+		       enum vmw_cmdbuf_res_type res_type,
+		       u32 user_key,
+		       struct vmw_resource *res,
+		       struct list_head *list)
+{
+	struct vmw_cmdbuf_res *cres;
+	int ret;
+
+	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+	if (unlikely(!cres))
+		return -ENOMEM;
+
+	cres->hash.key = user_key | (res_type << 24);
+	ret = drm_ht_insert_item(&man->resources, &cres->hash);
+	if (unlikely(ret != 0)) {
+		kfree(cres);
+		goto out_invalid_key;
+	}
+
+	cres->state = VMW_CMDBUF_RES_ADD;
+	cres->res = vmw_resource_reference(res);
+	cres->man = man;
+	list_add_tail(&cres->head, list);
+
+out_invalid_key:
+	return ret;
+}
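+
+/*
+ * Lifecycle sketch (simplified, locking and error handling omitted):
+ * staged entries sit on a caller-provided list until the associated
+ * command stream is either committed to the device or abandoned.
+ *
+ *	struct list_head staged;
+ *
+ *	INIT_LIST_HEAD(&staged);
+ *	ret = vmw_cmdbuf_res_add(man, res_type, user_key, res, &staged);
+ *	...
+ *	vmw_cmdbuf_res_commit(&staged);    (on successful submission)
+ *	vmw_cmdbuf_res_revert(&staged);    (if submission failed)
+ */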
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+			  enum vmw_cmdbuf_res_type res_type,
+			  u32 user_key,
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
+{
+	struct vmw_cmdbuf_res *entry;
+	struct drm_hash_item *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
+			       &hash);
+	if (likely(ret != 0))
+		return -EINVAL;
+
+	entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+	switch (entry->state) {
+	case VMW_CMDBUF_RES_ADD:
+		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
+		break;
+	case VMW_CMDBUF_RES_COMMITTED:
+		(void) drm_ht_remove_item(&man->resources, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_CMDBUF_RES_DEL;
+		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_res_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return ERR_PTR(-ENOMEM);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
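+
+/*
+ * Pairing sketch: each guest-backed / DX user context owns one of these
+ * managers; it is created from vmw_gb_context_init() and torn down from
+ * vmw_hw_context_destroy() in vmwgfx_context.c.
+ *
+ *	man = vmw_cmdbuf_res_man_create(dev_priv);
+ *	if (IS_ERR(man))
+ *		return PTR_ERR(man);
+ *	...
+ *	vmw_cmdbuf_res_man_destroy(man);
+ */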
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and -entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_cmdbuf_res_free(man, entry);
+
+	drm_ht_remove(&man->resources);
+	kfree(man);
+}
+
+/**
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+	static size_t res_man_size;
+
+	if (unlikely(res_man_size == 0))
+		res_man_size =
+			ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+			ttm_round_pot(sizeof(struct hlist_head) <<
+				      VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+	return res_man_size;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 0000000..a56c9d8
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+struct vmw_user_context {
+	struct ttm_base_object base;
+	struct vmw_resource res;
+	struct vmw_ctx_binding_state *cbs;
+	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_buffer_object *dx_query_mob;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+	.object_type = VMW_RES_CONTEXT,
+	.base_obj_to_res = vmw_user_context_base_to_res,
+	.res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+	&user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "legacy contexts",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+static const struct vmw_res_func vmw_gb_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
+	.type_name = "guest backed contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_context_create,
+	.destroy = vmw_gb_context_destroy,
+	.bind = vmw_gb_context_bind,
+	.unbind = vmw_gb_context_unbind
+};
+
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyContext body;
+	} *cmd;
+
+
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		vmw_cmdbuf_res_man_destroy(uctx->man);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+		if (dev_priv->pinned_bo != NULL &&
+		    !dev_priv->query_cid_valid)
+			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
+		return;
+	}
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return;
+
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_dec(dev_priv);
+}
+
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
+			       struct vmw_resource *res,
+			       void (*res_free)(struct vmw_resource *res))
+{
+	int ret, i;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
+	ret = vmw_resource_init(dev_priv, res, true,
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (dev_priv->has_mob) {
+		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+		if (IS_ERR(uctx->man)) {
+			ret = PTR_ERR(uctx->man);
+			uctx->man = NULL;
+			goto out_err;
+		}
+	}
+
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (IS_ERR(uctx->cotables[i])) {
+				ret = PTR_ERR(uctx->cotables[i]);
+				goto out_cotables;
+			}
+		}
+	}
+
+	res->hw_destroy = vmw_hw_context_destroy;
+	return 0;
+
+out_cotables:
+	vmw_context_cotables_unref(uctx);
+out_err:
+	if (res_free)
+		res_free(res);
+	else
+		kfree(res);
+	return ret;
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+			    struct vmw_resource *res,
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
+{
+	int ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineContext body;
+	} *cmd;
+
+	if (dev_priv->has_mob)
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
+
+	ret = vmw_resource_init(dev_priv, res, false,
+				res_free, &vmw_legacy_context_func);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a resource id.\n");
+		goto out_early;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+		DRM_ERROR("Out of hw context ids.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+	res->hw_destroy = vmw_hw_context_destroy;
+	return 0;
+
+out_early:
+	if (res_free == NULL)
+		kfree(res);
+	else
+		res_free(res);
+	return ret;
+}
+
+
+/*
+ * GB context.
+ */
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_binding_state_scrub(uctx->cbs);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_bo_fence_single(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+	    readback) {
+		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+		if (vmw_query_readback_all(uctx->dx_query_mob))
+			DRM_ERROR("Failed to read back query states\n");
+	}
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_bo_fence_single(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+	struct vmw_user_context *ctx =
+	    container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
+
+	(void) vmw_context_bind_dx_query(res, NULL);
+
+	ttm_base_object_kfree(ctx, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_context *ctx =
+	    container_of(base, struct vmw_user_context, base);
+	struct vmw_resource *res = &ctx->res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_context *ctx;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_operation_ctx ttm_opt_ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (!dev_priv->has_dx && dx) {
+		VMW_DEBUG_USER("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(vmw_user_context_size == 0))
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_context_size,
+				   &ttm_opt_ctx);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for context"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(!ctx)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_context_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	res = &ctx->res;
+	ctx->base.shareable = false;
+	ctx->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&ctx->res);
+	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+				   &vmw_user_context_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	arg->cid = ctx->base.handle;
+out_err:
+	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	return vmw_context_define(dev, data, file_priv, false);
+}
+
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
+
+	switch (arg->req) {
+	case drm_vmw_context_legacy:
+		return vmw_context_define(dev, rep, file_priv, false);
+	case drm_vmw_context_dx:
+		return vmw_context_define(dev, rep, file_priv, true);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
+}
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->man;
+}
+
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return container_of(ctx, struct vmw_user_context, res)->
+		cotables[cotable_type];
+}
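+
+/*
+ * Usage sketch (hypothetical caller): command verification code fetches
+ * the per-context cotable for a given SVGACOTableType before touching
+ * entries of that type.
+ *
+ *	struct vmw_resource *cotable =
+ *		vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
+ *	if (IS_ERR(cotable))
+ *		return PTR_ERR(cotable);
+ */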
+
+/**
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}
+
+/**
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context.  If @mob is NULL, then this function will
+ * remove the association between the MOB and the context.  This function
+ * assumes the binding_mutex is held.
+ *
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
+ *
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter.  0 otherwise.
+ */
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+			      struct vmw_buffer_object *mob)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	if (mob == NULL) {
+		if (uctx->dx_query_mob) {
+			uctx->dx_query_mob->dx_query_ctx = NULL;
+			vmw_bo_unreference(&uctx->dx_query_mob);
+			uctx->dx_query_mob = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Can only have one MOB per context for queries */
+	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+		return -EINVAL;
+
+	mob->dx_query_ctx  = ctx_res;
+
+	if (!uctx->dx_query_mob)
+		uctx->dx_query_mob = vmw_bo_reference(mob);
+
+	return 0;
+}
+
+/**
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
+ *
+ * @ctx_res: The context resource
+ */
+struct vmw_buffer_object *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	return uctx->dx_query_mob;
+}
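+
+/*
+ * Usage sketch (hypothetical caller; binding_mutex assumed held as the
+ * vmw_context_bind_dx_query() kerneldoc above requires; "qmob" is a
+ * placeholder struct vmw_buffer_object pointer):
+ *
+ *	ret = vmw_context_bind_dx_query(ctx_res, qmob);
+ *	...
+ *	vmw_context_bind_dx_query(ctx_res, NULL);    (drops the association)
+ */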
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 0000000..3ca5cf3
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, that is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	size_t size_read_back;
+	int seen_entries;
+	u32 type;
+	bool scrubbed;
+	struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
+ */
+struct vmw_cotable_info {
+	u32 min_initial_entries;
+	u32 size;
+	void (*unbind_func)(struct vmw_private *, struct list_head *,
+			    bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first,
+ * otherwise, the device will swap in an invalid context when we remove
+ * bindings before scrubbing a cotable...
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+	SVGA_COTABLE_RTVIEW,
+	SVGA_COTABLE_DSVIEW,
+	SVGA_COTABLE_SRVIEW,
+	SVGA_COTABLE_DXSHADER,
+	SVGA_COTABLE_ELEMENTLAYOUT,
+	SVGA_COTABLE_BLENDSTATE,
+	SVGA_COTABLE_DEPTHSTENCIL,
+	SVGA_COTABLE_RASTERIZERSTATE,
+	SVGA_COTABLE_SAMPLER,
+	SVGA_COTABLE_STREAMOUTPUT,
+	SVGA_COTABLE_DXQUERY,
+};
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+	.res_type = vmw_res_cotable,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
+	.type_name = "context guest backed object tables",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_cotable_create,
+	.destroy = vmw_cotable_destroy,
+	.bind = vmw_cotable_bind,
+	.unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+	res->id = -1;
+	return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = &res->backup->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	dma_resv_assert_held(bo->base.resv);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = vcotbl->ctx->id;
+	cmd->body.type = vcotbl->type;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vcotbl->scrubbed = false;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf)
+{
+	/*
+	 * The create() callback may have changed @res->backup without
+	 * the caller noticing, and with val_buf->bo still pointing to
+	 * the old backup buffer. Although hackish, and not used currently,
+	 * take the opportunity to correct the value here so that it's not
+	 * misused in the future.
+	 */
+	val_buf->bo = &res->backup->base;
+
+	return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the backup
+ * buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t submit_size;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd0;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd1;
+
+	if (vcotbl->scrubbed)
+		return 0;
+
+	if (co_info[vcotbl->type].unbind_func)
+		co_info[vcotbl->type].unbind_func(dev_priv,
+						  &vcotbl->resource_list,
+						  readback);
+	submit_size = sizeof(*cmd1);
+	if (readback)
+		submit_size += sizeof(*cmd0);
+
+	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (!cmd1)
+		return -ENOMEM;
+
+	vcotbl->size_read_back = 0;
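+	/*
+	 * When a readback is requested, a READBACK_COTABLE command is
+	 * emitted ahead of the SET_COTABLE command in the same FIFO
+	 * reservation, so the device copies the current cotable contents
+	 * back to the MOB before the table is detached.
+	 */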
+	if (readback) {
+		cmd0 = (void *) cmd1;
+		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd0->header.size = sizeof(cmd0->body);
+		cmd0->body.cid = vcotbl->ctx->id;
+		cmd0->body.type = vcotbl->type;
+		cmd1 = (void *) &cmd0[1];
+		vcotbl->size_read_back = res->backup_size;
+	}
+	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.cid = vcotbl->ctx->id;
+	cmd1->body.type = vcotbl->type;
+	cmd1->body.mobid = SVGA3D_INVALID_ID;
+	cmd1->body.validSizeInBytes = 0;
+	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vcotbl->scrubbed = true;
+
+	/* Trigger a create() on next validate. */
+	res->id = -1;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	if (!vmw_resource_mob_attached(res))
+		return 0;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	dma_resv_assert_held(bo->base.resv);
+
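+	/*
+	 * Scrub all cotables of the owning context under the binding mutex,
+	 * so the device stops referencing the MOB before the backing buffer
+	 * is fenced and released.
+	 */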
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!vcotbl->scrubbed)
+		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+	mutex_unlock(&dev_priv->binding_mutex);
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_bo_fence_single(bo, fence);
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	if (!vcotbl->scrubbed) {
+		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+		if (!cmd)
+			return -ENOMEM;
+
+		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.cid = vcotbl->ctx->id;
+		cmd->body.type = vcotbl->type;
+		vcotbl->size_read_back = res->backup_size;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
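+	/*
+	 * Fence the backing MOB even when no readback command was needed,
+	 * so that subsequent CPU access waits for any outstanding device
+	 * writes.
+	 */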
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_bo_fence_single(&res->backup->base, fence);
+	vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
+	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+	size_t old_size = res->backup_size;
+	size_t old_size_read_back = vcotbl->size_read_back;
+	size_t cur_size_read_back;
+	struct ttm_bo_kmap_obj old_map, new_map;
+	int ret;
+	size_t i;
+
+	ret = vmw_cotable_readback(res);
+	if (ret)
+		return ret;
+
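+	/*
+	 * Stash the post-readback size and keep the old value in place for
+	 * now; the new value is only committed once the MOB switch succeeds.
+	 */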
+	cur_size_read_back = vcotbl->size_read_back;
+	vcotbl->size_read_back = old_size_read_back;
+
+	/*
+	 * While the device is processing, allocate and reserve a buffer object
+	 * for the new COTable. Initially pin the buffer object to make sure
+	 * we can use tryreserve without failure.
+	 */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
+	if (ret) {
+		DRM_ERROR("Failed initializing new cotable MOB.\n");
+		return ret;
+	}
+
+	bo = &buf->base;
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
+
+	ret = ttm_bo_wait(old_bo, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed waiting for cotable unbind.\n");
+		goto out_wait;
+	}
+
+	/*
+	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
+	 * This should really be a TTM utility.
+	 */
+	for (i = 0; i < old_bo->num_pages; ++i) {
+		bool dummy;
+
+		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping old COTable on resize.\n");
+			goto out_wait;
+		}
+		ret = ttm_bo_kmap(bo, i, 1, &new_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping new COTable on resize.\n");
+			goto out_map_new;
+		}
+		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+		       ttm_kmap_obj_virtual(&old_map, &dummy),
+		       PAGE_SIZE);
+		ttm_bo_kunmap(&new_map);
+		ttm_bo_kunmap(&old_map);
+	}
+
+	/* Unpin new buffer, and switch backup buffers. */
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed validating new COTable backup buffer.\n");
+		goto out_wait;
+	}
+
+	vmw_resource_mob_detach(res);
+	res->backup = buf;
+	res->backup_size = new_size;
+	vcotbl->size_read_back = cur_size_read_back;
+
+	/*
+	 * Now tell the device to switch. If this fails, then we need to
+	 * revert the full resize.
+	 */
+	ret = vmw_cotable_unscrub(res);
+	if (ret) {
+		DRM_ERROR("Failed switching COTable backup buffer.\n");
+		res->backup = old_buf;
+		res->backup_size = old_size;
+		vcotbl->size_read_back = old_size_read_back;
+		vmw_resource_mob_attach(res);
+		goto out_wait;
+	}
+
+	vmw_resource_mob_attach(res);
+	/* Let go of the old mob. */
+	vmw_bo_unreference(&old_buf);
+	res->id = vcotbl->type;
+
+	return 0;
+
+out_map_new:
+	ttm_bo_kunmap(&old_map);
+out_wait:
+	ttm_bo_unreserve(bo);
+	vmw_bo_unreference(&buf);
+
+	return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence, is instead used for two
+ * things.
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	size_t new_size = res->backup_size;
+	size_t needed_size;
+	int ret;
+
+	/* Check whether we need to resize the cotable */
+	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
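+	/* Grow in powers of two until the highest seen entry fits. */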
+	while (needed_size > new_size)
+		new_size *= 2;
+
+	if (likely(new_size <= res->backup_size)) {
+		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
+			ret = vmw_cotable_unscrub(res);
+			if (ret)
+				return ret;
+		}
+		res->id = vcotbl->type;
+		return 0;
+	}
+
+	return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+	(void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(res);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource.
+ * The cotable resource will not add a refcount.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+				       struct vmw_resource *ctx,
+				       u32 type)
+{
+	struct vmw_cotable *vcotbl;
+	struct ttm_operation_ctx ttm_opt_ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+	u32 num_entries;
+
+	if (unlikely(cotable_acc_size == 0))
+		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   cotable_acc_size, &ttm_opt_ctx);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+	if (unlikely(!vcotbl)) {
+		ret = -ENOMEM;
+		goto out_no_alloc;
+	}
+
+	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+				vmw_cotable_free, &vmw_cotable_func);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	INIT_LIST_HEAD(&vcotbl->resource_list);
+	vcotbl->res.id = type;
+	vcotbl->res.backup_size = PAGE_SIZE;
+	num_entries = PAGE_SIZE / co_info[type].size;
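+	/*
+	 * If a single page cannot hold the minimum number of initial
+	 * entries, grow the backing store and round it up to whole pages.
+	 */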
+	if (num_entries < co_info[type].min_initial_entries) {
+		vcotbl->res.backup_size = co_info[type].min_initial_entries *
+			co_info[type].size;
+		vcotbl->res.backup_size =
+			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	vcotbl->scrubbed = true;
+	vcotbl->seen_entries = -1;
+	vcotbl->type = type;
+	vcotbl->ctx = ctx;
+
+	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
+
+	return &vcotbl->res;
+
+out_no_init:
+	kfree(vcotbl);
+out_no_alloc:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+			  (unsigned) vcotbl->type, id);
+		return -EINVAL;
+	}
+
+	if (vcotbl->seen_entries < id) {
+		/* Trigger a call to create() on next validate */
+		res->id = -1;
+		vcotbl->seen_entries = id;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_add_resource - Add a resource to the cotable's active resource
+ * list.
+ *
+ * @res: Pointer to a struct vmw_resource representing the cotable.
+ * @head: Pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+	struct vmw_cotable *vcotbl =
+		container_of(res, struct vmw_cotable, res);
+
+	list_add_tail(head, &vcotbl->resource_list);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 0000000..b38bcb0
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/console.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_sysfs.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_object.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
+
+#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+#define VMWGFX_CHIP_SVGAII 0
+#define VMW_FB_RESERVATION 0
+
+#define VMW_MIN_INITIAL_WIDTH 800
+#define VMW_MIN_INITIAL_HEIGHT 600
+
+#ifndef VMWGFX_GIT_VERSION
+#define VMWGFX_GIT_VERSION "Unknown"
+#endif
+
+#define VMWGFX_REPO "In Tree"
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
+
+/**
+ * Fully encoded drm commands. Might move to vmw_drm.h
+ */
+
+#define DRM_IOCTL_VMW_GET_PARAM					\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
+		 struct drm_vmw_getparam_arg)
+#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
+		union drm_vmw_alloc_dmabuf_arg)
+#define DRM_IOCTL_VMW_UNREF_DMABUF				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
+		struct drm_vmw_unref_dmabuf_arg)
+#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
+		 struct drm_vmw_cursor_bypass_arg)
+
+#define DRM_IOCTL_VMW_CONTROL_STREAM				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
+		 struct drm_vmw_control_stream_arg)
+#define DRM_IOCTL_VMW_CLAIM_STREAM				\
+	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
+		 struct drm_vmw_stream_arg)
+#define DRM_IOCTL_VMW_UNREF_STREAM				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
+		 struct drm_vmw_stream_arg)
+
+#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
+	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
+		struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
+		struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_CREATE_SURFACE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
+		 union drm_vmw_surface_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SURFACE				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
+		 struct drm_vmw_surface_arg)
+#define DRM_IOCTL_VMW_REF_SURFACE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
+		 union drm_vmw_surface_reference_arg)
+#define DRM_IOCTL_VMW_EXECBUF					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
+		struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
+		 struct drm_vmw_get_3d_cap_arg)
+#define DRM_IOCTL_VMW_FENCE_WAIT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
+		 struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
+		 struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
+		 struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_FENCE_EVENT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
+		 struct drm_vmw_fence_event_arg)
+#define DRM_IOCTL_VMW_PRESENT					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
+		 struct drm_vmw_present_arg)
+#define DRM_IOCTL_VMW_PRESENT_READBACK				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
+		 struct drm_vmw_present_readback_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
+		 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
+		 struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
+		 struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
+		 union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
+		 union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
+		 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
+		struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
+		union drm_vmw_gb_surface_create_ext_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
+		union drm_vmw_gb_surface_reference_ext_arg)
+
+/**
+ * The core DRM version of this macro doesn't account for
+ * DRM_COMMAND_BASE.
+ */
+
+#define VMW_IOCTL_DEF(ioctl, func, flags) \
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
+
+/**
+ * Ioctl definitions.
+ */
+
+static const struct drm_ioctl_desc vmw_ioctls[] = {
+	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
+		      vmw_kms_cursor_bypass_ioctl,
+		      DRM_MASTER),
+
+	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+		      DRM_MASTER),
+	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+		      DRM_MASTER),
+	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+		      DRM_MASTER),
+
+	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+		      vmw_fence_obj_signaled_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+
+	/* These allow direct access to the framebuffers; mark as master only. */
+	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
+		      DRM_MASTER | DRM_AUTH),
+	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+		      vmw_present_readback_ioctl,
+		      DRM_MASTER | DRM_AUTH),
+	/*
+	 * The permissions of the below ioctl are overridden in
+	 * vmw_generic_ioctl(). We require either
+	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
+	 */
+	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+		      vmw_kms_update_layout_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+		      vmw_shader_define_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+		      vmw_shader_destroy_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+		      vmw_gb_surface_define_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+		      vmw_gb_surface_reference_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_SYNCCPU,
+		      vmw_user_bo_synccpu_ioctl,
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+		      vmw_extended_context_define_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
+		      vmw_gb_surface_define_ext_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
+		      vmw_gb_surface_reference_ext_ioctl,
+		      DRM_AUTH | DRM_RENDER_ALLOW),
+};
+
+static const struct pci_device_id vmw_pci_id_list[] = {
+	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+	{0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+
+static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
+static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
+
+static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr);
+
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
+
+
+static void vmw_print_capabilities2(uint32_t capabilities2)
+{
+	DRM_INFO("Capabilities2:\n");
+	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
+		DRM_INFO("  Grow oTable.\n");
+	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
+		DRM_INFO("  IntraSurface copy.\n");
+}
+
+static void vmw_print_capabilities(uint32_t capabilities)
+{
+	DRM_INFO("Capabilities:\n");
+	if (capabilities & SVGA_CAP_RECT_COPY)
+		DRM_INFO("  Rect copy.\n");
+	if (capabilities & SVGA_CAP_CURSOR)
+		DRM_INFO("  Cursor.\n");
+	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
+		DRM_INFO("  Cursor bypass.\n");
+	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
+		DRM_INFO("  Cursor bypass 2.\n");
+	if (capabilities & SVGA_CAP_8BIT_EMULATION)
+		DRM_INFO("  8bit emulation.\n");
+	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
+		DRM_INFO("  Alpha cursor.\n");
+	if (capabilities & SVGA_CAP_3D)
+		DRM_INFO("  3D.\n");
+	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
+		DRM_INFO("  Extended Fifo.\n");
+	if (capabilities & SVGA_CAP_MULTIMON)
+		DRM_INFO("  Multimon.\n");
+	if (capabilities & SVGA_CAP_PITCHLOCK)
+		DRM_INFO("  Pitchlock.\n");
+	if (capabilities & SVGA_CAP_IRQMASK)
+		DRM_INFO("  Irq mask.\n");
+	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
+		DRM_INFO("  Display Topology.\n");
+	if (capabilities & SVGA_CAP_GMR)
+		DRM_INFO("  GMR.\n");
+	if (capabilities & SVGA_CAP_TRACES)
+		DRM_INFO("  Traces.\n");
+	if (capabilities & SVGA_CAP_GMR2)
+		DRM_INFO("  GMR2.\n");
+	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
+		DRM_INFO("  Screen Object 2.\n");
+	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+		DRM_INFO("  Command Buffers.\n");
+	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+		DRM_INFO("  Command Buffers 2.\n");
+	if (capabilities & SVGA_CAP_GBOBJECTS)
+		DRM_INFO("  Guest Backed Resources.\n");
+	if (capabilities & SVGA_CAP_DX)
+		DRM_INFO("  DX Features.\n");
+	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
+		DRM_INFO("  HP Command Queue.\n");
+}
+
+/**
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
+ *
+ * @dev_priv: A device private structure.
+ *
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally, it will unmap the buffer.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation or initialization fails.
+ */
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+	int ret;
+	struct vmw_buffer_object *vbo;
+	struct ttm_bo_kmap_obj map;
+	volatile SVGA3dQueryResult *result;
+	bool dummy;
+
+	/*
+	 * Create the vbo as pinned, so that a tryreserve will
+	 * immediately succeed. This is because we're the only
+	 * user of the bo currently.
+	 */
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
+
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+			  &vmw_sys_ne_placement, false,
+			  &vmw_bo_bo_free);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
+	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);
+
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
+	if (likely(ret == 0)) {
+		result = ttm_kmap_obj_virtual(&map, &dummy);
+		result->totalSize = sizeof(*result);
+		result->state = SVGA3D_QUERYSTATE_PENDING;
+		result->result32 = 0xff;
+		ttm_bo_kunmap(&map);
+	}
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Dummy query buffer map failed.\n");
+		vmw_bo_unreference(&vbo);
+	} else
+		dev_priv->dummy_query_bo = vbo;
+
+	return ret;
+}
+
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because it reverts vmw_release_device_early and is intended to be used
+ * by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->cman) {
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+					       256*4096, 2*4096);
+		if (ret) {
+			struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+			dev_priv->cman = NULL;
+			vmw_cmdbuf_man_destroy(man);
+		}
+	}
+
+	return 0;
+}
+
+static int vmw_request_device(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Unable to initialize FIFO.\n");
+		return ret;
+	}
+	vmw_fence_fifo_up(dev_priv->fman);
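+	/*
+	 * The command buffer manager is optional; if it cannot be created,
+	 * fall back to plain FIFO submission, which also means DX support
+	 * must be disabled.
+	 */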
+	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+	if (IS_ERR(dev_priv->cman)) {
+		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
+	}
+
+	ret = vmw_request_device_late(dev_priv);
+	if (ret)
+		goto out_no_mob;
+
+	ret = vmw_dummy_query_bo_create(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_query_bo;
+
+	return 0;
+
+out_no_query_bo:
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+	if (dev_priv->has_mob) {
+		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_otables_takedown(dev_priv);
+	}
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
+out_no_mob:
+	vmw_fence_fifo_down(dev_priv->fman);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+	return ret;
+}
+
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
+{
+	/*
+	 * Previous destructions should've released
+	 * the pinned bo.
+	 */
+
+	BUG_ON(dev_priv->pinned_bo != NULL);
+
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+
+	if (dev_priv->has_mob) {
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_otables_takedown(dev_priv);
+	}
+}
+
+/**
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
+ */
+static void vmw_release_device_late(struct vmw_private *dev_priv)
+{
+	vmw_fence_fifo_down(dev_priv->fman);
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
+
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+}
+
+/**
+ * vmw_get_initial_size - Set the initial_[width|height] fields on the given
+ * vmw_private.
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ *
+ * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
+ * clamping the value to fb_max_[width|height] fields and the
+ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ * If the values appear to be invalid, set them to
+ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ */
+static void vmw_get_initial_size(struct vmw_private *dev_priv)
+{
+	uint32_t width;
+	uint32_t height;
+
+	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+
+	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+
+	if (width > dev_priv->fb_max_width ||
+	    height > dev_priv->fb_max_height) {
+
+		/*
+		 * This is a host error and shouldn't occur.
+		 */
+
+		width = VMW_MIN_INITIAL_WIDTH;
+		height = VMW_MIN_INITIAL_HEIGHT;
+	}
+
+	dev_priv->initial_width = width;
+	dev_priv->initial_height = height;
+}
+
+/**
+ * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
+ * system.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
+ * If this function decides that DMA is not possible, it returns -EINVAL.
+ * The driver may then try to disable features of the device that require
+ * DMA.
+ */
+static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+{
+	static const char *names[vmw_dma_map_max] = {
+		[vmw_dma_phys] = "Using physical TTM page addresses.",
+		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
+		[vmw_dma_map_populate] = "Caching DMA mappings.",
+		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+
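+	/* Module parameters take precedence: force_coherent, then restrict_iommu. */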
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+	else
+		dev_priv->map_mode = vmw_dma_map_populate;
+
+	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
+	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
+		return -EINVAL;
+
+	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+	return 0;
+}
+
+/**
+ * vmw_dma_masks - Set required page- and DMA masks
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ *
+ * With 32-bit we can only handle 32 bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
+
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
+	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
+	}
+
+	return ret;
+}
+
+static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct vmw_private *dev_priv;
+	int ret;
+	uint32_t svga_id;
+	enum vmw_res_type i;
+	bool refuse_dma = false;
+	char host_log[100] = {0};
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (unlikely(!dev_priv)) {
+		DRM_ERROR("Failed allocating a device private struct.\n");
+		return -ENOMEM;
+	}
+
+	pci_set_master(dev->pdev);
+
+	dev_priv->dev = dev;
+	dev_priv->vmw_chipset = chipset;
+	dev_priv->last_read_seqno = (uint32_t) -100;
+	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&dev_priv->release_mutex);
+	mutex_init(&dev_priv->binding_mutex);
+	mutex_init(&dev_priv->global_kms_state_mutex);
+	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->resource_lock);
+	spin_lock_init(&dev_priv->hw_lock);
+	spin_lock_init(&dev_priv->waiter_lock);
+	spin_lock_init(&dev_priv->cap_lock);
+	spin_lock_init(&dev_priv->svga_lock);
+	spin_lock_init(&dev_priv->cursor_lock);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i) {
+		idr_init(&dev_priv->res_idr[i]);
+		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+	}
+
+	init_waitqueue_head(&dev_priv->fence_queue);
+	init_waitqueue_head(&dev_priv->fifo_queue);
+	dev_priv->fence_queue_waiters = 0;
+	dev_priv->fifo_queue_waiters = 0;
+
+	dev_priv->used_memory_size = 0;
+
+	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
+	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+
+	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
+	dev_priv->enable_fb = enable_fbdev;
+
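+	/*
+	 * Write the SVGA version we support and read back what the device
+	 * accepted; only SVGA_ID_2 devices are supported by this driver.
+	 */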
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+	if (svga_id != SVGA_ID_2) {
+		ret = -ENOSYS;
+		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
+		goto out_err0;
+	}
+
+	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
+		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
+	}
+
+	ret = vmw_dma_select_mode(dev_priv);
+	if (unlikely(ret != 0)) {
+		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		refuse_dma = true;
+	}
+
+	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+	vmw_get_initial_size(dev_priv);
+
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		dev_priv->max_gmr_ids =
+			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
+		dev_priv->max_gmr_pages =
+			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+		dev_priv->memory_size =
+			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+		dev_priv->memory_size -= dev_priv->vram_size;
+	} else {
+		/*
+		 * An arbitrary limit of 512MiB on surface
+		 * memory. But all HWV8 hardware supports GMR2.
+		 */
+		dev_priv->memory_size = 512*1024*1024;
+	}
+	dev_priv->max_mob_pages = 0;
+	dev_priv->max_mob_size = 0;
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		uint64_t mem_size =
+			vmw_read(dev_priv,
+				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+		/*
+		 * Workaround for low memory 2D VMs to compensate for the
+		 * allocation taken by fbdev
+		 */
+		if (!(dev_priv->capabilities & SVGA_CAP_3D))
+			mem_size *= 3;
+
+		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+		dev_priv->prim_bb_mem =
+			vmw_read(dev_priv,
+				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+		dev_priv->max_mob_size =
+			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+		dev_priv->stdu_max_width =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+		dev_priv->stdu_max_height =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+		dev_priv->texture_max_width = vmw_read(dev_priv,
+						       SVGA_REG_DEV_CAP);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+		dev_priv->texture_max_height = vmw_read(dev_priv,
+							SVGA_REG_DEV_CAP);
+	} else {
+		dev_priv->texture_max_width = 8192;
+		dev_priv->texture_max_height = 8192;
+		dev_priv->prim_bb_mem = dev_priv->vram_size;
+	}
+
+	vmw_print_capabilities(dev_priv->capabilities);
+	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+		vmw_print_capabilities2(dev_priv->capabilities2);
+
+	ret = vmw_dma_masks(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
+					     SCATTERLIST_MAX_SEGMENT));
+
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		DRM_INFO("Max GMR ids is %u\n",
+			 (unsigned)dev_priv->max_gmr_ids);
+		DRM_INFO("Max number of GMR pages is %u\n",
+			 (unsigned)dev_priv->max_gmr_pages);
+		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+			 (unsigned)dev_priv->memory_size / 1024);
+	}
+	DRM_INFO("Maximum display memory size is %u kiB\n",
+		 dev_priv->prim_bb_mem / 1024);
+	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
+		 dev_priv->vram_start, dev_priv->vram_size / 1024);
+	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
+		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+
+	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+				       dev_priv->mmio_size, MEMREMAP_WB);
+
+	if (unlikely(dev_priv->mmio_virt == NULL)) {
+		ret = -ENOMEM;
+		DRM_ERROR("Failed mapping MMIO.\n");
+		goto out_err0;
+	}
+
+	/* Need mmio memory to check for fifo pitchlock cap. */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+	    !vmw_fifo_have_pitchlock(dev_priv)) {
+		ret = -ENOSYS;
+		DRM_ERROR("Hardware has no pitchlock\n");
+		goto out_err4;
+	}
+
+	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+						&vmw_prime_dmabuf_ops);
+
+	if (unlikely(dev_priv->tdev == NULL)) {
+		DRM_ERROR("Unable to initialize TTM object management.\n");
+		ret = -ENOMEM;
+		goto out_err4;
+	}
+
+	dev->dev_private = dev_priv;
+
+	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
+	dev_priv->stealth = (ret != 0);
+	if (dev_priv->stealth) {
+		/**
+		 * Request at least the mmio PCI resource.
+		 */
+
+		DRM_INFO("It appears that vesafb is loaded. "
+			 "Ignore above error if any.\n");
+		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
+			goto out_no_device;
+		}
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+		ret = vmw_irq_install(dev, dev->pdev->irq);
+		if (ret != 0) {
+			DRM_ERROR("Failed installing irq: %d\n", ret);
+			goto out_no_irq;
+		}
+	}
+
+	dev_priv->fman = vmw_fence_manager_init(dev_priv);
+	if (unlikely(dev_priv->fman == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_fman;
+	}
+
+	ret = ttm_bo_device_init(&dev_priv->bdev,
+				 &vmw_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+		goto out_no_bdev;
+	}
+
+	/*
+	 * Enable VRAM, but initially don't use it until SVGA is enabled and
+	 * unhidden.
+	 */
+	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+			     (dev_priv->vram_size >> PAGE_SHIFT));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+		goto out_no_vram;
+	}
+	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+
+	dev_priv->has_gmr = true;
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 VMW_PL_GMR) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		dev_priv->has_mob = true;
+		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+				   VMW_PL_MOB) != 0) {
+			DRM_INFO("No MOB memory available. "
+				 "3D will be disabled.\n");
+			dev_priv->has_mob = false;
+		}
+	}
+
+	if (dev_priv->has_mob) {
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
+		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+	}
+
+	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+	ret = vmw_kms_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_kms;
+	vmw_overlay_init(dev_priv);
+
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		goto out_no_fifo;
+
+	if (dev_priv->has_dx) {
+		/*
+		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
+		 * support
+		 */
+		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
+			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+					SVGA3D_DEVCAP_SM41);
+			dev_priv->has_sm4_1 = vmw_read(dev_priv,
+							SVGA_REG_DEV_CAP);
+		}
+	}
+
+	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+		 ? "yes." : "no.");
+	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+
+	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
+		VMWGFX_REPO, VMWGFX_GIT_VERSION);
+	vmw_host_log(host_log);
+
+	memset(host_log, 0, sizeof(host_log));
+	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
+		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+		VMWGFX_DRIVER_PATCHLEVEL);
+	vmw_host_log(host_log);
+
+	if (dev_priv->enable_fb) {
+		vmw_fifo_resource_inc(dev_priv);
+		vmw_svga_enable(dev_priv);
+		vmw_fb_init(dev_priv);
+	}
+
+	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+	register_pm_notifier(&dev_priv->pm_nb);
+
+	return 0;
+
+out_no_fifo:
+	vmw_overlay_close(dev_priv);
+	vmw_kms_close(dev_priv);
+out_no_kms:
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
+	vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		vmw_irq_uninstall(dev_priv->dev);
+out_no_irq:
+	if (dev_priv->stealth)
+		pci_release_region(dev->pdev, 2);
+	else
+		pci_release_regions(dev->pdev);
+out_no_device:
+	ttm_object_device_release(&dev_priv->tdev);
+out_err4:
+	memunmap(dev_priv->mmio_virt);
+out_err0:
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
+	kfree(dev_priv);
+	return ret;
+}
+
+static void vmw_driver_unload(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	enum vmw_res_type i;
+
+	unregister_pm_notifier(&dev_priv->pm_nb);
+
+	if (dev_priv->ctx.res_ht_initialized)
+		drm_ht_remove(&dev_priv->ctx.res_ht);
+	vfree(dev_priv->ctx.cmd_bounce);
+	if (dev_priv->enable_fb) {
+		vmw_fb_off(dev_priv);
+		vmw_fb_close(dev_priv);
+		vmw_fifo_resource_dec(dev_priv);
+		vmw_svga_disable(dev_priv);
+	}
+
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
+	vmw_release_device_early(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	(void) ttm_bo_device_release(&dev_priv->bdev);
+	vmw_release_device_late(dev_priv);
+	vmw_fence_manager_takedown(dev_priv->fman);
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		vmw_irq_uninstall(dev_priv->dev);
+	if (dev_priv->stealth)
+		pci_release_region(dev->pdev, 2);
+	else
+		pci_release_regions(dev->pdev);
+
+	ttm_object_device_release(&dev_priv->tdev);
+	memunmap(dev_priv->mmio_virt);
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
+	kfree(dev_priv);
+}
+
+static void vmw_postclose(struct drm_device *dev,
+			 struct drm_file *file_priv)
+{
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+
+	ttm_object_file_release(&vmw_fp->tfile);
+	kfree(vmw_fp);
+}
+
+static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_fpriv *vmw_fp;
+	int ret = -ENOMEM;
+
+	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
+	if (unlikely(!vmw_fp))
+		return ret;
+
+	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+	if (unlikely(vmw_fp->tfile == NULL))
+		goto out_no_tfile;
+
+	file_priv->driver_priv = vmw_fp;
+
+	return 0;
+
+out_no_tfile:
+	kfree(vmw_fp);
+	return ret;
+}
+
+static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg,
+			      long (*ioctl_func)(struct file *, unsigned int,
+						 unsigned long))
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	unsigned int flags;
+
+	/*
+	 * Do extra checking on driver private ioctls.
+	 */
+
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		const struct drm_ioctl_desc *ioctl =
+			&vmw_ioctls[nr - DRM_COMMAND_BASE];
+
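+		/*
+		 * VMW_EXECBUF is passed straight through without the command
+		 * encoding check below, while VMW_UPDATE_LAYOUT additionally
+		 * requires DRM master or CAP_SYS_ADMIN.
+		 */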
+		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+			return ioctl_func(filp, cmd, arg);
+		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
+			if (!drm_is_current_master(file_priv) &&
+			    !capable(CAP_SYS_ADMIN))
+				return -EACCES;
+		}
+
+		if (unlikely(ioctl->cmd != cmd))
+			goto out_io_encoding;
+
+		flags = ioctl->flags;
+	} else if (!drm_ioctl_flags(nr, &flags))
+		return -EINVAL;
+
+	return ioctl_func(filp, cmd, arg);
+
+out_io_encoding:
+	DRM_ERROR("Invalid command format, ioctl %d\n",
+		  nr - DRM_COMMAND_BASE);
+
+	return -EINVAL;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg)
+{
+	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
+}
+#endif
+
+static int vmw_master_set(struct drm_device *dev,
+			  struct drm_file *file_priv,
+			  bool from_open)
+{
+	/*
+	 * Inform a new master that the layout may have changed while
+	 * it was gone.
+	 */
+	if (!from_open)
+		drm_sysfs_hotplug_event(dev);
+
+	return 0;
+}
+
+static void vmw_master_drop(struct drm_device *dev,
+			    struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	vmw_kms_legacy_hotspot_clear(dev_priv);
+	if (!dev_priv->enable_fb)
+		vmw_svga_disable(dev_priv);
+}
+
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
+	__vmw_svga_enable(dev_priv);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	/*
+	 * Disabling SVGA will turn off device modesetting capabilities, so
+	 * notify KMS about that so that it doesn't cache atomic state that
+	 * isn't valid anymore, for example crtcs turned on.
+	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
+	 * end up with lock order reversal. Thus, a master may actually perform
+	 * a new modeset just after we call vmw_kms_lost_device() and race with
+	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
+	 * to be inconsistent with the device, causing modesetting problems.
+	 */
+	vmw_kms_lost_device(dev_priv->dev);
+	ttm_write_lock(&dev_priv->reservation_sem, false);
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		spin_unlock(&dev_priv->svga_lock);
+		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+			DRM_ERROR("Failed evicting VRAM buffers.\n");
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	} else
+		spin_unlock(&dev_priv->svga_lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+}
+
+static void vmw_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	pci_disable_device(pdev);
+	drm_put_dev(dev);
+}
+
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr)
+{
+	struct vmw_private *dev_priv =
+		container_of(nb, struct vmw_private, pm_nb);
+
+	switch (val) {
+	case PM_HIBERNATION_PREPARE:
+		/*
+		 * Take the reservation sem in write mode, which will make sure
+		 * there are no other processes holding a buffer object
+		 * reservation, meaning we should be able to evict all buffer
+		 * objects if needed.
+		 * Once user-space processes have been frozen, we can release
+		 * the lock again.
+		 */
+		ttm_suspend_lock(&dev_priv->reservation_sem);
+		dev_priv->suspend_locked = true;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		if (READ_ONCE(dev_priv->suspend_locked)) {
+			dev_priv->suspend_locked = false;
+			ttm_suspend_unlock(&dev_priv->reservation_sem);
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	if (dev_priv->refuse_hibernation)
+		return -EBUSY;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+static int vmw_pci_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	return pci_enable_device(pdev);
+}
+
+static int vmw_pm_suspend(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct pm_message dummy;
+
+	dummy.event = 0;
+
+	return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+
+	return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_freeze(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
+
+	/*
+	 * Unlock for vmw_kms_suspend.
+	 * No user-space processes should be running now.
+	 */
+	ttm_suspend_unlock(&dev_priv->reservation_sem);
+	ret = vmw_kms_suspend(dev_priv->dev);
+	if (ret) {
+		ttm_suspend_lock(&dev_priv->reservation_sem);
+		DRM_ERROR("Failed to freeze modesetting.\n");
+		return ret;
+	}
+	if (dev_priv->enable_fb)
+		vmw_fb_off(dev_priv);
+
+	ttm_suspend_lock(&dev_priv->reservation_sem);
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_resource_evict_all(dev_priv);
+	vmw_release_device_early(dev_priv);
+	ttm_bo_swapout_all(&dev_priv->bdev);
+	if (dev_priv->enable_fb)
+		vmw_fifo_resource_dec(dev_priv);
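+	/*
+	 * If 3D resources are still active we cannot hibernate; undo the
+	 * steps above and resume normal operation.
+	 */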
+	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
+		if (dev_priv->enable_fb)
+			vmw_fifo_resource_inc(dev_priv);
+		WARN_ON(vmw_request_device_late(dev_priv));
+		dev_priv->suspend_locked = false;
+		ttm_suspend_unlock(&dev_priv->reservation_sem);
+		if (dev_priv->suspend_state)
+			vmw_kms_resume(dev);
+		if (dev_priv->enable_fb)
+			vmw_fb_on(dev_priv);
+		return -EBUSY;
+	}
+
+	vmw_fence_fifo_down(dev_priv->fman);
+	__vmw_svga_disable(dev_priv);
+
+	vmw_release_device_late(dev_priv);
+	return 0;
+}
+
+static int vmw_pm_restore(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
+
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	(void) vmw_read(dev_priv, SVGA_REG_ID);
+
+	if (dev_priv->enable_fb)
+		vmw_fifo_resource_inc(dev_priv);
+
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		return ret;
+
+	if (dev_priv->enable_fb)
+		__vmw_svga_enable(dev_priv);
+
+	vmw_fence_fifo_up(dev_priv->fman);
+	dev_priv->suspend_locked = false;
+	ttm_suspend_unlock(&dev_priv->reservation_sem);
+	if (dev_priv->suspend_state)
+		vmw_kms_resume(dev_priv->dev);
+
+	if (dev_priv->enable_fb)
+		vmw_fb_on(dev_priv);
+
+	return 0;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+	.freeze = vmw_pm_freeze,
+	.thaw = vmw_pm_restore,
+	.restore = vmw_pm_restore,
+	.suspend = vmw_pm_suspend,
+	.resume = vmw_pm_resume,
+};
+
+static const struct file_operations vmwgfx_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = vmw_unlocked_ioctl,
+	.mmap = vmw_mmap,
+	.poll = vmw_fops_poll,
+	.read = vmw_fops_read,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = vmw_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
+	.load = vmw_driver_load,
+	.unload = vmw_driver_unload,
+	.get_vblank_counter = vmw_get_vblank_counter,
+	.enable_vblank = vmw_enable_vblank,
+	.disable_vblank = vmw_disable_vblank,
+	.ioctls = vmw_ioctls,
+	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
+	.master_set = vmw_master_set,
+	.master_drop = vmw_master_drop,
+	.open = vmw_driver_open,
+	.postclose = vmw_postclose,
+
+	.dumb_create = vmw_dumb_create,
+	.dumb_map_offset = vmw_dumb_map_offset,
+	.dumb_destroy = vmw_dumb_destroy,
+
+	.prime_fd_to_handle = vmw_prime_fd_to_handle,
+	.prime_handle_to_fd = vmw_prime_handle_to_fd,
+
+	.fops = &vmwgfx_driver_fops,
+	.name = VMWGFX_DRIVER_NAME,
+	.desc = VMWGFX_DRIVER_DESC,
+	.date = VMWGFX_DRIVER_DATE,
+	.major = VMWGFX_DRIVER_MAJOR,
+	.minor = VMWGFX_DRIVER_MINOR,
+	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver vmw_pci_driver = {
+	.name = VMWGFX_DRIVER_NAME,
+	.id_table = vmw_pci_id_list,
+	.probe = vmw_probe,
+	.remove = vmw_remove,
+	.driver = {
+		.pm = &vmw_pm_ops
+	}
+};
+
+static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init vmwgfx_init(void)
+{
+	int ret;
+
+	if (vgacon_text_force())
+		return -EINVAL;
+
+	ret = pci_register_driver(&vmw_pci_driver);
+	if (ret)
+		DRM_ERROR("Failed initializing DRM.\n");
+	return ret;
+}
+
+static void __exit vmwgfx_exit(void)
+{
+	pci_unregister_driver(&vmw_pci_driver);
+}
+
+module_init(vmwgfx_init);
+module_exit(vmwgfx_exit);
+
+MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+	       __stringify(VMWGFX_DRIVER_MINOR) "."
+	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+	       "0");
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 0000000..765f7a6
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,1489 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_DRV_H_
+#define _VMWGFX_DRV_H_
+
+#include <linux/suspend.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_auth.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_hashtab.h>
+#include <drm/drm_rect.h>
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_module.h>
+
+#include "ttm_lock.h"
+#include "ttm_object.h"
+
+#include "vmwgfx_fence.h"
+#include "vmwgfx_reg.h"
+#include "vmwgfx_validation.h"
+
+/*
+ * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
+ * uapi headers should not depend on header files outside uapi/.
+ */
+#include <drm/vmwgfx_drm.h>
+
+
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20180704"
+#define VMWGFX_DRIVER_MAJOR 2
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+#define VMWGFX_MAX_RELOCATIONS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
+#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+			VMWGFX_NUM_GB_SHADER +\
+			VMWGFX_NUM_GB_SURFACE +\
+			VMWGFX_NUM_GB_SCREEN_TARGET)
+
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
+
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
+
+struct vmw_fpriv {
+	struct ttm_object_file *tfile;
+	bool gb_aware; /* user-space is guest-backed aware */
+};
+
+/**
+ * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
+ * @base: The TTM buffer object
+ * @res_list: List of resources using this buffer object as a backing MOB
+ * @pin_count: pin depth
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @map: Kmap object for semi-persistent mappings
+ * @res_prios: Eviction priority counts for attached resources
+ */
+struct vmw_buffer_object {
+	struct ttm_buffer_object base;
+	struct list_head res_list;
+	s32 pin_count;
+	/* Not ref-counted.  Protected by binding_mutex */
+	struct vmw_resource *dx_query_ctx;
+	/* Protected by reservation */
+	struct ttm_bo_kmap_obj map;
+	u32 res_prios[TTM_MAX_BO_PRIORITY];
+};
+
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure contains also driver private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+	bool validate_as_mob;
+};
+
+struct vmw_res_func;
+
+
+/**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
+struct vmw_resource {
+	struct kref kref;
+	struct vmw_private *dev_priv;
+	int id;
+	u32 used_prio;
+	unsigned long backup_size;
+	bool res_dirty;
+	bool backup_dirty;
+	struct vmw_buffer_object *backup;
+	unsigned long backup_offset;
+	unsigned long pin_count;
+	const struct vmw_res_func *func;
+	struct list_head lru_head;
+	struct list_head mob_head;
+	struct list_head binding_head;
+	void (*res_free) (struct vmw_resource *res);
+	void (*hw_destroy) (struct vmw_resource *res);
+};
+
+
+/*
+ * Resources that are managed using ioctls.
+ */
+enum vmw_res_type {
+	vmw_res_context,
+	vmw_res_surface,
+	vmw_res_stream,
+	vmw_res_shader,
+	vmw_res_dx_context,
+	vmw_res_cotable,
+	vmw_res_view,
+	vmw_res_max
+};
+
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+	vmw_cmdbuf_res_shader,
+	vmw_cmdbuf_res_view
+};
+
+struct vmw_cmdbuf_res_manager;
+
+struct vmw_cursor_snooper {
+	size_t age;
+	uint32_t *image;
+};
+
+struct vmw_framebuffer;
+struct vmw_surface_offset;
+
+struct vmw_surface {
+	struct vmw_resource res;
+	SVGA3dSurfaceAllFlags flags;
+	uint32_t format;
+	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	struct drm_vmw_size base_size;
+	struct drm_vmw_size *sizes;
+	uint32_t num_sizes;
+	bool scanout;
+	uint32_t array_size;
+	/* TODO so far just an extra pointer */
+	struct vmw_cursor_snooper snooper;
+	struct vmw_surface_offset *offsets;
+	SVGA3dTextureFilter autogen_filter;
+	uint32_t multisample_count;
+	struct list_head view_list;
+	SVGA3dMSPattern multisample_pattern;
+	SVGA3dMSQualityLevel quality_level;
+};
+
+struct vmw_marker_queue {
+	struct list_head head;
+	u64 lag;
+	u64 lag_time;
+	spinlock_t lock;
+};
+
+struct vmw_fifo_state {
+	unsigned long reserved_size;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
+	unsigned long static_buffer_size;
+	bool using_bounce_buffer;
+	uint32_t capabilities;
+	struct mutex fifo_mutex;
+	struct rw_semaphore rwsem;
+	struct vmw_marker_queue marker_queue;
+	bool dx;
+};
+
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @private: Pointer to the execbuf-private space in the resource
+ * validation node, if any.
+ * @valid_handle: Whether the @handle member is valid.
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+	uint32_t handle;
+	struct vmw_resource *res;
+	void *private;
+	unsigned short valid_handle;
+	unsigned short valid;
+};
+
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+	vmw_dma_phys,           /* Use physical page addresses */
+	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
+	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
+	vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @mode: The DMA mapping mode used for this table.
+ * @pages: Array of page pointers backing the buffer.
+ * @addrs: Array of DMA addresses if coherent pages are used.
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contiguous pages
+ * @num_pages: Total number of pages backing the buffer.
+ */
+struct vmw_sg_table {
+	enum vmw_dma_map_mode mode;
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_table *sgt;
+	unsigned long num_regions;
+	unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_dma_page_iter iter;
+	unsigned long i;
+	unsigned long num_pages;
+	bool (*next)(struct vmw_piter *);
+	dma_addr_t (*dma_address)(struct vmw_piter *);
+	struct page *(*page)(struct vmw_piter *);
+};
+
+/*
+ * enum vmw_display_unit_type - Describes the display unit
+ */
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
+};
+
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
+
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @bo_relocations: List of buffer object relocations
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
+struct vmw_sw_context{
+	struct drm_open_hash res_ht;
+	bool res_ht_initialized;
+	bool kernel;
+	struct vmw_fpriv *fp;
+	uint32_t *cmd_bounce;
+	uint32_t cmd_bounce_size;
+	struct vmw_buffer_object *cur_query_bo;
+	struct list_head bo_relocations;
+	struct list_head res_relocations;
+	uint32_t *buf_start;
+	struct vmw_res_cache_entry res_cache[vmw_res_max];
+	struct vmw_resource *last_query_ctx;
+	bool needs_post_query_barrier;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
+	struct list_head staged_cmd_res;
+	struct list_head ctx_list;
+	struct vmw_ctx_validation_info *dx_ctx_node;
+	struct vmw_buffer_object *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_validation_context *ctx;
+};
+
+struct vmw_legacy_display;
+struct vmw_overlay;
+
+struct vmw_vga_topology_state {
+	uint32_t width;
+	uint32_t height;
+	uint32_t primary;
+	uint32_t pos_x;
+	uint32_t pos_y;
+};
+
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
+
+enum {
+	VMW_IRQTHREAD_FENCE,
+	VMW_IRQTHREAD_CMDBUF,
+	VMW_IRQTHREAD_MAX
+};
+
+struct vmw_private {
+	struct ttm_bo_device bdev;
+
+	struct vmw_fifo_state fifo;
+
+	struct drm_device *dev;
+	unsigned long vmw_chipset;
+	unsigned int io_start;
+	uint32_t vram_start;
+	uint32_t vram_size;
+	uint32_t prim_bb_mem;
+	uint32_t mmio_start;
+	uint32_t mmio_size;
+	uint32_t fb_max_width;
+	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
+	uint32_t initial_width;
+	uint32_t initial_height;
+	u32 *mmio_virt;
+	uint32_t capabilities;
+	uint32_t capabilities2;
+	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t max_mob_pages;
+	uint32_t max_mob_size;
+	uint32_t memory_size;
+	bool has_gmr;
+	bool has_mob;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
+	bool has_dx;
+	bool assume_16bpp;
+	bool has_sm4_1;
+
+	/*
+	 * VGA registers.
+	 */
+
+	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
+	uint32_t vga_width;
+	uint32_t vga_height;
+	uint32_t vga_bpp;
+	uint32_t vga_bpl;
+	uint32_t vga_pitchlock;
+
+	uint32_t num_displays;
+
+	/*
+	 * Framebuffer info.
+	 */
+
+	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
+	struct vmw_legacy_display *ldu_priv;
+	struct vmw_overlay *overlay_priv;
+	struct drm_property *hotplug_mode_update_property;
+	struct drm_property *implicit_placement_property;
+	struct mutex global_kms_state_mutex;
+	spinlock_t cursor_lock;
+	struct drm_atomic_state *suspend_state;
+
+	/*
+	 * Context and surface management.
+	 */
+
+	spinlock_t resource_lock;
+	struct idr res_idr[vmw_res_max];
+
+	/*
+	 * A resource manager for kernel-only surfaces and
+	 * contexts.
+	 */
+
+	struct ttm_object_device *tdev;
+
+	/*
+	 * Fencing and IRQs.
+	 */
+
+	atomic_t marker_seq;
+	wait_queue_head_t fence_queue;
+	wait_queue_head_t fifo_queue;
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by waiter_lock */
+	int error_waiters; /* Protected by waiter_lock */
+	int fifo_queue_waiters; /* Protected by waiter_lock */
+	uint32_t last_read_seqno;
+	struct vmw_fence_manager *fman;
+	uint32_t irq_mask; /* Updates protected by waiter_lock */
+
+	/*
+	 * Device state
+	 */
+
+	uint32_t traces_state;
+	uint32_t enable_state;
+	uint32_t config_done_state;
+
+	/**
+	 * Execbuf
+	 */
+	/**
+	 * Protected by the cmdbuf mutex.
+	 */
+
+	struct vmw_sw_context ctx;
+	struct mutex cmdbuf_mutex;
+	struct mutex binding_mutex;
+
+	/**
+	 * Operating mode.
+	 */
+
+	bool stealth;
+	bool enable_fb;
+	spinlock_t svga_lock;
+
+	/**
+	 * PM management.
+	 */
+	struct notifier_block pm_nb;
+	bool refuse_hibernation;
+	bool suspend_locked;
+
+	struct mutex release_mutex;
+	atomic_t num_fifo_resources;
+
+	/*
+	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
+	 */
+	struct ttm_lock reservation_sem;
+
+	/*
+	 * Query processing. These members
+	 * are protected by the cmdbuf mutex.
+	 */
+
+	struct vmw_buffer_object *dummy_query_bo;
+	struct vmw_buffer_object *pinned_bo;
+	uint32_t query_cid;
+	uint32_t query_cid_valid;
+	bool dummy_query_bo_pinned;
+
+	/*
+	 * Surface swapping. The "surface_lru" list is protected by the
+	 * resource lock in order to be able to destroy a surface and take
+	 * it off the lru atomically. "used_memory_size" is currently
+	 * protected by the cmdbuf mutex for simplicity.
+	 */
+
+	struct list_head res_lru[vmw_res_max];
+	uint32_t used_memory_size;
+
+	/*
+	 * DMA mapping stuff.
+	 */
+	enum vmw_dma_map_mode map_mode;
+
+	/*
+	 * Guest Backed stuff
+	 */
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
+	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+	/* Validation memory reservation */
+	struct vmw_validation_mem vvm;
+};
+
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_surface, res);
+}
+
+static inline struct vmw_private *vmw_priv(struct drm_device *dev)
+{
+	return (struct vmw_private *)dev->dev_private;
+}
+
+static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
+{
+	return (struct vmw_fpriv *)file_priv->driver_priv;
+}
+
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
+static inline void vmw_write(struct vmw_private *dev_priv,
+			     unsigned int offset, uint32_t value)
+{
+	spin_lock(&dev_priv->hw_lock);
+	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock(&dev_priv->hw_lock);
+}
+
+static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+				unsigned int offset)
+{
+	u32 val;
+
+	spin_lock(&dev_priv->hw_lock);
+	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock(&dev_priv->hw_lock);
+
+	return val;
+}
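+
+/*
+ * Illustrative sketch (not part of the original driver code): a simple
+ * read-modify-write built on the accessors above. Note that each accessor
+ * takes and drops @hw_lock individually, so the sequence as a whole is not
+ * atomic; the register offset is left as a parameter since no particular
+ * register is assumed here.
+ */
+static inline void vmw_example_reg_set_bits(struct vmw_private *dev_priv,
+					    unsigned int offset, uint32_t bits)
+{
+	uint32_t val = vmw_read(dev_priv, offset);
+
+	vmw_write(dev_priv, offset, val | bits);
+}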
+
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
+
+/**
+ * GMR utilities - vmwgfx_gmr.c
+ */
+
+extern int vmw_gmr_bind(struct vmw_private *dev_priv,
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_pages,
+			int gmr_id);
+extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+struct vmw_user_resource_conv;
+
+extern void vmw_resource_unreference(struct vmw_resource **p_res);
+extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t handle,
+				  struct vmw_surface **out_surf,
+				  struct vmw_buffer_object **out_buf);
+extern int vmw_user_resource_lookup_handle(
+	struct vmw_private *dev_priv,
+	struct ttm_object_file *tfile,
+	uint32_t handle,
+	const struct vmw_user_resource_conv *converter,
+	struct vmw_resource **p_res);
+extern struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+				      struct ttm_object_file *tfile,
+				      uint32_t handle,
+				      const struct vmw_user_resource_conv *
+				      converter);
+extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t *inout_id,
+				  struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool dirty_set,
+				   bool dirty,
+				   bool switch_backup,
+				   struct vmw_buffer_object *new_backup,
+				   unsigned long new_backup_offset);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+void vmw_resource_mob_attach(struct vmw_resource *res);
+void vmw_resource_mob_detach(struct vmw_resource *res);
+
+/**
+ * vmw_resource_mob_attached - Whether a resource currently has a mob attached
+ * @res: The resource
+ *
+ * Return: true if the resource has a mob attached, false otherwise.
+ */
+static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+{
+	return !list_empty(&res->mob_head);
+}
+
+/**
+ * vmw_user_resource_noref_release - release a user resource pointer looked up
+ * without reference
+ */
+static inline void vmw_user_resource_noref_release(void)
+{
+	ttm_base_object_noref_release();
+}
+
+/**
+ * Buffer object helper functions - vmwgfx_bo.c
+ */
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+				   struct vmw_buffer_object *bo,
+				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_buffer_object *buf,
+				     bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+				       struct vmw_buffer_object *bo,
+				       bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+			struct vmw_buffer_object *bo,
+			bool interruptible);
+extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+				 SVGAGuestPtr *ptr);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *vmw_bo,
+		       size_t size, struct ttm_placement *placement,
+		       bool interuptable,
+		       void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+				     struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t size,
+			     bool shareable,
+			     uint32_t *handle,
+			     struct vmw_buffer_object **p_dma_buf,
+			     struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+				 struct vmw_buffer_object *dma_buf,
+				 uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+			      uint32_t id, struct vmw_buffer_object **out,
+			      struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+extern struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+
+/**
+ * vmw_user_bo_noref_release - release a buffer object pointer looked up
+ * without reference
+ */
+static inline void vmw_user_bo_noref_release(void)
+{
+	ttm_base_object_noref_release();
+}
+
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_buffer_object
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
+{
+	int i = ARRAY_SIZE(vbo->res_prios);
+
+	while (i--) {
+		if (vbo->res_prios[i]) {
+			vbo->base.priority = i;
+			return;
+		}
+	}
+
+	vbo->base.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
+{
+	if (vbo->res_prios[prio]++ == 0)
+		vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
+{
+	if (--vbo->res_prios[prio] == 0)
+		vmw_bo_prio_adjust(vbo);
+}
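+
+/*
+ * Illustrative sketch (not part of the original driver code): hypothetical
+ * attach/detach helpers showing how the counters above are meant to be kept
+ * balanced. Every vmw_bo_prio_add() must eventually be matched by a
+ * vmw_bo_prio_del() at the same priority, so the buffer object always ends
+ * up with the highest priority of the resources still attached to it.
+ */
+static inline void vmw_example_res_prio_attach(struct vmw_buffer_object *vbo,
+					       const struct vmw_resource *res)
+{
+	vmw_bo_prio_add(vbo, res->used_prio);
+}
+
+static inline void vmw_example_res_prio_detach(struct vmw_buffer_object *vbo,
+					       const struct vmw_resource *res)
+{
+	vmw_bo_prio_del(vbo, res->used_prio);
+}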
+
+/**
+ * Misc Ioctl functionality - vmwgfx_ioctl.c
+ */
+
+extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int vmw_present_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+extern __poll_t vmw_fops_poll(struct file *filp,
+				  struct poll_table_struct *wait);
+extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+			     size_t count, loff_t *offset);
+
+/**
+ * Fifo utilities - vmwgfx_fifo.c
+ */
+
+extern int vmw_fifo_init(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo);
+extern void vmw_fifo_release(struct vmw_private *dev_priv,
+			     struct vmw_fifo_state *fifo);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
+			       uint32_t *seqno);
+extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
+extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
+extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
+
+#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)                        \
+({                                                                            \
+	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({                 \
+		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
+			  __func__, (unsigned int) __bytes);                  \
+		NULL;                                                         \
+	});                                                                   \
+})
+
+#define VMW_FIFO_RESERVE(__priv, __bytes)                                     \
+	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
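+
+/*
+ * Illustrative sketch (not part of the original driver code): the usual
+ * reserve/fill/commit pattern for emitting a FIFO command. The payload is
+ * left as opaque dwords here; real callers reserve space for a specific SVGA
+ * command struct and fill in its header and body before committing.
+ */
+static inline int vmw_example_fifo_emit(struct vmw_private *dev_priv,
+					uint32_t dwords)
+{
+	u32 *cmd = VMW_FIFO_RESERVE(dev_priv, dwords * sizeof(u32));
+
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	memset(cmd, 0, dwords * sizeof(u32));	/* fill in the real command here */
+	vmw_fifo_commit(dev_priv, dwords * sizeof(u32));
+
+	return 0;
+}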
+
+/**
+ * TTM glue - vmwgfx_ttm_glue.c
+ */
+
+extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+					size_t gran);
+/**
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
+ */
+
+extern const size_t vmw_tt_size;
+extern struct ttm_placement vmw_vram_placement;
+extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
+extern struct ttm_placement vmw_vram_gmr_ne_placement;
+extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
+extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
+extern struct ttm_placement vmw_nonfixed_placement;
+extern struct ttm_bo_driver vmw_bo_driver;
+extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
+extern void vmw_piter_start(struct vmw_piter *viter,
+			    const struct vmw_sg_table *vsgt,
+			    unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+	return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns a pointer to the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+	return viter->page(viter);
+}
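+
+/*
+ * Illustrative sketch (not part of the original driver code): walking every
+ * page of a bound buffer object with the iterator above. vmw_piter_start()
+ * is assumed to position the iterator just before the requested offset, as
+ * the driver's GMR binding code relies on, so vmw_piter_next() is called
+ * once before each access.
+ */
+static inline unsigned long
+vmw_example_count_dma_pages(const struct vmw_sg_table *vsgt)
+{
+	struct vmw_piter viter;
+	unsigned long count = 0;
+
+	vmw_piter_start(&viter, vsgt, 0);
+	while (vmw_piter_next(&viter)) {
+		dma_addr_t addr = vmw_piter_dma_addr(&viter);
+
+		(void) addr;	/* a real user would program this address */
+		count++;
+	}
+
+	return count;
+}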
+
+/**
+ * Command submission - vmwgfx_execbuf.c
+ */
+
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int vmw_execbuf_process(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       void __user *user_commands,
+			       void *kernel_commands,
+			       uint32_t command_size,
+			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
+			       struct drm_vmw_fence_rep __user
+			       *user_fence_rep,
+			       struct vmw_fence_obj **out_fence,
+			       uint32_t flags);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+					    struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
+
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+				      struct vmw_private *dev_priv,
+				      struct vmw_fence_obj **p_fence,
+				      uint32_t *p_handle);
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+					struct vmw_fpriv *vmw_fp,
+					int ret,
+					struct drm_vmw_fence_rep __user
+					*user_fence_rep,
+					struct vmw_fence_obj *fence,
+					uint32_t fence_handle,
+					int32_t out_fence_fd);
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
+
+/**
+ * IRQs and waiting - vmwgfx_irq.c
+ */
+
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
+extern void vmw_irq_uninstall(struct drm_device *dev);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+				uint32_t seqno);
+extern int vmw_fallback_wait(struct vmw_private *dev_priv,
+			     bool lazy,
+			     bool fifo_idle,
+			     uint32_t seqno,
+			     bool interruptible,
+			     unsigned long timeout);
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
+				struct vmw_fifo_state *fifo_state);
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
+
+/**
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
+ */
+
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+			   uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+			   uint32_t signaled_seqno);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+			struct vmw_marker_queue *queue, uint32_t us);
+
+/**
+ * Kernel framebuffer - vmwgfx_fb.c
+ */
+
+int vmw_fb_init(struct vmw_private *vmw_priv);
+int vmw_fb_close(struct vmw_private *dev_priv);
+int vmw_fb_off(struct vmw_private *vmw_priv);
+int vmw_fb_on(struct vmw_private *vmw_priv);
+
+/**
+ * Kernel modesetting - vmwgfx_kms.c
+ */
+
+int vmw_kms_init(struct vmw_private *dev_priv);
+int vmw_kms_close(struct vmw_private *dev_priv);
+int vmw_kms_save_vga(struct vmw_private *vmw_priv);
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+			  struct ttm_object_file *tfile,
+			  struct ttm_buffer_object *bo,
+			  SVGA3dCmdHeader *header);
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+		       unsigned width, unsigned height, unsigned pitch,
+		       unsigned bpp, unsigned depth);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height);
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid, int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+int vmw_kms_suspend(struct drm_device *dev);
+int vmw_kms_resume(struct drm_device *dev);
+void vmw_kms_lost_device(struct drm_device *dev);
+
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
+/**
+ * Overlay control - vmwgfx_overlay.c
+ */
+
+int vmw_overlay_init(struct vmw_private *dev_priv);
+int vmw_overlay_close(struct vmw_private *dev_priv);
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+int vmw_overlay_stop_all(struct vmw_private *dev_priv);
+int vmw_overlay_resume_all(struct vmw_private *dev_priv);
+int vmw_overlay_pause_all(struct vmw_private *dev_priv);
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
+
+/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
+
+/*
+ * Memory OBject (MOB) management - vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+			   struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     int id,
+			     struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+					  struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       SVGA3dSurfaceAllFlags svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       SVGA3dMSPattern multisample_pattern,
+			       SVGA3dMSQualityLevel quality_level,
+			       struct vmw_surface **srf_out);
+extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
+					   void *data,
+					   struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
+					      void *data,
+					      struct drm_file *file_priv);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+				 struct vmw_cmdbuf_res_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
+extern struct vmw_resource *
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+			      enum vmw_cmdbuf_res_type res_type,
+			      u32 user_key,
+			      struct vmw_resource *res,
+			      struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+				 enum vmw_cmdbuf_res_type res_type,
+				 u32 user_key,
+				 struct list_head *list,
+				 struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
+
+/* CPU blit utilities - vmwgfx_blit.c */
+
+/**
+ * struct vmw_diff_cpy - CPU blit information structure
+ *
+ * @rect: The output bounding box rectangle.
+ * @line: The current line of the blit.
+ * @line_offset: Offset of the current line segment.
+ * @cpp: Bytes per pixel (granularity information).
+ * @do_cpy: Which memcpy function to use.
+ */
+struct vmw_diff_cpy {
+	struct drm_rect rect;
+	size_t line;
+	size_t line_offset;
+	int cpp;
+	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+		       size_t n);
+};
+
+#define VMW_CPU_BLIT_INITIALIZER {	\
+	.do_cpy = vmw_memcpy,		\
+}
+
+#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
+	.line = 0,				  \
+	.line_offset = 0,			  \
+	.rect = { .x1 = INT_MAX/2,		  \
+		  .y1 = INT_MAX/2,		  \
+		  .x2 = INT_MIN/2,		  \
+		  .y2 = INT_MIN/2		  \
+	},					  \
+	.cpp = _cpp,				  \
+	.do_cpy = vmw_diff_memcpy,		  \
+}
+
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+		     size_t n);
+
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+		    u32 dst_offset, u32 dst_stride,
+		    struct ttm_buffer_object *src,
+		    u32 src_offset, u32 src_stride,
+		    u32 w, u32 h,
+		    struct vmw_diff_cpy *diff);
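+
+/*
+ * Illustrative sketch (not part of the original driver code): a difference-
+ * tracking CPU blit. With VMW_CPU_BLIT_DIFF_INITIALIZER the copy only writes
+ * bytes that actually differ, and @diff.rect comes back as the bounding box
+ * of the modified region, which a caller can reuse as a damage rectangle.
+ * Offsets are assumed to be zero and both buffers to share the same stride.
+ */
+static inline int vmw_example_diff_blit(struct ttm_buffer_object *dst,
+					struct ttm_buffer_object *src,
+					u32 stride, u32 width, u32 height,
+					u32 cpp, struct drm_rect *damage)
+{
+	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(cpp);
+	int ret;
+
+	ret = vmw_bo_cpu_blit(dst, 0, stride, src, 0, stride,
+			      width * cpp, height, &diff);
+	if (ret == 0)
+		*damage = diff.rect;
+
+	return ret;
+}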
+
+/* Host messaging - vmwgfx_msg.c */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+			   char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
+/* VMW logging */
+
+/**
+ * VMW_DEBUG_USER - Debug output for user-space debugging.
+ *
+ * @fmt: printf() like format string.
+ *
+ * This macro is for logging user-space error and debugging messages for e.g.
+ * command buffer execution errors due to malformed commands, invalid context,
+ * etc.
+ */
+#define VMW_DEBUG_USER(fmt, ...)                                              \
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
+/**
+ * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+ *
+ * This macro is for debugging vmwgfx mode-setting code.
+ */
+#define VMW_DEBUG_KMS(fmt, ...)                                               \
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
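+
+/*
+ * Illustrative usage (not part of the original driver code), e.g. from a
+ * command verifier rejecting a malformed submission:
+ *
+ *	VMW_DEBUG_USER("Invalid surface id %u.\n", handle);
+ */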
+
+/**
+ * Inline helper functions
+ */
+
+static inline void vmw_surface_unreference(struct vmw_surface **srf)
+{
+	struct vmw_surface *tmp_srf = *srf;
+	struct vmw_resource *res = &tmp_srf->res;
+	*srf = NULL;
+
+	vmw_resource_unreference(&res);
+}
+
+static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
+{
+	(void) vmw_resource_reference(&srf->res);
+	return srf;
+}
+
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
+{
+	struct vmw_buffer_object *tmp_buf = *buf;
+
+	*buf = NULL;
+	if (tmp_buf != NULL) {
+		ttm_bo_put(&tmp_buf->base);
+	}
+}
+
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
+{
+	ttm_bo_get(&buf->base);
+	return buf;
+}
+
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+	return &ttm_mem_glob;
+}
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
+
+/**
+ * vmw_mmio_read - Perform a MMIO read from volatile memory
+ *
+ * @addr: The address to read from
+ *
+ * This function is intended to be equivalent to ioread32() on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline u32 vmw_mmio_read(u32 *addr)
+{
+	return READ_ONCE(*addr);
+}
+
+/**
+ * vmw_mmio_write - Perform a MMIO write to volatile memory
+ *
+ * @addr: The address to write to
+ *
+ * This function is intended to be equivalent to iowrite32 on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline void vmw_mmio_write(u32 value, u32 *addr)
+{
+	WRITE_ONCE(*addr, value);
+}
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 0000000..8db3b3d
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,4065 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/sync_file.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_reg.h"
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+#define VMW_RES_HT_ORDER 12
+
+/*
+ * Helper macro to get dx_ctx_node if available otherwise print an error
+ * message. This is for use in command verifier function where if dx_ctx_node
+ * is not set then command is invalid.
+ */
+#define VMW_GET_CTX_NODE(__sw_context)                                        \
+({                                                                            \
+	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
+		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
+		__sw_context->dx_ctx_node;                                    \
+	});                                                                   \
+})
+
+#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
+	struct {                                                              \
+		SVGA3dCmdHeader header;                                       \
+		__type body;                                                  \
+	} __var
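+
+/*
+ * Illustrative sketch (not part of the original driver code, hence compiled
+ * out): the general shape of a command verifier built from the two helpers
+ * above. The command body type and the checks are placeholders; a real
+ * verifier validates the referenced resources and adds them to the
+ * validation list.
+ */
+#if 0
+static int vmw_cmd_example_check(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdExample);	/* placeholder body type */
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	/* ... check cmd->body and add referenced resources here ... */
+
+	return 0;
+}
+#endif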
+
+/**
+ * struct vmw_relocation - Buffer object relocation
+ *
+ * @head: List head for the command submission context's relocation list
+ * @vbo: Non ref-counted pointer to buffer object
+ * @mob_loc: Pointer to location for mob id to be modified
+ * @location: Pointer to location for guest pointer to be modified
+ */
+struct vmw_relocation {
+	struct list_head head;
+	struct vmw_buffer_object *vbo;
+	union {
+		SVGAMobId *mob_loc;
+		SVGAGuestPtr *location;
+	};
+};
+
+/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
+ * validation is -1, the command is replaced with a NOP. Otherwise no action.
+ */
+enum vmw_resource_relocation_type {
+	vmw_res_rel_normal,
+	vmw_res_rel_nop,
+	vmw_res_rel_cond_nop,
+	vmw_res_rel_max
+};
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset of single byte entries into the command buffer where the id
+ * that needs fixup is located.
+ * @rel_type: Type of relocation.
+ */
+struct vmw_resource_relocation {
+	struct list_head head;
+	const struct vmw_resource *res;
+	u32 offset:29;
+	enum vmw_resource_relocation_type rel_type:3;
+};
+
+/**
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ *
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
+ */
+struct vmw_ctx_validation_info {
+	struct list_head head;
+	struct vmw_resource *ctx;
+	struct vmw_ctx_binding_state *cur;
+	struct vmw_ctx_binding_state *staged;
+};
+
+/**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+	int (*func) (struct vmw_private *, struct vmw_sw_context *,
+		     SVGA3dCmdHeader *);
+	bool user_allow;
+	bool gb_disable;
+	bool gb_enable;
+	const char *cmd_name;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
+	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+				       (_gb_disable), (_gb_enable), #_cmd}
+
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_buffer_object **vmw_bo_p);
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+	return (unsigned long) b - (unsigned long) a;
+}
+
+/**
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ *
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state changes
+ * should be ignored
+ */
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+					bool backoff)
+{
+	struct vmw_ctx_validation_info *entry;
+
+	list_for_each_entry(entry, &sw_context->ctx_list, head) {
+		if (!backoff)
+			vmw_binding_state_commit(entry->cur, entry->staged);
+
+		if (entry->staged != sw_context->staged_bindings)
+			vmw_binding_state_free(entry->staged);
+		else
+			sw_context->staged_bindings_inuse = false;
+	}
+
+	/* List entries are freed with the validation context */
+	INIT_LIST_HEAD(&sw_context->ctx_list);
+}
+
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ *
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+	if (sw_context->dx_query_mob)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
+}
+
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
+ * the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The command submission context
+ * @res: The context resource to set up
+ * @node: The validation node holding the context resource metadata
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource *res,
+				   struct vmw_ctx_validation_info *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged)) {
+			ret = PTR_ERR(node->staged);
+			node->staged = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
+
+	node->ctx = res;
+	node->cur = vmw_context_binding_state(res);
+	list_add_tail(&node->head, &sw_context->ctx_list);
+
+	return 0;
+
+out_err:
+	return ret;
+}
+
+/**
+ * vmw_execbuf_res_size - calculate extra size for the resource validation node
+ *
+ * @dev_priv: Pointer to the device private struct.
+ * @res_type: The resource type.
+ *
+ * Guest-backed contexts and DX contexts require extra size to store
+ * execbuf-private information in the validation node, typically the data
+ * structures associated with the binding manager.
+ *
+ * Returns: The extra size requirement based on resource type.
+ */
+static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
+					 enum vmw_res_type res_type)
+{
+	return (res_type == vmw_res_dx_context ||
+		(res_type == vmw_res_context && dev_priv->has_mob)) ?
+		sizeof(struct vmw_ctx_validation_info) : 0;
+}
+
+/**
+ * vmw_execbuf_rcache_update - Update a resource-node cache entry
+ *
+ * @rcache: Pointer to the entry to update.
+ * @res: Pointer to the resource.
+ * @private: Pointer to the execbuf-private space in the resource validation
+ * node.
+ */
+static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+				      struct vmw_resource *res,
+				      void *private)
+{
+	rcache->res = res;
+	rcache->private = private;
+	rcache->valid = 1;
+	rcache->valid_handle = 0;
+}
+
+/**
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+ * rcu-protected pointer to the validation list.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Unreferenced rcu-protected pointer to the resource.
+ * @dirty: Whether to change dirty status.
+ *
+ * Returns: 0 on success. Negative error code on failure. Typical error codes
+ * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
+ */
+static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+					 struct vmw_resource *res,
+					 u32 dirty)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	enum vmw_res_type res_type = vmw_res_type(res);
+	struct vmw_res_cache_entry *rcache;
+	struct vmw_ctx_validation_info *ctx_info;
+	bool first_usage;
+	unsigned int priv_size;
+
+	rcache = &sw_context->res_cache[res_type];
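+	/*
+	 * Fast path: the resource is already on the validation list; only
+	 * update its dirty state and drop the noref reference.
+	 */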
+	if (likely(rcache->valid && rcache->res == res)) {
+		if (dirty)
+			vmw_validation_res_set_dirty(sw_context->ctx,
+						     rcache->private, dirty);
+		vmw_user_resource_noref_release();
+		return 0;
+	}
+
+	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+					  dirty, (void **)&ctx_info,
+					  &first_usage);
+	vmw_user_resource_noref_release();
+	if (ret)
+		return ret;
+
+	if (priv_size && first_usage) {
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+					      ctx_info);
+		if (ret) {
+			VMW_DEBUG_USER("Failed first usage context setup.\n");
+			return ret;
+		}
+	}
+
+	vmw_execbuf_rcache_update(rcache, res, ctx_info);
+	return 0;
+}
+
+/**
+ * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+ * validation list if it's not already on it
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @dirty: Whether to change dirty status.
+ *
+ * Returns: Zero on success. Negative error code on failure.
+ */
+static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+					 struct vmw_resource *res,
+					 u32 dirty)
+{
+	struct vmw_res_cache_entry *rcache;
+	enum vmw_res_type res_type = vmw_res_type(res);
+	void *ptr;
+	int ret;
+
+	rcache = &sw_context->res_cache[res_type];
+	if (likely(rcache->valid && rcache->res == res)) {
+		if (dirty)
+			vmw_validation_res_set_dirty(sw_context->ctx,
+						     rcache->private, dirty);
+		return 0;
+	}
+
+	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+					  &ptr, NULL);
+	if (ret)
+		return ret;
+
+	vmw_execbuf_rcache_update(rcache, res, ptr);
+
+	return 0;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view, and the surface it points to, to the
+ * validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise it may be
+	 * swapped out when the view is validated.
+	 */
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+					    vmw_view_dirtying(view));
+	if (ret)
+		return ret;
+
+	return vmw_execbuf_res_noctx_val_add(sw_context, view,
+					     VMW_RES_DIRTY_NONE);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it points
+ * to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on, or
+ * scheduled for creation on. If there is no DX context set, the function will
+ * return an -EINVAL error pointer.
+ *
+ * Returns: Unreferenced pointer to the resource on success, negative error
+ * pointer on failure.
+ */
+static struct vmw_resource *
+vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+		    enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node)
+		return ERR_PTR(-EINVAL);
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return view;
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return view;
+}
+
+/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on the
+ * resource validation list. This is part of the context state reemission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_bindinfo *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+	u32 i;
+
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
+
+			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+							    VMW_RES_DIRTY_SET);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	/* Add all resources bound to the context to the validation list */
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_execbuf_res_noctx_val_add
+				(sw_context, entry->res,
+				 vmw_binding_dirtying(entry->bt));
+		if (unlikely(ret != 0))
+			break;
+	}
+
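+	/*
+	 * If the context owns a DX query MOB, add it to the list of buffer
+	 * objects to validate.
+	 */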
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_buffer_object *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_validation_add_bo(sw_context->ctx,
+						    dx_query_mob, true, false);
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @sw_context: Pointer to the software context holding the relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the id
+ * that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
+ */
+static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
+				       const struct vmw_resource *res,
+				       unsigned long offset,
+				       enum vmw_resource_relocation_type
+				       rel_type)
+{
+	struct vmw_resource_relocation *rel;
+
+	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
+	if (unlikely(!rel)) {
+		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
+		return -ENOMEM;
+	}
+
+	rel->res = res;
+	rel->offset = offset;
+	rel->rel_type = rel_type;
+	list_add_tail(&rel->head, &sw_context->res_relocations);
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+	/* Memory is validation context memory, so no need to free it */
+	INIT_LIST_HEAD(list);
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need not be
+ * the same buffer as the one being parsed when the relocation list was built,
+ * but the contents must be the same modulo the resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+					   struct list_head *list)
+{
+	struct vmw_resource_relocation *rel;
+
+	/* Validate the struct vmw_resource_relocation member size */
+	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
+	list_for_each_entry(rel, list, head) {
+		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+		switch (rel->rel_type) {
+		case vmw_res_rel_normal:
+			*addr = rel->res->id;
+			break;
+		case vmw_res_rel_nop:
+			*addr = SVGA_3D_CMD_NOP;
+			break;
+		default:
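+			/*
+			 * Conditional relocation: NOP out the command only if
+			 * the resource was never created (id == -1).
+			 */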
+			if (rel->res->id == -1)
+				*addr = SVGA_3D_CMD_NOP;
+			break;
+		}
+	}
+}
+
+static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+			   struct vmw_sw_context *sw_context,
+			   SVGA3dCmdHeader *header)
+{
+	return -EINVAL;
+}
+
+static int vmw_cmd_ok(struct vmw_private *dev_priv,
+		      struct vmw_sw_context *sw_context,
+		      SVGA3dCmdHeader *header)
+{
+	return 0;
+}
+
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's resource
+ * list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
+ * only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
+{
+	int ret;
+
+	ret = vmw_validation_res_reserve(sw_context->ctx, true);
+	if (ret)
+		return ret;
+
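+	/*
+	 * If the context already has a DX query MOB bound, the MOB referenced
+	 * by this submission must be the same one.
+	 */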
+	if (sw_context->dx_query_mob) {
+		struct vmw_buffer_object *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
+ * resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @dirty: Whether to change dirty status.
+ * @converter: User-space visible type-specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being parsed
+ * from where the user-space resource id handle is located.
+ * @p_res: Pointer to a pointer to the resource. Populated on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  u32 dirty,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource **p_res)
+{
+	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
+	struct vmw_resource *res;
+	int ret;
+
+	if (p_res)
+		*p_res = NULL;
+
+	if (*id_loc == SVGA3D_INVALID_ID) {
+		if (res_type == vmw_res_context) {
+			VMW_DEBUG_USER("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
+		return 0;
+	}
+
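+	/*
+	 * Fast path: the handle matches the cached entry for this resource
+	 * type, so the user-space handle lookup can be skipped.
+	 */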
+	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+		res = rcache->res;
+		if (dirty)
+			vmw_validation_res_set_dirty(sw_context->ctx,
+						     rcache->private, dirty);
+	} else {
+		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
+
+		ret = vmw_validation_preload_res(sw_context->ctx, size);
+		if (ret)
+			return ret;
+
+		res = vmw_user_resource_noref_lookup_handle
+			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
+		if (IS_ERR(res)) {
+			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+				       (unsigned int) *id_loc);
+			return PTR_ERR(res);
+		}
+
+		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
+		if (unlikely(ret != 0))
+			return ret;
+
+		if (rcache->valid && rcache->res == res) {
+			rcache->valid_handle = true;
+			rcache->handle = *id_loc;
+		}
+	}
+
+	ret = vmw_resource_relocation_add(sw_context, res,
+					  vmw_ptr_diff(sw_context->buf_start,
+						       id_loc),
+					  vmw_res_rel_normal);
+	if (p_res)
+		*p_res = res;
+
+	return 0;
+}
+
+/**
+ * vmw_rebind_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_buffer_object *dx_query_mob;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
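+	/* Nothing to do if there is no query MOB or it is already bound. */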
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to referenced
+ * contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_ctx_validation_info *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->ctx_list, head) {
+		ret = vmw_binding_rebind_all(val->cur);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				VMW_DEBUG_USER("Failed to rebind context.\n");
+			return ret;
+		}
+
+		ret = vmw_rebind_all_dx_query(val->ctx);
+		if (ret != 0) {
+			VMW_DEBUG_USER("Failed to rebind queries.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context binding
+ * state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+				 enum vmw_view_type view_type,
+				 enum vmw_ctx_binding_type binding_type,
+				 uint32 shader_slot,
+				 uint32 view_ids[], u32 num_views,
+				 u32 first_slot)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	u32 i;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	for (i = 0; i < num_views; ++i) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_resource *view = NULL;
+
+		if (view_ids[i] != SVGA3D_INVALID_ID) {
+			view = vmw_view_id_val_add(sw_context, view_type,
+						   view_ids[i]);
+			if (IS_ERR(view)) {
+				VMW_DEBUG_USER("View not found.\n");
+				return PTR_ERR(view);
+			}
+		}
+		binding.bi.ctx = ctx_node->ctx;
+		binding.bi.res = view;
+		binding.bi.bt = binding_type;
+		binding.shader_slot = shader_slot;
+		binding.slot = first_slot + i;
+		vmw_binding_add(ctx_node->staged, &binding.bi,
+				shader_slot, binding.slot);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 VMW_RES_DIRTY_SET, user_context_converter,
+				 &cmd->body, NULL);
+}
+
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ *
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command
+ * submission context's resource cache, and hence be the last resource of that
+ * type to have been processed by the validation code.
+ *
+ * Return: a pointer to the private metadata of the resource, or NULL if it
+ * wasn't found
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+			  struct vmw_resource *res)
+{
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[vmw_res_type(res)];
+
+	if (rcache->valid && rcache->res == res)
+		return rcache->private;
+
+	WARN_ON_ONCE(true);
+	return NULL;
+}
+
+static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_RT_MAX) {
+		VMW_DEBUG_USER("Illegal render target type %u.\n",
+			       (unsigned int) cmd->body.type);
+		return -EINVAL;
+	}
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_SET, user_surface_converter,
+				&cmd->body.target.sid, &res);
+	if (unlikely(ret))
+		return ret;
+
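+	/*
+	 * For guest-backed contexts, also track the render target binding in
+	 * the context binding state.
+	 */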
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_ctx_validation_info *node;
+
+		node = vmw_execbuf_info_from_res(sw_context, ctx);
+		if (!node)
+			return -EINVAL;
+
+		binding.bi.ctx = ctx;
+		binding.bi.res = res;
+		binding.bi.bt = vmw_ctx_binding_rt;
+		binding.slot = cmd->body.type;
+		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
+	}
+
+	return 0;
+}
+
+static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.src.sid, NULL);
+	if (ret)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
+}
+
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.src, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.dstSid, NULL);
+}
+
+static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.src.sid, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
+}
+
+static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.srcImage.sid, NULL);
+}
+
+static int vmw_cmd_present_check(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding query
+ * results, and if another buffer currently is pinned for query results. If so,
+ * the function prepares the state of @sw_context for switching pinned buffers
+ * after successful submission of the current command batch.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+				       struct vmw_buffer_object *new_query_bo,
+				       struct vmw_sw_context *sw_context)
+{
+	struct vmw_res_cache_entry *ctx_entry =
+		&sw_context->res_cache[vmw_res_context];
+	int ret;
+
+	BUG_ON(!ctx_entry->valid);
+	sw_context->last_query_ctx = ctx_entry->res;
+
+	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
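+		/* Query result buffers are limited to four pages. */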
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
+			VMW_DEBUG_USER("Query buffer too large.\n");
+			return -EINVAL;
+		}
+
+		if (unlikely(sw_context->cur_query_bo != NULL)) {
+			sw_context->needs_post_query_barrier = true;
+			ret = vmw_validation_add_bo(sw_context->ctx,
+						    sw_context->cur_query_bo,
+						    dev_priv->has_mob, false);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+		sw_context->cur_query_bo = new_query_bo;
+
+		ret = vmw_validation_add_bo(sw_context->ctx,
+					    dev_priv->dummy_query_bo,
+					    dev_priv->has_mob, false);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all preceding
+ * queries have finished, and the old query buffer can be unpinned. However,
+ * since both the new query buffer and the old one are fenced with that fence,
+ * we can do an asynchronous unpin now, and be sure that the old query buffer
+ * won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new and the old query buffers need to be fenced
+ * using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context)
+{
+	/*
+	 * The validate list should still hold references to all
+	 * contexts here.
+	 */
+	if (sw_context->needs_post_query_barrier) {
+		struct vmw_res_cache_entry *ctx_entry =
+			&sw_context->res_cache[vmw_res_context];
+		struct vmw_resource *ctx;
+		int ret;
+
+		BUG_ON(!ctx_entry->valid);
+		ctx = ctx_entry->res;
+
+		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+		if (unlikely(ret != 0))
+			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
+	}
+
+	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+		if (dev_priv->pinned_bo) {
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
+		}
+
+		if (!sw_context->needs_post_query_barrier) {
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
+
+			/*
+			 * We pin also the dummy_query_bo buffer so that we
+			 * don't need to validate it when emitting dummy queries
+			 * in context destroy paths.
+			 */
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
+
+			BUG_ON(sw_context->last_query_ctx == NULL);
+			dev_priv->query_cid = sw_context->last_query_ctx->id;
+			dev_priv->query_cid_valid = true;
+			dev_priv->pinned_bo =
+				vmw_bo_reference(sw_context->cur_query_bo);
+		}
+	}
+}
+
+/**
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
+ * to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the buffer object identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate. The
+ * former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_buffer_object **vmw_bo_p)
+{
+	struct vmw_buffer_object *vmw_bo;
+	uint32_t handle = *id;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	vmw_validation_preload_bo(sw_context->ctx);
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+	if (IS_ERR(vmw_bo)) {
+		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
+		return PTR_ERR(vmw_bo);
+	}
+
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+	vmw_user_bo_noref_release();
+	if (unlikely(ret != 0))
+		return ret;
+
+	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+	if (!reloc)
+		return -ENOMEM;
+
+	reloc->mob_loc = id;
+	reloc->vbo = vmw_bo;
+
+	*vmw_bo_p = vmw_bo;
+	list_add_tail(&reloc->head, &sw_context->bo_relocations);
+
+	return 0;
+}
+
+/**
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
+ * to a valid SVGAGuestPtr
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the DMA buffer identified by the user-space
+ * handle in @ptr.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
+ */
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_buffer_object **vmw_bo_p)
+{
+	struct vmw_buffer_object *vmw_bo;
+	uint32_t handle = ptr->gmrId;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	vmw_validation_preload_bo(sw_context->ctx);
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+	if (IS_ERR(vmw_bo)) {
+		VMW_DEBUG_USER("Could not find or use GMR region.\n");
+		return PTR_ERR(vmw_bo);
+	}
+
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+	vmw_user_bo_noref_release();
+	if (unlikely(ret != 0))
+		return ret;
+
+	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+	if (!reloc)
+		return -ENOMEM;
+
+	reloc->location = ptr;
+	reloc->vbo = vmw_bo;
+	*vmw_bo_p = vmw_bo;
+	list_add_tail(&reloc->head, &sw_context->bo_relocations);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_resource *cotable_res;
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
+	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
+		return -EINVAL;
+
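+	/* Notify the query cotable so it can make room for the new query id. */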
+	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
+	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID with its
+ * backing MOB.  In this function, we take the user mode MOB ID and use
+ * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
+	struct vmw_buffer_object *vmw_bo;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	/*
+	 * Look up the buffer pointed to by q.mobid, put it on the relocation
+	 * list so its kernel mode MOB ID can be filled in later
+	 */
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
+				    &vmw_bo);
+
+	if (ret != 0)
+		return ret;
+
+	sw_context->dx_query_mob = vmw_bo;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
+	return 0;
+}
+
+/**
+ * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 VMW_RES_DIRTY_SET, user_context_converter,
+				 &cmd->body.cid, NULL);
+}
+
+/**
+ * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+			       struct vmw_sw_context *sw_context,
+			       SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
+		container_of(header, typeof(*cmd), header);
+
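+	/*
+	 * With MOB support, rewrite the legacy query command in place into
+	 * its guest-backed equivalent and validate it as such.
+	 */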
+	if (unlikely(dev_priv->has_mob)) {
+		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.body.cid = cmd->body.cid;
+		gb_cmd.body.type = cmd->body.type;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+	}
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 VMW_RES_DIRTY_SET, user_context_converter,
+				 &cmd->body.cid, NULL);
+}
+
+/**
+ * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_buffer_object *vmw_bo;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_buffer_object *vmw_bo;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	if (dev_priv->has_mob) {
+		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.body.cid = cmd->body.cid;
+		gb_cmd.body.type = cmd->body.type;
+		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+		gb_cmd.body.offset = cmd->body.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+	}
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->body.guestResult, &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_buffer_object *vmw_bo;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_buffer_object *vmw_bo;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	if (dev_priv->has_mob) {
+		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.body.cid = cmd->body.cid;
+		gb_cmd.body.type = cmd->body.type;
+		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+		gb_cmd.body.offset = cmd->body.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+	}
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->body.guestResult, &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return 0;
+}
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_buffer_object *vmw_bo = NULL;
+	struct vmw_surface *srf = NULL;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
+	int ret;
+	SVGA3dCmdSurfaceDMASuffix *suffix;
+	uint32_t bo_size;
+	bool dirty;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
+					       header->size - sizeof(*suffix));
+
+	/* Make sure the device and the verifier stay in sync. */
+	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->body.guest.ptr, &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* Make sure DMA doesn't cross BO boundaries. */
+	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
+		VMW_DEBUG_USER("Invalid DMA offset.\n");
+		return -EINVAL;
+	}
+
+	bo_size -= cmd->body.guest.ptr.offset;
+	if (unlikely(suffix->maximumOffset > bo_size))
+		suffix->maximumOffset = bo_size;
+
+	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+		VMW_RES_DIRTY_SET : 0;
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				dirty, user_surface_converter,
+				&cmd->body.host.sid, NULL);
+	if (unlikely(ret != 0)) {
+		if (unlikely(ret != -ERESTARTSYS))
+			VMW_DEBUG_USER("could not find surface for DMA.\n");
+		return ret;
+	}
+
+	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
+
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
+
+	return 0;
+}
+
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+			struct vmw_sw_context *sw_context,
+			SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
+	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+		(unsigned long)header + sizeof(*cmd));
+	SVGA3dPrimitiveRange *range;
+	uint32_t i;
+	uint32_t maxnum;
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
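+	/*
+	 * The command body is followed by numVertexDecls vertex declarations
+	 * and then numRanges primitive ranges; bound both counts against the
+	 * size reported in the command header.
+	 */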
+	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					VMW_RES_DIRTY_NONE,
+					user_surface_converter,
+					&decl->array.surfaceId, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	maxnum = (header->size - sizeof(cmd->body) -
+		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+	if (unlikely(cmd->body.numRanges > maxnum)) {
+		VMW_DEBUG_USER("Illegal number of index ranges.\n");
+		return -EINVAL;
+	}
+
+	range = (SVGA3dPrimitiveRange *) decl;
+	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					VMW_RES_DIRTY_NONE,
+					user_surface_converter,
+					&range->indexArray.surfaceId, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
+	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+	  ((unsigned long) header + header->size + sizeof(*header));
+	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+		((unsigned long) header + sizeof(*cmd));
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	for (; cur_state < last_state; ++cur_state) {
+		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+			continue;
+
+		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
+				       (unsigned int) cur_state->stage);
+			return -EINVAL;
+		}
+
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					VMW_RES_DIRTY_NONE,
+					user_surface_converter,
+					&cur_state->value, &res);
+		if (unlikely(ret != 0))
+			return ret;
+
+		if (dev_priv->has_mob) {
+			struct vmw_ctx_bindinfo_tex binding;
+			struct vmw_ctx_validation_info *node;
+
+			node = vmw_execbuf_info_from_res(sw_context, ctx);
+			if (!node)
+				return -EINVAL;
+
+			binding.bi.ctx = ctx;
+			binding.bi.res = res;
+			binding.bi.bt = vmw_ctx_binding_tex;
+			binding.texture_stage = cur_state->stage;
+			vmw_binding_add(node->staged, &binding.bi, 0,
+					binding.texture_stage);
+		}
+	}
+
+	return 0;
+}
+
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      void *buf)
+{
+	struct vmw_buffer_object *vmw_bo;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd = buf;
+
+	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
+				       &vmw_bo);
+}
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res: Pointer to the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     struct vmw_resource *res, uint32_t *buf_id,
+				     unsigned long backup_offset)
+{
+	struct vmw_buffer_object *vbo;
+	void *info;
+	int ret;
+
+	info = vmw_execbuf_info_from_res(sw_context, res);
+	if (!info)
+		return -EINVAL;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
+	if (ret)
+		return ret;
+
+	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+					 backup_offset);
+	return 0;
+}
+
+/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 const struct vmw_user_resource_conv
+				 *converter, uint32_t *res_id, uint32_t *buf_id,
+				 unsigned long backup_offset)
+{
+	struct vmw_resource *res;
+	int ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+				VMW_RES_DIRTY_NONE, converter, res_id, &res);
+	if (ret)
+		return ret;
+
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
+					 backup_offset);
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+				     user_surface_converter, &cmd->body.sid,
+				     &cmd->body.mobid, 0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
+	int ret;
+	size_t size;
+	struct vmw_resource *ctx;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
+				    cmd->body.shid, cmd + 1, cmd->body.type,
+				    size, &sw_context->staged_cmd_res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(sw_context, NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
+	int ret;
+	struct vmw_resource *ctx;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
+				cmd->body.type, &sw_context->staged_cmd_res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(sw_context, NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
+}
+
+/**
+ * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
+	struct vmw_ctx_bindinfo_shader binding;
+	struct vmw_resource *ctx, *res = NULL;
+	struct vmw_ctx_validation_info *ctx_info;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+		VMW_DEBUG_USER("Illegal shader type %u.\n",
+			       (unsigned int) cmd->body.type);
+		return -EINVAL;
+	}
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!dev_priv->has_mob)
+		return 0;
+
+	if (cmd->body.shid != SVGA3D_INVALID_ID) {
+		/*
+		 * This is the compat shader path - Per device guest-backed
+		 * shaders, but user-space thinks it's per context host-
+		 * backed shaders.
+		 */
+		res = vmw_shader_lookup(vmw_context_res_man(ctx),
+					cmd->body.shid, cmd->body.type);
+		if (!IS_ERR(res)) {
+			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+							    VMW_RES_DIRTY_NONE);
+			if (unlikely(ret != 0))
+				return ret;
+
+			ret = vmw_resource_relocation_add
+				(sw_context, res,
+				 vmw_ptr_diff(sw_context->buf_start,
+					      &cmd->body.shid),
+				 vmw_res_rel_normal);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	if (IS_ERR_OR_NULL(res)) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+					VMW_RES_DIRTY_NONE,
+					user_shader_converter, &cmd->body.shid,
+					&res);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+	if (!ctx_info)
+		return -EINVAL;
+
+	binding.bi.ctx = ctx;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				VMW_RES_DIRTY_SET, user_context_converter,
+				&cmd->body.cid, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
+		container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+				     user_shader_converter, &cmd->body.shid,
+				     &cmd->body.mobid, cmd->body.offsetInBytes);
+}
+
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+	struct vmw_resource *res = NULL;
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_ctx_bindinfo_cb binding;
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.sid, &res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->ctx;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_cb;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	binding.offset = cmd->body.offsetInBytes;
+	binding.size = cmd->body.sizeInBytes;
+	binding.slot = cmd->body.slot;
+
+	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+			       (unsigned int) cmd->body.type,
+			       (unsigned int) binding.slot);
+		return -EINVAL;
+	}
+
+	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+			binding.slot);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+		container_of(header, typeof(*cmd), header);
+	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dShaderResourceViewId);
+
+	if ((u64) cmd->body.startView + (u64) num_sr_view >
+	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
+	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		VMW_DEBUG_USER("Invalid shader binding.\n");
+		return -EINVAL;
+	}
+
+	return vmw_view_bindings_add(sw_context, vmw_view_sr,
+				     vmw_ctx_binding_sr,
+				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+				     (void *) &cmd[1], num_sr_view,
+				     cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+	struct vmw_resource *res = NULL;
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_ctx_bindinfo_shader binding;
+	int ret = 0;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
+		VMW_DEBUG_USER("Illegal shader type %u.\n",
+			       (unsigned int) cmd->body.type);
+		return -EINVAL;
+	}
+
+	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+		if (IS_ERR(res)) {
+			VMW_DEBUG_USER("Could not find shader for binding.\n");
+			return PTR_ERR(res);
+		}
+
+		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+						    VMW_RES_DIRTY_NONE);
+		if (ret)
+			return ret;
+	}
+
+	binding.bi.ctx = ctx_node->ctx;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_dx_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_ctx_bindinfo_vb binding;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+		SVGA3dVertexBuffer buf[];
+	} *cmd;
+	int i, ret, num;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dVertexBuffer);
+	if ((u64)num + (u64)cmd->body.startBuffer >
+	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					VMW_RES_DIRTY_NONE,
+					user_surface_converter,
+					&cmd->buf[i].sid, &res);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->ctx;
+		binding.bi.bt = vmw_ctx_binding_vb;
+		binding.bi.res = res;
+		binding.offset = cmd->buf[i].offset;
+		binding.stride = cmd->buf[i].stride;
+		binding.slot = i + cmd->body.startBuffer;
+
+		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
+ * command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_ctx_bindinfo_ib binding;
+	struct vmw_resource *res;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.sid, &res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->ctx;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_ib;
+	binding.offset = cmd->body.offset;
+	binding.format = cmd->body.format;
+
+	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
+		container_of(header, typeof(*cmd), header);
+	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dRenderTargetViewId);
+	int ret;
+
+	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
+				    0, &cmd->body.depthStencilViewId, 1, 0);
+	if (ret)
+		return ret;
+
+	return vmw_view_bindings_add(sw_context, vmw_view_rt,
+				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
+				     num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
+		container_of(header, typeof(*cmd), header);
+
+	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
+					   cmd->body.renderTargetViewId));
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
+		container_of(header, typeof(*cmd), header);
+
+	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
+					   cmd->body.depthStencilViewId));
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_resource *srf;
+	struct vmw_resource *res;
+	enum vmw_view_type view_type;
+	int ret;
+	/*
+	 * This is based on the fact that all affected define commands have the
+	 * same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+		uint32 sid;
+	} *cmd;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	view_type = vmw_view_cmd_to_type(header->id);
+	if (view_type == vmw_view_max)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+		VMW_DEBUG_USER("Invalid surface id.\n");
+		return -EINVAL;
+	}
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->sid, &srf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
+			    cmd->defined_id, header,
+			    header->size + sizeof(*header),
+			    &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+		SVGA3dSoTarget targets[];
+	} *cmd;
+	int i, ret, num;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
+
+	if (num > SVGA3D_DX_MAX_SOTARGETS) {
+		VMW_DEBUG_USER("Invalid DX SO binding.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					VMW_RES_DIRTY_SET,
+					user_surface_converter,
+					&cmd->targets[i].sid, &res);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->ctx;
+		binding.bi.res = res;
+		binding.bi.bt = vmw_ctx_binding_so;
+		binding.offset = cmd->targets[i].offset;
+		binding.size = cmd->targets[i].sizeInBytes;
+		binding.slot = i;
+
+		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
+	}
+
+	return 0;
+}
+
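+/**
+ * vmw_cmd_dx_so_define - Validate an SVGA_3D_CMD_DX_DEFINE_* state-object
+ * command by notifying the corresponding context cotable of the new id.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */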
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_resource *res;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+	} *cmd;
+	enum vmw_so_type so_type;
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	so_type = vmw_so_cmd_to_type(header->id);
+	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		union {
+			SVGA3dCmdDXReadbackSubResource r_body;
+			SVGA3dCmdDXInvalidateSubResource i_body;
+			SVGA3dCmdDXUpdateSubResource u_body;
+			SVGA3dSurfaceId sid;
+		};
+	} *cmd;
+
+	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->sid, NULL);
+}
+
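+/**
+ * vmw_cmd_dx_cid_check - Check that a DX context is bound for the current
+ * command batch. Used for DX commands that need no per-resource validation.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */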
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
+ * view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this command
+ * batch, conditionally make this command a NOP.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
+			      &sw_context->staged_cmd_res, &view);
+	if (ret || !view)
+		return ret;
+
+	/*
+	 * If the view wasn't created during this command batch, it might
+	 * have been removed due to a context swapout, so add a
+	 * relocation to conditionally make this command a NOP to avoid
+	 * device errors.
+	 */
+	return vmw_resource_relocation_add(sw_context, view,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_cond_nop);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	struct vmw_resource *res;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
+		container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
+	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+	if (ret)
+		return ret;
+
+	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
+				 cmd->body.shaderId, cmd->body.type,
+				 &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
+		container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+				&sw_context->staged_cmd_res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
+		container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (cmd->body.cid != SVGA3D_INVALID_ID) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+					VMW_RES_DIRTY_SET,
+					user_context_converter, &cmd->body.cid,
+					&ctx);
+		if (ret)
+			return ret;
+	} else {
+		struct vmw_ctx_validation_info *ctx_node =
+			VMW_GET_CTX_NODE(sw_context);
+
+		if (!ctx_node)
+			return -EINVAL;
+
+		ctx = ctx_node->ctx;
+	}
+
+	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
+	if (IS_ERR(res)) {
+		VMW_DEBUG_USER("Could not find shader to bind.\n");
+		return PTR_ERR(res);
+	}
+
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+					    VMW_RES_DIRTY_NONE);
+	if (ret) {
+		VMW_DEBUG_USER("Error creating resource validation node.\n");
+		return ret;
+	}
+
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+					 &cmd->body.mobid,
+					 cmd->body.offsetInBytes);
+}
+
+/**
+ * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
+		container_of(header, typeof(*cmd), header);
+
+	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
+					   cmd->body.shaderResourceViewId));
+}
+
+/**
+ * vmw_cmd_dx_transfer_from_buffer - Validate
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
+		container_of(header, typeof(*cmd), header);
+	int ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				VMW_RES_DIRTY_NONE, user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.destSid, NULL);
+}
+
+/**
+ * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
+		container_of(header, typeof(*cmd), header);
+
+	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
+		return -EINVAL;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_SET, user_surface_converter,
+				 &cmd->body.surface.sid, NULL);
+}
+
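+/**
+ * vmw_cmd_check_not_3d - Validate a 2D (non-3D) SVGA command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @buf: Pointer to the command in the command stream.
+ * @size: On input, the remaining batch size; on output, the size of this
+ * command.
+ */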
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				void *buf, uint32_t *size)
+{
+	uint32_t size_remaining = *size;
+	uint32_t cmd_id;
+
+	cmd_id = ((uint32_t *)buf)[0];
+	switch (cmd_id) {
+	case SVGA_CMD_UPDATE:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+		break;
+	case SVGA_CMD_DEFINE_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+		break;
+	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	default:
+		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
+		return -EINVAL;
+	}
+
+	if (*size > size_remaining) {
+		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
+			       cmd_id);
+		return -EINVAL;
+	}
+
+	if (unlikely(!sw_context->kernel)) {
+		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
+		return -EPERM;
+	}
+
+	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+	return 0;
+}
+
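+/*
+ * Per-command validation table. Judging from vmw_cmd_check() below, the three
+ * boolean columns of each VMW_CMD_DEF() entry are <user_allow> (the command
+ * may be submitted by an unprivileged client), <gb_disable> (the command is
+ * rejected when guest-backed objects are available) and <gb_enable> (the
+ * command requires guest-backed objects).
+ */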
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+		    &vmw_cmd_set_render_target_check, true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
+		    &vmw_cmd_blt_surf_screen_check, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+		    &vmw_cmd_update_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+		    &vmw_cmd_readback_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+		    &vmw_cmd_readback_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+		    &vmw_cmd_invalidate_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+		    &vmw_cmd_invalidate_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+		    false, false, true),
+
+	/* SM commands */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+		    &vmw_cmd_dx_set_shader_res, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+		    &vmw_cmd_dx_set_index_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+		    &vmw_cmd_dx_set_rendertargets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+		    &vmw_cmd_dx_define_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+		    &vmw_cmd_dx_destroy_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+		    &vmw_cmd_dx_bind_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+		    &vmw_cmd_dx_set_so_targets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+		    &vmw_cmd_buffer_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+		    &vmw_cmd_pred_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+		    &vmw_cmd_dx_transfer_from_buffer,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
+		    true, false, true),
+};
+
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+	u32 cmd_id = ((u32 *) buf)[0];
+
+	if (cmd_id >= SVGA_CMD_MAX) {
+		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+		const struct vmw_cmd_entry *entry;
+
+		*size = header->size + sizeof(SVGA3dCmdHeader);
+		cmd_id = header->id;
+		if (cmd_id >= SVGA_3D_CMD_MAX)
+			return false;
+
+		cmd_id -= SVGA_3D_CMD_BASE;
+		entry = &vmw_cmd_entries[cmd_id];
+		*cmd = entry->cmd_name;
+		return true;
+	}
+
+	switch (cmd_id) {
+	case SVGA_CMD_UPDATE:
+		*cmd = "SVGA_CMD_UPDATE";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+		break;
+	case SVGA_CMD_DEFINE_GMRFB:
+		*cmd = "SVGA_CMD_DEFINE_GMRFB";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+		break;
+	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	default:
+		*cmd = "UNKNOWN";
+		*size = 0;
+		return false;
+	}
+
+	return true;
+}
+
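+/*
+ * Validate a single command: 2D commands are dispatched to
+ * vmw_cmd_check_not_3d(), 3D commands to their per-command validator in
+ * vmw_cmd_entries[] after size, privilege and guest-backed checks.
+ */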
+static int vmw_cmd_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context, void *buf,
+			 uint32_t *size)
+{
+	uint32_t cmd_id;
+	uint32_t size_remaining = *size;
+	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+	int ret;
+	const struct vmw_cmd_entry *entry;
+	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
+
+	cmd_id = ((uint32_t *)buf)[0];
+	/* Handle any non-3D commands */
+	if (unlikely(cmd_id < SVGA_CMD_MAX))
+		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
+	cmd_id = header->id;
+	*size = header->size + sizeof(SVGA3dCmdHeader);
+
+	cmd_id -= SVGA_3D_CMD_BASE;
+	if (unlikely(*size > size_remaining))
+		goto out_invalid;
+
+	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
+		goto out_invalid;
+
+	entry = &vmw_cmd_entries[cmd_id];
+	if (unlikely(!entry->func))
+		goto out_invalid;
+
+	if (unlikely(!entry->user_allow && !sw_context->kernel))
+		goto out_privileged;
+
+	if (unlikely(entry->gb_disable && gb))
+		goto out_old;
+
+	if (unlikely(entry->gb_enable && !gb))
+		goto out_new;
+
+	ret = entry->func(dev_priv, sw_context, header);
+	if (unlikely(ret != 0)) {
+		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
+			       cmd_id + SVGA_3D_CMD_BASE, ret);
+		return ret;
+	}
+
+	return 0;
+out_invalid:
+	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
+		       cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_privileged:
+	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
+		       cmd_id + SVGA_3D_CMD_BASE);
+	return -EPERM;
+out_old:
+	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
+		       cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_new:
+	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
+		       cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+}
+
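+/*
+ * Walk the whole command batch, validating (and possibly patching) each
+ * command in turn via vmw_cmd_check().
+ */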
+static int vmw_cmd_check_all(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context, void *buf,
+			     uint32_t size)
+{
+	int32_t cur_size = size;
+	int ret;
+
+	sw_context->buf_start = buf;
+
+	while (cur_size > 0) {
+		size = cur_size;
+		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
+		if (unlikely(ret != 0))
+			return ret;
+		buf = (void *)((unsigned long) buf + size);
+		cur_size -= size;
+	}
+
+	if (unlikely(cur_size != 0)) {
+		VMW_DEBUG_USER("Command verifier out of sync.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void vmw_free_relocations(struct vmw_sw_context *sw_context)
+{
+	/* Memory is validation context memory, so no need to free it */
+	INIT_LIST_HEAD(&sw_context->bo_relocations);
+}
+
+static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
+{
+	struct vmw_relocation *reloc;
+	struct ttm_buffer_object *bo;
+
+	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
+		bo = &reloc->vbo->base;
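+		/*
+		 * Patch the command stream according to where validation
+		 * placed the buffer: VRAM offsets are made relative to the
+		 * framebuffer GMR, while GMR and MOB placements use
+		 * bo->mem.start as their id.
+		 */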
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			reloc->location->offset += bo->offset;
+			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+			break;
+		case VMW_PL_GMR:
+			reloc->location->gmrId = bo->mem.start;
+			break;
+		case VMW_PL_MOB:
+			*reloc->mob_loc = bo->mem.start;
+			break;
+		default:
+			BUG();
+		}
+	}
+	vmw_free_relocations(sw_context);
+}
+
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+				 uint32_t size)
+{
+	if (likely(sw_context->cmd_bounce_size >= size))
+		return 0;
+
+	if (sw_context->cmd_bounce_size == 0)
+		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
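+	/* Grow by roughly 1.5x, page aligned, until the request fits. */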
+	while (sw_context->cmd_bounce_size < size) {
+		sw_context->cmd_bounce_size =
+			PAGE_ALIGN(sw_context->cmd_bounce_size +
+				   (sw_context->cmd_bounce_size >> 1));
+	}
+
+	vfree(sw_context->cmd_bounce);
+	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+	if (sw_context->cmd_bounce == NULL) {
+		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
+		sw_context->cmd_bounce_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * @file_priv: Pointer to the calling struct drm_file. Must not be NULL if
+ * @p_handle is non-NULL.
+ * @dev_priv: Pointer to a device private struct.
+ * @p_fence: Output pointer to the created fence object.
+ * @p_handle: If non-NULL, a user-space handle for the fence is created and
+ * returned here as well.
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, we sync the fifo and set *@p_fence to NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_fence_obj **p_fence,
+			       uint32_t *p_handle)
+{
+	uint32_t sequence;
+	int ret;
+	bool synced = false;
+
+	/* p_handle implies file_priv. */
+	BUG_ON(p_handle != NULL && file_priv == NULL);
+
+	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	if (unlikely(ret != 0)) {
+		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
+		synced = true;
+	}
+
+	if (p_handle != NULL)
+		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+					    sequence, p_fence, p_handle);
+	else
+		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
+
+	if (unlikely(ret != 0 && !synced)) {
+		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
+					 false, VMW_FENCE_WAIT_TIMEOUT);
+		*p_fence = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
+ *
+ * @dev_priv: Pointer to a vmw_private struct.
+ * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
+ * @ret: Return value from fence object creation.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
+ * the information should be copied.
+ * @fence: Pointer to the fence object.
+ * @fence_handle: User-space fence handle.
+ * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
+ *
+ * This function copies fence information to user-space. If copying fails, the
+ * user-space struct drm_vmw_fence_rep::error member is hopefully left
+ * untouched, and if it's preloaded with an -EFAULT by user-space, the error
+ * will hopefully be detected.
+ *
+ * Also if copying fails, user-space will be unable to signal the fence object
+ * so we wait for it immediately, and then unreference the user-space reference.
+ */
+int
+vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+			    struct vmw_fpriv *vmw_fp, int ret,
+			    struct drm_vmw_fence_rep __user *user_fence_rep,
+			    struct vmw_fence_obj *fence, uint32_t fence_handle,
+			    int32_t out_fence_fd)
+{
+	struct drm_vmw_fence_rep fence_rep;
+
+	if (user_fence_rep == NULL)
+		return 0;
+
+	memset(&fence_rep, 0, sizeof(fence_rep));
+
+	fence_rep.error = ret;
+	fence_rep.fd = out_fence_fd;
+	if (ret == 0) {
+		BUG_ON(fence == NULL);
+
+		fence_rep.handle = fence_handle;
+		fence_rep.seqno = fence->base.seqno;
+		vmw_update_seqno(dev_priv, &dev_priv->fifo);
+		fence_rep.passed_seqno = dev_priv->last_read_seqno;
+	}
+
+	/*
+	 * copy_to_user errors will be detected by user space not seeing
+	 * fence_rep::error filled in. Typically user-space would have pre-set
+	 * that member to -EFAULT.
+	 */
+	ret = copy_to_user(user_fence_rep, &fence_rep,
+			   sizeof(fence_rep));
+
+	/*
+	 * User-space lost the fence object. We need to sync and unreference the
+	 * handle.
+	 */
+	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+					  TTM_REF_USAGE);
+		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
+		(void) vmw_fence_obj_wait(fence, false, false,
+					  VMW_FENCE_WAIT_TIMEOUT);
+	}
+
+	return ret ? -EFAULT : 0;
+}
+
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch pointed to
+ * by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+				   void *kernel_commands, u32 command_size,
+				   struct vmw_sw_context *sw_context)
+{
+	void *cmd;
+
+	if (sw_context->dx_ctx_node)
+		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
+					  sw_context->dx_ctx_node->ctx->id);
+	else
+		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+
+	if (!cmd)
+		return -ENOMEM;
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_fifo_commit(dev_priv, command_size);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
+ * command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer represented
+ * by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+				     struct vmw_cmdbuf_header *header,
+				     u32 command_size,
+				     struct vmw_sw_context *sw_context)
+{
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
+		  SVGA3D_INVALID_ID);
+	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
+				       header);
+
+	vmw_apply_relocations(sw_context);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Kernel-space pointer to the command batch, or NULL if the
+ * commands should be copied from @user_commands.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and copies
+ * the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ *
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands as passed in. That value may be NULL. In that case, the
+ * value of *@header will be set to NULL.
+ *
+ * If an error is encountered, the function will return a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it will return
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+				void __user *user_commands,
+				void *kernel_commands, u32 command_size,
+				struct vmw_cmdbuf_header **header)
+{
+	size_t cmdbuf_size;
+	int ret;
+
+	*header = NULL;
+	if (command_size > SVGA_CB_MAX_SIZE) {
+		VMW_DEBUG_USER("Command buffer is too large.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
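+	/*
+	 * Bypass the command buffer manager if it is not available, or if the
+	 * caller already supplied a kernel-space batch.
+	 */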
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
+	/* If possible, add a little space for fencing. */
+	cmdbuf_size = command_size + 512;
+	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
+					   header);
+	if (IS_ERR(kernel_commands))
+		return kernel_commands;
+
+	ret = copy_from_user(kernel_commands, user_commands, command_size);
+	if (ret) {
+		VMW_DEBUG_USER("Failed copying commands.\n");
+		vmw_cmdbuf_header_free(*header);
+		*header = NULL;
+		return ERR_PTR(-EFAULT);
+	}
+
+	return kernel_commands;
+}
+
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource *res;
+	int ret;
+	unsigned int size;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+
+	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
+	ret = vmw_validation_preload_res(sw_context->ctx, size);
+	if (ret)
+		return ret;
+
+	res = vmw_user_resource_noref_lookup_handle
+		(dev_priv, sw_context->fp->tfile, handle,
+		 user_context_converter);
+	if (IS_ERR(res)) {
+		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
+			       (unsigned int) handle);
+		return PTR_ERR(res);
+	}
+
+	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
+	if (unlikely(ret != 0))
+		return ret;
+
+	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
+	sw_context->man = vmw_context_res_man(res);
+
+	return 0;
+}
+
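+/*
+ * vmw_execbuf_process - Main command submission path: copy in the batch if
+ * needed, validate and patch it, reserve and validate all referenced
+ * resources and buffers, submit through the fifo or the command buffer
+ * manager, and finally fence the result back to user-space.
+ */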
+int vmw_execbuf_process(struct drm_file *file_priv,
+			struct vmw_private *dev_priv,
+			void __user *user_commands, void *kernel_commands,
+			uint32_t command_size, uint64_t throttle_us,
+			uint32_t dx_context_handle,
+			struct drm_vmw_fence_rep __user *user_fence_rep,
+			struct vmw_fence_obj **out_fence, uint32_t flags)
+{
+	struct vmw_sw_context *sw_context = &dev_priv->ctx;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_cmdbuf_header *header;
+	uint32_t handle = 0;
+	int ret;
+	int32_t out_fence_fd = -1;
+	struct sync_file *sync_file = NULL;
+	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+
+	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
+	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			VMW_DEBUG_USER("Failed to get a fence fd.\n");
+			return out_fence_fd;
+		}
+	}
+
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (ret)
+			goto out_free_fence_fd;
+	}
+
+	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+					     kernel_commands, command_size,
+					     &header);
+	if (IS_ERR(kernel_commands)) {
+		ret = PTR_ERR(kernel_commands);
+		goto out_free_fence_fd;
+	}
+
+	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+	if (ret) {
+		ret = -ERESTARTSYS;
+		goto out_free_header;
+	}
+
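+	/*
+	 * A batch counts as privileged ("kernel") only when the caller
+	 * supplied kernel_commands directly and no command buffer header was
+	 * allocated for it; user-space batches are copied into the bounce
+	 * buffer below.
+	 */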
+	sw_context->kernel = false;
+	if (kernel_commands == NULL) {
+		ret = vmw_resize_cmd_bounce(sw_context, command_size);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+
+		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
+				     command_size);
+		if (unlikely(ret != 0)) {
+			ret = -EFAULT;
+			VMW_DEBUG_USER("Failed copying commands.\n");
+			goto out_unlock;
+		}
+
+		kernel_commands = sw_context->cmd_bounce;
+	} else if (!header) {
+		sw_context->kernel = true;
+	}
+
+	sw_context->fp = vmw_fpriv(file_priv);
+	INIT_LIST_HEAD(&sw_context->ctx_list);
+	sw_context->cur_query_bo = dev_priv->pinned_bo;
+	sw_context->last_query_ctx = NULL;
+	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
+	sw_context->dx_query_mob = NULL;
+	sw_context->dx_query_ctx = NULL;
+	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
+	INIT_LIST_HEAD(&sw_context->res_relocations);
+	INIT_LIST_HEAD(&sw_context->bo_relocations);
+
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
+	if (!sw_context->res_ht_initialized) {
+		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+
+		sw_context->res_ht_initialized = true;
+	}
+
+	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
+	sw_context->ctx = &val_ctx;
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0))
+		goto out_err_nores;
+
+	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+				command_size);
+	if (unlikely(ret != 0))
+		goto out_err_nores;
+
+	ret = vmw_resources_reserve(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err_nores;
+
+	ret = vmw_validation_bo_reserve(&val_ctx, true);
+	if (unlikely(ret != 0))
+		goto out_err_nores;
+
+	ret = vmw_validation_bo_validate(&val_ctx, true);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_validation_res_validate(&val_ctx, true);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	vmw_validation_drop_ht(&val_ctx);
+
+	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_err;
+	}
+
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_unlock_binding;
+	}
+
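+	/*
+	 * Submit through the command buffer manager when a header was
+	 * allocated by vmw_execbuf_cmdbuf() above; otherwise reserve space
+	 * directly in the fifo.
+	 */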
+	if (!header) {
+		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+					      command_size, sw_context);
+	} else {
+		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+						sw_context);
+		header = NULL;
+	}
+	mutex_unlock(&dev_priv->binding_mutex);
+	if (ret)
+		goto out_err;
+
+	vmw_query_bo_switch_commit(dev_priv, sw_context);
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+					 (user_fence_rep) ? &handle : NULL);
+	/*
+	 * This error is harmless, because if fence submission fails,
+	 * vmw_fifo_send_fence will sync. The error will be propagated to
+	 * user-space in @user_fence_rep.
+	 */
+	if (ret != 0)
+		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
+
+	vmw_execbuf_bindings_commit(sw_context, false);
+	vmw_bind_dx_query_mob(sw_context);
+	vmw_validation_res_unreserve(&val_ctx, false);
+
+	vmw_validation_bo_fence(sw_context->ctx, fence);
+
+	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
+	/*
+	 * If anything fails here, give up trying to export the fence and do a
+	 * sync since the user mode will not be able to sync the fence itself.
+	 * This ensures we are still functionally correct.
+	 */
+	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+
+		sync_file = sync_file_create(&fence->base);
+		if (!sync_file) {
+			VMW_DEBUG_USER("Sync file create failed for fence\n");
+			put_unused_fd(out_fence_fd);
+			out_fence_fd = -1;
+
+			(void) vmw_fence_obj_wait(fence, false, false,
+						  VMW_FENCE_WAIT_TIMEOUT);
+		}
+	}
+
+	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+				    user_fence_rep, fence, handle, out_fence_fd);
+
+	if (sync_file) {
+		if (ret) {
+			/* usercopy of fence failed, put the file object */
+			fput(sync_file->file);
+			put_unused_fd(out_fence_fd);
+		} else {
+			/* Link the fence with the FD created earlier */
+			fd_install(out_fence_fd, sync_file->file);
+		}
+	}
+
+	/* Don't unreference when handing fence out */
+	if (unlikely(out_fence != NULL)) {
+		*out_fence = fence;
+		fence = NULL;
+	} else if (likely(fence != NULL)) {
+		vmw_fence_obj_unreference(&fence);
+	}
+
+	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+	 * in resource destruction paths.
+	 */
+	vmw_validation_unref_lists(&val_ctx);
+
+	return ret;
+
+out_unlock_binding:
+	mutex_unlock(&dev_priv->binding_mutex);
+out_err:
+	vmw_validation_bo_backoff(&val_ctx);
+out_err_nores:
+	vmw_execbuf_bindings_commit(sw_context, true);
+	vmw_validation_res_unreserve(&val_ctx, true);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
+	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+out_unlock:
+	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+	vmw_validation_drop_ht(&val_ctx);
+	WARN_ON(!list_empty(&sw_context->ctx_list));
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+	 * in resource destruction paths.
+	 */
+	vmw_validation_unref_lists(&val_ctx);
+out_free_header:
+	if (header)
+		vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+	if (out_fence_fd >= 0)
+		put_unused_fd(out_fence_fd);
+
+	return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer if the
+ * normal way to do this hits an error, which should typically be extremely
+ * rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
+
+	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
+}
+
+
+/**
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
+ * bo.
+ *
+ * @dev_priv: The device private structure.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
+ * query barrier that flushes all queries touching the current buffer pointed
+ * to by @dev_priv->pinned_bo.
+ *
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
+ *
+ * This function does not return any failure codes, but makes a best effort to
+ * unpin safely if errors occur.
+ *
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex must be held by the current thread when calling
+ * this function.
+ */
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+				     struct vmw_fence_obj *fence)
+{
+	int ret = 0;
+	struct vmw_fence_obj *lfence = NULL;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+
+	if (dev_priv->pinned_bo == NULL)
+		goto out_unlock;
+
+	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+				    false);
+	if (ret)
+		goto out_no_reserve;
+
+	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+				    false);
+	if (ret)
+		goto out_no_reserve;
+
+	ret = vmw_validation_bo_reserve(&val_ctx, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (dev_priv->query_cid_valid) {
+		BUG_ON(fence != NULL);
+		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+		if (ret)
+			goto out_no_emit;
+		dev_priv->query_cid_valid = false;
+	}
+
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
+	if (fence == NULL) {
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+						  NULL);
+		fence = lfence;
+	}
+	vmw_validation_bo_fence(&val_ctx, fence);
+	if (lfence != NULL)
+		vmw_fence_obj_unreference(&lfence);
+
+	vmw_validation_unref_lists(&val_ctx);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
+
+out_unlock:
+	return;
+out_no_emit:
+	vmw_validation_bo_backoff(&val_ctx);
+out_no_reserve:
+	vmw_validation_unref_lists(&val_ctx);
+	vmw_execbuf_unpin_panic(dev_priv);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
+ *
+ * This function does not return any failure codes, but makes a best effort to
+ * unpin safely if errors occur.
+ *
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	if (dev_priv->query_cid_valid)
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_execbuf_arg *arg = data;
+	int ret;
+	struct dma_fence *in_fence = NULL;
+
+	/*
+	 * Extend the ioctl argument while maintaining backwards compatibility:
+	 * We take different code paths depending on the value of arg->version.
+	 *
+	 * Note: The ioctl argument is extended and zero-padded by core DRM.
+	 */
+	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
+		     arg->version == 0)) {
+		VMW_DEBUG_USER("Incorrect execbuf version.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->version) {
+	case 1:
+		/* For v1, core DRM has extended and zero-padded the data */
+		arg->context_handle = (uint32_t) -1;
+		break;
+	case 2:
+	default:
+		/* For v2 and later core DRM would have correctly copied it */
+		break;
+	}
+
+	/* If imported a fence FD from elsewhere, then wait on it */
+	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+		in_fence = sync_file_get_fence(arg->imported_fence_fd);
+
+		if (!in_fence) {
+			VMW_DEBUG_USER("Cannot get imported fence\n");
+			return -EINVAL;
+		}
+
+		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+		if (ret)
+			goto out;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv,
+				  (void __user *)(unsigned long)arg->commands,
+				  NULL, arg->command_size, arg->throttle_us,
+				  arg->context_handle,
+				  (void __user *)(unsigned long)arg->fence_rep,
+				  NULL, arg->flags);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	if (unlikely(ret != 0))
+		goto out;
+
+	vmw_kms_cursor_post_execbuf(dev_priv);
+
+out:
+	if (in_fence)
+		dma_fence_put(in_fence);
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 0000000..8e7b054
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,845 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 David Airlie
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+
+#define VMW_DIRTY_DELAY (HZ / 30)
+
+struct vmw_fb_par {
+	struct vmw_private *vmw_priv;
+
+	void *vmalloc;
+
+	struct mutex bo_mutex;
+	struct vmw_buffer_object *vmw_bo;
+	unsigned bo_size;
+	struct drm_framebuffer *set_fb;
+	struct drm_display_mode *set_mode;
+	u32 fb_x;
+	u32 fb_y;
+	bool bo_iowrite;
+
+	u32 pseudo_palette[17];
+
+	unsigned max_width;
+	unsigned max_height;
+
+	struct {
+		spinlock_t lock;
+		bool active;
+		unsigned x1;
+		unsigned y1;
+		unsigned x2;
+		unsigned y2;
+	} dirty;
+
+	struct drm_crtc *crtc;
+	struct drm_connector *con;
+	struct delayed_work local_work;
+};
+
+static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+			    unsigned blue, unsigned transp,
+			    struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	u32 *pal = par->pseudo_palette;
+
+	if (regno > 15) {
+		DRM_ERROR("Bad regno %u.\n", regno);
+		return 1;
+	}
+
+	switch (par->set_fb->format->depth) {
+	case 24:
+	case 32:
+		pal[regno] = ((red & 0xff00) << 8) |
+			      (green & 0xff00) |
+			     ((blue  & 0xff00) >> 8);
+		break;
+	default:
+		DRM_ERROR("Bad depth %u, bpp %u.\n",
+			  par->set_fb->format->depth,
+			  par->set_fb->format->cpp[0] * 8);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+			    struct fb_info *info)
+{
+	int depth = var->bits_per_pixel;
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+
+	switch (var->bits_per_pixel) {
+	case 32:
+		depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	switch (depth) {
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 8;
+		var->transp.offset = 24;
+		break;
+	default:
+		DRM_ERROR("Bad depth %u.\n", depth);
+		return -EINVAL;
+	}
+
+	if ((var->xoffset + var->xres) > par->max_width ||
+	    (var->yoffset + var->yres) > par->max_height) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
+					var->xres * var->bits_per_pixel/8,
+					var->yoffset + var->yres)) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vmw_fb_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+/**
+ * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
+ *
+ * @work: The struct work_struct associated with this task.
+ *
+ * This function flushes the dirty regions of the vmalloc framebuffer to the
+ * kms framebuffer and, if the kms framebuffer is visible, also updates the
+ * corresponding displays. Note that this function runs even if the kms
+ * framebuffer is not bound to a crtc and thus not visible, but it's turned
+ * off during hibernation using the par->dirty.active bool.
+ */
+static void vmw_fb_dirty_flush(struct work_struct *work)
+{
+	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+					      local_work.work);
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct fb_info *info = vmw_priv->fb_info;
+	unsigned long irq_flags;
+	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
+	u32 cpp, max_x, max_y;
+	struct drm_clip_rect clip;
+	struct drm_framebuffer *cur_fb;
+	u8 *src_ptr, *dst_ptr;
+	struct vmw_buffer_object *vbo = par->vmw_bo;
+	void *virtual;
+
+	if (!READ_ONCE(par->dirty.active))
+		return;
+
+	mutex_lock(&par->bo_mutex);
+	cur_fb = par->set_fb;
+	if (!cur_fb)
+		goto out_unlock;
+
+	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
+	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+	virtual = vmw_bo_map_and_cache(vbo);
+	if (!virtual)
+		goto out_unreserve;
+
+	spin_lock_irqsave(&par->dirty.lock, irq_flags);
+	if (!par->dirty.active) {
+		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+		goto out_unreserve;
+	}
+
+	/*
+	 * Handle panning when copying from vmalloc to framebuffer.
+	 * Clip dirty area to framebuffer.
+	 */
+	cpp = cur_fb->format->cpp[0];
+	max_x = par->fb_x + cur_fb->width;
+	max_y = par->fb_y + cur_fb->height;
+
+	dst_x1 = par->dirty.x1 - par->fb_x;
+	dst_y1 = par->dirty.y1 - par->fb_y;
+	dst_x1 = max_t(s32, dst_x1, 0);
+	dst_y1 = max_t(s32, dst_y1, 0);
+
+	dst_x2 = par->dirty.x2 - par->fb_x;
+	dst_y2 = par->dirty.y2 - par->fb_y;
+	dst_x2 = min_t(s32, dst_x2, max_x);
+	dst_y2 = min_t(s32, dst_y2, max_y);
+	w = dst_x2 - dst_x1;
+	h = dst_y2 - dst_y1;
+	w = max_t(s32, 0, w);
+	h = max_t(s32, 0, h);
+
+	par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+
+	if (w && h) {
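+		/* Copy the dirty rows from the vmalloc shadow buffer into the
+		 * mapped buffer object. */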
+		dst_ptr = (u8 *)virtual  +
+			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+		src_ptr = (u8 *)par->vmalloc +
+			((dst_y1 + par->fb_y) * info->fix.line_length +
+			 (dst_x1 + par->fb_x) * cpp);
+
+		while (h-- > 0) {
+			memcpy(dst_ptr, src_ptr, w*cpp);
+			dst_ptr += par->set_fb->pitches[0];
+			src_ptr += info->fix.line_length;
+		}
+
+		clip.x1 = dst_x1;
+		clip.x2 = dst_x2;
+		clip.y1 = dst_y1;
+		clip.y2 = dst_y2;
+	}
+
+out_unreserve:
+	ttm_bo_unreserve(&vbo->base);
+	ttm_read_unlock(&vmw_priv->reservation_sem);
+	if (w && h) {
+		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+						       &clip, 1));
+		vmw_fifo_flush(vmw_priv, false);
+	}
+out_unlock:
+	mutex_unlock(&par->bo_mutex);
+}
+
+static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+			      unsigned x1, unsigned y1,
+			      unsigned width, unsigned height)
+{
+	unsigned long flags;
+	unsigned x2 = x1 + width;
+	unsigned y2 = y1 + height;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	if (par->dirty.x1 == par->dirty.x2) {
+		par->dirty.x1 = x1;
+		par->dirty.y1 = y1;
+		par->dirty.x2 = x2;
+		par->dirty.y2 = y2;
+		/* If we are active, start the dirty work;
+		 * we share the work with the defio system. */
+		if (par->dirty.active)
+			schedule_delayed_work(&par->local_work,
+					      VMW_DIRTY_DELAY);
+	} else {
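+		/*
+		 * Otherwise grow the pending dirty rect to the bounding box of
+		 * the old and new areas; e.g. marking (10, 10) 20x20 and then
+		 * (100, 5) 10x10 coalesces to x1 = 10, y1 = 5, x2 = 110,
+		 * y2 = 30, which the next flush copies in one pass.
+		 */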
+		if (x1 < par->dirty.x1)
+			par->dirty.x1 = x1;
+		if (y1 < par->dirty.y1)
+			par->dirty.y1 = y1;
+		if (x2 > par->dirty.x2)
+			par->dirty.x2 = x2;
+		if (y2 > par->dirty.y2)
+			par->dirty.y2 = y2;
+	}
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+}
+
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+
+	if ((var->xoffset + var->xres) > var->xres_virtual ||
+	    (var->yoffset + var->yres) > var->yres_virtual) {
+		DRM_ERROR("Requested panning can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+	if (par->set_fb)
+		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+				  par->set_fb->height);
+	mutex_unlock(&par->bo_mutex);
+
+	return 0;
+}
+
+static void vmw_deferred_io(struct fb_info *info,
+			    struct list_head *pagelist)
+{
+	struct vmw_fb_par *par = info->par;
+	unsigned long start, end, min, max;
+	unsigned long flags;
+	struct page *page;
+	int y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = (max / info->fix.line_length) + 1;
+
+		spin_lock_irqsave(&par->dirty.lock, flags);
+		par->dirty.x1 = 0;
+		par->dirty.y1 = y1;
+		par->dirty.x2 = info->var.xres;
+		par->dirty.y2 = y2;
+		spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+		/*
+		 * Since we've already waited on this work once, try to
+		 * execute asap.
+		 */
+		cancel_delayed_work(&par->local_work);
+		schedule_delayed_work(&par->local_work, 0);
+	}
+};
+
+static struct fb_deferred_io vmw_defio = {
+	.delay		= VMW_DIRTY_DELAY,
+	.deferred_io	= vmw_deferred_io,
+};
+
+/*
+ * Draw code
+ */
+
+static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+			  rect->width, rect->height);
+}
+
+static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	cfb_copyarea(info, region);
+	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+			  region->width, region->height);
+}
+
+static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+			  image->width, image->height);
+}
+
+/*
+ * Bring up code
+ */
+
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+			    size_t size, struct vmw_buffer_object **out)
+{
+	struct vmw_buffer_object *vmw_bo;
+	int ret;
+
+	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
+
+	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+	if (!vmw_bo) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
+			      &vmw_sys_placement,
+			      false,
+			      &vmw_bo_bo_free);
+	if (unlikely(ret != 0))
+		goto err_unlock; /* init frees the buffer on failure */
+
+	*out = vmw_bo;
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+
+	return 0;
+
+err_unlock:
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+	return ret;
+}
+
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+				int *depth)
+{
+	switch (var->bits_per_pixel) {
+	case 32:
+		*depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vmwgfx_set_config_internal(struct drm_mode_set *set)
+{
+	struct drm_crtc *crtc = set->crtc;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+restart:
+	ret = crtc->funcs->set_config(set, &ctx);
+
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto restart;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+			     bool detach_bo,
+			     bool unref_bo)
+{
+	struct drm_framebuffer *cur_fb = par->set_fb;
+	int ret;
+
+	/* Detach the KMS framebuffer from crtcs */
+	if (par->set_mode) {
+		struct drm_mode_set set;
+
+		set.crtc = par->crtc;
+		set.x = 0;
+		set.y = 0;
+		set.mode = NULL;
+		set.fb = NULL;
+		set.num_connectors = 0;
+		set.connectors = &par->con;
+		ret = vmwgfx_set_config_internal(&set);
+		if (ret) {
+			DRM_ERROR("Could not unset a mode.\n");
+			return ret;
+		}
+		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+		par->set_mode = NULL;
+	}
+
+	if (cur_fb) {
+		drm_framebuffer_put(cur_fb);
+		par->set_fb = NULL;
+	}
+
+	if (par->vmw_bo && detach_bo && unref_bo)
+		vmw_bo_unreference(&par->vmw_bo);
+
+	return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	struct vmw_fb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_framebuffer *cur_fb;
+	struct vmw_framebuffer *vfb;
+	int ret = 0, depth;
+	size_t new_bo_size;
+
+	ret = vmw_fb_compute_depth(var, &depth);
+	if (ret)
+		return ret;
+
+	mode_cmd.width = var->xres;
+	mode_cmd.height = var->yres;
+	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
+	mode_cmd.pixel_format =
+		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
+
+	cur_fb = par->set_fb;
+	if (cur_fb && cur_fb->width == mode_cmd.width &&
+	    cur_fb->height == mode_cmd.height &&
+	    cur_fb->format->format == mode_cmd.pixel_format &&
+	    cur_fb->pitches[0] == mode_cmd.pitches[0])
+		return 0;
+
+	/* Do we need a new buffer object? Reuse the current one only if it
+	 * is large enough and not more than twice the required size. */
+	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
+	ret = vmw_fb_kms_detach(par,
+				par->bo_size < new_bo_size ||
+				par->bo_size > 2*new_bo_size,
+				true);
+	if (ret)
+		return ret;
+
+	if (!par->vmw_bo) {
+		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+				       &par->vmw_bo);
+		if (ret) {
+			DRM_ERROR("Failed creating a buffer object for "
+				  "fbdev.\n");
+			return ret;
+		}
+		par->bo_size = new_bo_size;
+	}
+
+	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+				      true, &mode_cmd);
+	if (IS_ERR(vfb))
+		return PTR_ERR(vfb);
+
+	par->set_fb = &vfb->base;
+
+	return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct drm_mode_set set;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+		DRM_MODE_TYPE_DRIVER,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	struct drm_display_mode *mode;
+	int ret;
+
+	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	if (!mode) {
+		DRM_ERROR("Could not create new fb mode.\n");
+		return -ENOMEM;
+	}
+
+	mode->hdisplay = var->xres;
+	mode->vdisplay = var->yres;
+	vmw_guess_mode_timing(mode);
+
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
+					mode->hdisplay *
+					DIV_ROUND_UP(var->bits_per_pixel, 8),
+					mode->vdisplay)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	ret = vmw_fb_kms_framebuffer(info);
+	if (ret)
+		goto out_unlock;
+
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+
+	set.crtc = par->crtc;
+	set.x = 0;
+	set.y = 0;
+	set.mode = mode;
+	set.fb = par->set_fb;
+	set.num_connectors = 1;
+	set.connectors = &par->con;
+
+	ret = vmwgfx_set_config_internal(&set);
+	if (ret)
+		goto out_unlock;
+
+	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+			  par->set_fb->width, par->set_fb->height);
+
+	/* If something was already dirty we won't schedule
+	 * new work, so let's do it now. */
+
+	schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+	if (par->set_mode)
+		drm_mode_destroy(vmw_priv->dev, par->set_mode);
+	par->set_mode = mode;
+
+	mutex_unlock(&par->bo_mutex);
+
+	return ret;
+}
+
+
+static struct fb_ops vmw_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = vmw_fb_check_var,
+	.fb_set_par = vmw_fb_set_par,
+	.fb_setcolreg = vmw_fb_setcolreg,
+	.fb_fillrect = vmw_fb_fillrect,
+	.fb_copyarea = vmw_fb_copyarea,
+	.fb_imageblit = vmw_fb_imageblit,
+	.fb_pan_display = vmw_fb_pan_display,
+	.fb_blank = vmw_fb_blank,
+};
+
+int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+	struct device *device = &vmw_priv->dev->pdev->dev;
+	struct vmw_fb_par *par;
+	struct fb_info *info;
+	unsigned fb_width, fb_height;
+	unsigned int fb_bpp, fb_pitch, fb_size;
+	struct drm_display_mode *init_mode;
+	int ret;
+
+	fb_bpp = 32;
+
+	/* XXX As shouldn't these be as well. */
+	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+
+	fb_pitch = fb_width * fb_bpp / 8;
+	fb_size = fb_pitch * fb_height;
+
+	info = framebuffer_alloc(sizeof(*par), device);
+	if (!info)
+		return -ENOMEM;
+
+	/*
+	 * Par
+	 */
+	vmw_priv->fb_info = info;
+	par = info->par;
+	memset(par, 0, sizeof(*par));
+	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
+	par->vmw_priv = vmw_priv;
+	par->vmalloc = NULL;
+	par->max_width = fb_width;
+	par->max_height = fb_height;
+
+	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+				      par->max_height, &par->con,
+				      &par->crtc, &init_mode);
+	if (ret)
+		goto err_kms;
+
+	info->var.xres = init_mode->hdisplay;
+	info->var.yres = init_mode->vdisplay;
+
+	/*
+	 * Create buffers and alloc memory
+	 */
+	par->vmalloc = vzalloc(fb_size);
+	if (unlikely(par->vmalloc == NULL)) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	/*
+	 * Fixed and var
+	 */
+	strcpy(info->fix.id, "svgadrmfb");
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = FB_VISUAL_TRUECOLOR;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.line_length = fb_pitch;
+
+	info->fix.smem_start = 0;
+	info->fix.smem_len = fb_size;
+
+	info->pseudo_palette = par->pseudo_palette;
+	info->screen_base = (char __iomem *)par->vmalloc;
+	info->screen_size = fb_size;
+
+	info->fbops = &vmw_fb_ops;
+
+	/* 24-bit depth by default */
+	info->var.red.offset = 16;
+	info->var.green.offset = 8;
+	info->var.blue.offset = 0;
+	info->var.red.length = 8;
+	info->var.green.length = 8;
+	info->var.blue.length = 8;
+	info->var.transp.offset = 0;
+	info->var.transp.length = 0;
+
+	info->var.xres_virtual = fb_width;
+	info->var.yres_virtual = fb_height;
+	info->var.bits_per_pixel = fb_bpp;
+	info->var.xoffset = 0;
+	info->var.yoffset = 0;
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->var.height = -1;
+	info->var.width = -1;
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_aper;
+	}
+	info->apertures->ranges[0].base = vmw_priv->vram_start;
+	info->apertures->ranges[0].size = vmw_priv->vram_size;
+
+	/*
+	 * Dirty & Deferred IO
+	 */
+	par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+	par->dirty.active = true;
+	spin_lock_init(&par->dirty.lock);
+	mutex_init(&par->bo_mutex);
+	info->fbdefio = &vmw_defio;
+	fb_deferred_io_init(info);
+
+	ret = register_framebuffer(info);
+	if (unlikely(ret != 0))
+		goto err_defio;
+
+	vmw_fb_set_par(info);
+
+	return 0;
+
+err_defio:
+	fb_deferred_io_cleanup(info);
+err_aper:
+err_free:
+	vfree(par->vmalloc);
+err_kms:
+	framebuffer_release(info);
+	vmw_priv->fb_info = NULL;
+
+	return ret;
+}
+
+int vmw_fb_close(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+
+	if (!vmw_priv->fb_info)
+		return 0;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+
+	/* ??? order */
+	fb_deferred_io_cleanup(info);
+	cancel_delayed_work_sync(&par->local_work);
+	unregister_framebuffer(info);
+
+	mutex_lock(&par->bo_mutex);
+	(void) vmw_fb_kms_detach(par, true, true);
+	mutex_unlock(&par->bo_mutex);
+
+	vfree(par->vmalloc);
+	framebuffer_release(info);
+
+	return 0;
+}
+
+int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+	unsigned long flags;
+
+	if (!vmw_priv->fb_info)
+		return -EINVAL;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	par->dirty.active = false;
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+	flush_delayed_work(&info->deferred_work);
+	flush_delayed_work(&par->local_work);
+
+	return 0;
+}
+
+int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+	unsigned long flags;
+
+	if (!vmw_priv->fb_info)
+		return -EINVAL;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	par->dirty.active = true;
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+	/*
+	 * Need to reschedule a dirty update, because otherwise that's
+	 * only done in dirty_mark() if the previous coalesced
+	 * dirty region was empty.
+	 */
+	schedule_delayed_work(&par->local_work, 0);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 0000000..da052e3
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/sched/signal.h>
+
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 31)
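+/*
+ * Seqnos are 32-bit and wrap. Throughout this file, a fence with seqno s is
+ * considered passed by hardware seqno h when (h - s) < VMW_FENCE_WRAP in
+ * unsigned arithmetic.
+ */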
+
+struct vmw_fence_manager {
+	int num_fence_objects;
+	struct vmw_private *dev_priv;
+	spinlock_t lock;
+	struct list_head fence_list;
+	struct work_struct work;
+	u32 user_fence_size;
+	u32 fence_size;
+	u32 event_fence_action_size;
+	bool fifo_down;
+	struct list_head cleanup_list;
+	uint32_t pending_actions[VMW_ACTION_MAX];
+	struct mutex goal_irq_mutex;
+	bool goal_irq_on; /* Protected by @goal_irq_mutex */
+	bool seqno_valid; /* Protected by @lock, and may not be set to true
+			     without the @goal_irq_mutex held. */
+	u64 ctx;
+};
+
+struct vmw_user_fence {
+	struct ttm_base_object base;
+	struct vmw_fence_obj fence;
+};
+
+/**
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
+ *
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @event: A struct drm_pending_event that controls the event delivery.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event stuff.
+ * @tv_sec: If non-NULL, the variable pointed to will be assigned the
+ * current time tv_sec value when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
+ * be assigned the current time tv_usec value when the fence signals.
+ */
+struct vmw_event_fence_action {
+	struct vmw_fence_action action;
+
+	struct drm_pending_event *event;
+	struct vmw_fence_obj *fence;
+	struct drm_device *dev;
+
+	uint32_t *tv_sec;
+	uint32_t *tv_usec;
+};
+
+static struct vmw_fence_manager *
+fman_from_fence(struct vmw_fence_obj *fence)
+{
+	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
+}
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call the vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
+ */
+
+static void vmw_fence_obj_destroy(struct dma_fence *f)
+{
+	struct vmw_fence_obj *fence =
+		container_of(f, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+	spin_lock(&fman->lock);
+	list_del_init(&fence->head);
+	--fman->num_fence_objects;
+	spin_unlock(&fman->lock);
+	fence->destroy(fence);
+}
+
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
+{
+	return "vmwgfx";
+}
+
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
+{
+	return "svga";
+}
+
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
+{
+	struct vmw_fence_obj *fence =
+		container_of(f, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	struct vmw_private *dev_priv = fman->dev_priv;
+
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
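+
+	/* Already signaled by the device; no need to arm the interrupt. */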
+	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+		return false;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+	return true;
+}
+
+struct vmwgfx_wait_cb {
+	struct dma_fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct vmwgfx_wait_cb *wait =
+		container_of(cb, struct vmwgfx_wait_cb, base);
+
+	wake_up_process(wait->task);
+}
+
+static void __vmw_fences_update(struct vmw_fence_manager *fman);
+
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
+{
+	struct vmw_fence_obj *fence =
+		container_of(f, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	struct vmw_private *dev_priv = fman->dev_priv;
+	struct vmwgfx_wait_cb cb;
+	long ret = timeout;
+
+	if (likely(vmw_fence_obj_signaled(fence)))
+		return timeout;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	vmw_seqno_waiter_add(dev_priv);
+
+	spin_lock(f->lock);
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+		goto out;
+
+	if (intr && signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	cb.base.func = vmwgfx_wait_cb;
+	cb.task = current;
+	list_add(&cb.base.node, &f->cb_list);
+
+	for (;;) {
+		__vmw_fences_update(fman);
+
+		/*
+		 * We can use the barrier free __set_current_state() since
+		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+		 * fence spinlock.
+		 */
+		if (intr)
+			__set_current_state(TASK_INTERRUPTIBLE);
+		else
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+			if (ret == 0 && timeout > 0)
+				ret = 1;
+			break;
+		}
+
+		if (intr && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (ret == 0)
+			break;
+
+		spin_unlock(f->lock);
+
+		ret = schedule_timeout(ret);
+
+		spin_lock(f->lock);
+	}
+	__set_current_state(TASK_RUNNING);
+	if (!list_empty(&cb.base.node))
+		list_del(&cb.base.node);
+
+out:
+	spin_unlock(f->lock);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	return ret;
+}
+
+static const struct dma_fence_ops vmw_fence_ops = {
+	.get_driver_name = vmw_fence_get_driver_name,
+	.get_timeline_name = vmw_fence_get_timeline_name,
+	.enable_signaling = vmw_fence_enable_signaling,
+	.wait = vmw_fence_wait,
+	.release = vmw_fence_obj_destroy,
+};
+
+
+/**
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+ * signal actions from atomic context.
+ */
+
+static void vmw_fence_work_func(struct work_struct *work)
+{
+	struct vmw_fence_manager *fman =
+		container_of(work, struct vmw_fence_manager, work);
+	struct list_head list;
+	struct vmw_fence_action *action, *next_action;
+	bool seqno_valid;
+
+	do {
+		INIT_LIST_HEAD(&list);
+		mutex_lock(&fman->goal_irq_mutex);
+
+		spin_lock(&fman->lock);
+		list_splice_init(&fman->cleanup_list, &list);
+		seqno_valid = fman->seqno_valid;
+		spin_unlock(&fman->lock);
+
+		if (!seqno_valid && fman->goal_irq_on) {
+			fman->goal_irq_on = false;
+			vmw_goal_waiter_remove(fman->dev_priv);
+		}
+		mutex_unlock(&fman->goal_irq_mutex);
+
+		if (list_empty(&list))
+			return;
+
+		/*
+		 * At this point, only we should be able to manipulate the
+		 * list heads of the actions we have on the private list,
+		 * hence fman::lock need not be held.
+		 */
+
+		list_for_each_entry_safe(action, next_action, &list, head) {
+			list_del_init(&action->head);
+			if (action->cleanup)
+				action->cleanup(action);
+		}
+	} while (1);
+}
+
+struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
+{
+	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+
+	if (unlikely(!fman))
+		return NULL;
+
+	fman->dev_priv = dev_priv;
+	spin_lock_init(&fman->lock);
+	INIT_LIST_HEAD(&fman->fence_list);
+	INIT_LIST_HEAD(&fman->cleanup_list);
+	INIT_WORK(&fman->work, &vmw_fence_work_func);
+	fman->fifo_down = true;
+	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+		TTM_OBJ_EXTRA_SIZE;
+	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+	fman->event_fence_action_size =
+		ttm_round_pot(sizeof(struct vmw_event_fence_action));
+	mutex_init(&fman->goal_irq_mutex);
+	fman->ctx = dma_fence_context_alloc(1);
+
+	return fman;
+}
+
+void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
+{
+	bool lists_empty;
+
+	(void) cancel_work_sync(&fman->work);
+
+	spin_lock(&fman->lock);
+	lists_empty = list_empty(&fman->fence_list) &&
+		list_empty(&fman->cleanup_list);
+	spin_unlock(&fman->lock);
+
+	BUG_ON(!lists_empty);
+	kfree(fman);
+}
+
+static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+			      struct vmw_fence_obj *fence, u32 seqno,
+			      void (*destroy) (struct vmw_fence_obj *fence))
+{
+	int ret = 0;
+
+	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+		       fman->ctx, seqno);
+	INIT_LIST_HEAD(&fence->seq_passed_actions);
+	fence->destroy = destroy;
+
+	spin_lock(&fman->lock);
+	if (unlikely(fman->fifo_down)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	list_add_tail(&fence->head, &fman->fence_list);
+	++fman->num_fence_objects;
+
+out_unlock:
+	spin_unlock(&fman->lock);
+	return ret;
+
+}
+
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+				struct list_head *list)
+{
+	struct vmw_fence_action *action, *next_action;
+
+	list_for_each_entry_safe(action, next_action, list, head) {
+		list_del_init(&action->head);
+		fman->pending_actions[action->type]--;
+		if (action->seq_passed != NULL)
+			action->seq_passed(action);
+
+		/*
+		 * Add the cleanup action to the cleanup list so that
+		 * it will be performed by a worker task.
+		 */
+
+		list_add_tail(&action->head, &fman->cleanup_list);
+	}
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+				      u32 passed_seqno)
+{
+	u32 goal_seqno;
+	u32 *fifo_mem;
+	struct vmw_fence_obj *fence;
+
+	if (likely(!fman->seqno_valid))
+		return false;
+
+	fifo_mem = fman->dev_priv->mmio_virt;
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
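+	/* Nothing to do while the current goal lies ahead of the device. */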
+	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+		return false;
+
+	fman->seqno_valid = false;
+	list_for_each_entry(fence, &fman->fence_list, head) {
+		if (!list_empty(&fence->seq_passed_actions)) {
+			fman->seqno_valid = true;
+			vmw_mmio_write(fence->base.seqno,
+				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			break;
+		}
+	}
+
+	return true;
+}
+
+
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	u32 goal_seqno;
+	u32 *fifo_mem;
+
+	if (dma_fence_is_signaled_locked(&fence->base))
+		return false;
+
+	fifo_mem = fman->dev_priv->mmio_virt;
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(fman->seqno_valid &&
+		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
+		return false;
+
+	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	fman->seqno_valid = true;
+
+	return true;
+}
+
+static void __vmw_fences_update(struct vmw_fence_manager *fman)
+{
+	struct vmw_fence_obj *fence, *next_fence;
+	struct list_head action_list;
+	bool needs_rerun;
+	uint32_t seqno, new_seqno;
+	u32 *fifo_mem = fman->dev_priv->mmio_virt;
+
+	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
+	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
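+		/*
+		 * Fences are kept in submission order; stop at the first one
+		 * the device hasn't passed yet.
+		 */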
+		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+			list_del_init(&fence->head);
+			dma_fence_signal_locked(&fence->base);
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+		} else
+			break;
+	}
+
+	/*
+	 * Rerun if the fence goal seqno was updated, since the hardware
+	 * might have raced with that update and we might have missed a
+	 * fence_goal irq.
+	 */
+
+	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+	if (unlikely(needs_rerun)) {
+		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+		if (new_seqno != seqno) {
+			seqno = new_seqno;
+			goto rerun;
+		}
+	}
+
+	if (!list_empty(&fman->cleanup_list))
+		(void) schedule_work(&fman->work);
+}
+
+void vmw_fences_update(struct vmw_fence_manager *fman)
+{
+	spin_lock(&fman->lock);
+	__vmw_fences_update(fman);
+	spin_unlock(&fman->lock);
+}
+
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
+{
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+		return 1;
+
+	vmw_fences_update(fman);
+
+	return dma_fence_is_signaled(&fence->base);
+}
+
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
+		       bool interruptible, unsigned long timeout)
+{
+	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
+
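+	/*
+	 * dma_fence_wait_timeout() returns the remaining timeout (> 0) on
+	 * success, 0 on timeout and a negative error code on failure; map
+	 * that onto 0 / -EBUSY / the error code.
+	 */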
+	if (likely(ret > 0))
+		return 0;
+	else if (ret == 0)
+		return -EBUSY;
+	else
+		return ret;
+}
+
+void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
+{
+	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static void vmw_fence_destroy(struct vmw_fence_obj *fence)
+{
+	dma_fence_free(&fence->base);
+}
+
+int vmw_fence_create(struct vmw_fence_manager *fman,
+		     uint32_t seqno,
+		     struct vmw_fence_obj **p_fence)
+{
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (unlikely(!fence))
+		return -ENOMEM;
+
+	ret = vmw_fence_obj_init(fman, fence, seqno,
+				 vmw_fence_destroy);
+	if (unlikely(ret != 0))
+		goto out_err_init;
+
+	*p_fence = fence;
+	return 0;
+
+out_err_init:
+	kfree(fence);
+	return ret;
+}
+
+
+static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+{
+	struct vmw_user_fence *ufence =
+		container_of(fence, struct vmw_user_fence, fence);
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+	ttm_base_object_kfree(ufence, base);
+	/*
+	 * Free kernel space accounting.
+	 */
+	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+			    fman->user_fence_size);
+}
+
+static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_fence *ufence =
+		container_of(base, struct vmw_user_fence, base);
+	struct vmw_fence_obj *fence = &ufence->fence;
+
+	*p_base = NULL;
+	vmw_fence_obj_unreference(&fence);
+}
+
+int vmw_user_fence_create(struct drm_file *file_priv,
+			  struct vmw_fence_manager *fman,
+			  uint32_t seqno,
+			  struct vmw_fence_obj **p_fence,
+			  uint32_t *p_handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_fence *ufence;
+	struct vmw_fence_obj *tmp;
+	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	/*
+	 * Kernel memory space accounting, since this object may
+	 * be created by a user-space request.
+	 */
+
+	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
+				   &ctx);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+	if (unlikely(!ufence)) {
+		ret = -ENOMEM;
+		goto out_no_object;
+	}
+
+	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
+				 vmw_user_fence_destroy);
+	if (unlikely(ret != 0)) {
+		kfree(ufence);
+		goto out_no_object;
+	}
+
+	/*
+	 * The base object holds a reference which is freed in
+	 * vmw_user_fence_base_release.
+	 */
+	tmp = vmw_fence_obj_reference(&ufence->fence);
+	ret = ttm_base_object_init(tfile, &ufence->base, false,
+				   VMW_RES_FENCE,
+				   &vmw_user_fence_base_release, NULL);
+
+
+	if (unlikely(ret != 0)) {
+		/*
+		 * Free the base object's reference
+		 */
+		vmw_fence_obj_unreference(&tmp);
+		goto out_err;
+	}
+
+	*p_fence = &ufence->fence;
+	*p_handle = ufence->base.handle;
+
+	return 0;
+out_err:
+	tmp = &ufence->fence;
+	vmw_fence_obj_unreference(&tmp);
+out_no_object:
+	ttm_mem_global_free(mem_glob, fman->user_fence_size);
+	return ret;
+}
+
+
+/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array.  If that's the case, it'll wait on each of the child fences.
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+		       struct dma_fence *fence)
+{
+	struct dma_fence_array *fence_array;
+	int ret = 0;
+	int i;
+
+
+	if (dma_fence_is_signaled(fence))
+		return 0;
+
+	if (!dma_fence_is_array(fence))
+		return dma_fence_wait(fence, true);
+
+	/* From i915: Note that if the fence-array was created in
+	 * signal-on-any mode, we should *not* decompose it into its individual
+	 * fences. However, we don't currently store which mode the fence-array
+	 * is operating in. Fortunately, the only user of signal-on-any is
+	 * private to amdgpu and we should not see any incoming fence-array
+	 * from sync-file being in signal-on-any mode.
+	 */
+
+	fence_array = to_dma_fence_array(fence);
+	for (i = 0; i < fence_array->num_fences; i++) {
+		struct dma_fence *child = fence_array->fences[i];
+
+		ret = dma_fence_wait(child, true);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+
+/**
+ * vmw_fence_fifo_down - signal all unsignaled fence objects.
+ */
+
+void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
+{
+	struct list_head action_list;
+	int ret;
+
+	/*
+	 * The list may be altered while we traverse it, so always
+	 * restart when we've released the fman->lock.
+	 */
+
+	spin_lock(&fman->lock);
+	fman->fifo_down = true;
+	while (!list_empty(&fman->fence_list)) {
+		struct vmw_fence_obj *fence =
+			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
+				   head);
+		dma_fence_get(&fence->base);
+		spin_unlock(&fman->lock);
+
+		ret = vmw_fence_obj_wait(fence, false, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+
+		if (unlikely(ret != 0)) {
+			list_del_init(&fence->head);
+			dma_fence_signal(&fence->base);
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+		}
+
+		BUG_ON(!list_empty(&fence->head));
+		dma_fence_put(&fence->base);
+		spin_lock(&fman->lock);
+	}
+	spin_unlock(&fman->lock);
+}
+
+void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+{
+	spin_lock(&fman->lock);
+	fman->fifo_down = false;
+	spin_unlock(&fman->lock);
+}
+
+
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+	if (!base) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (base->refcount_release != vmw_user_fence_base_release) {
+		pr_err("Invalid fence object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		ttm_base_object_unref(&base);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return base;
+}
+
+
+int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_wait_arg *arg =
+	    (struct drm_vmw_fence_wait_arg *)data;
+	unsigned long timeout;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+
+	/*
+	 * 64-bit division not present on 32-bit systems, so do an
+	 * approximation. (Divide by 1000000): 1/2^20 + 1/2^24 - 1/2^26
+	 * = 67/2^26, which is within about 0.2% of 1/10^6.
+	 */
+
+	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
+	  (wait_timeout >> 26);
+
+	if (!arg->cookie_valid) {
+		arg->cookie_valid = 1;
+		arg->kernel_cookie = jiffies + wait_timeout;
+	}
+
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+	timeout = jiffies;
+	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
+		ret = ((vmw_fence_obj_signaled(fence)) ?
+		       0 : -EBUSY);
+		goto out;
+	}
+
+	timeout = (unsigned long)arg->kernel_cookie - timeout;
+
+	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
+
+out:
+	ttm_base_object_unref(&base);
+
+	/*
+	 * Optionally unref the fence object.
+	 */
+
+	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
+		return ttm_ref_object_base_unref(tfile, arg->handle,
+						 TTM_REF_USAGE);
+	return ret;
+}
+
+int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_signaled_arg *arg =
+		(struct drm_vmw_fence_signaled_arg *) data;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct vmw_fence_manager *fman;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	base = vmw_fence_obj_lookup(tfile, arg->handle);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+	fman = fman_from_fence(fence);
+
+	arg->signaled = vmw_fence_obj_signaled(fence);
+
+	arg->signaled_flags = arg->flags;
+	spin_lock(&fman->lock);
+	arg->passed_seqno = dev_priv->last_read_seqno;
+	spin_unlock(&fman->lock);
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+}
+
+
+int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_arg *arg =
+		(struct drm_vmw_fence_arg *) data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence to which @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+	struct drm_device *dev = eaction->dev;
+	struct drm_pending_event *event = eaction->event;
+
+	if (unlikely(event == NULL))
+		return;
+
+	spin_lock_irq(&dev->event_lock);
+
+	if (likely(eaction->tv_sec != NULL)) {
+		struct timespec64 ts;
+
+		ktime_get_ts64(&ts);
+		/* monotonic time, so no y2038 overflow */
+		*eaction->tv_sec = ts.tv_sec;
+		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+	}
+
+	drm_send_event_locked(dev, eaction->event);
+	eaction->event = NULL;
+	spin_unlock_irq(&dev->event_lock);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+
+	vmw_fence_obj_unreference(&eaction->fence);
+	kfree(eaction);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence: The fence object.
+ * @action: The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+			      struct vmw_fence_action *action)
+{
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	bool run_update = false;
+
+	mutex_lock(&fman->goal_irq_mutex);
+	spin_lock(&fman->lock);
+
+	fman->pending_actions[action->type]++;
+	if (dma_fence_is_signaled_locked(&fence->base)) {
+		struct list_head action_list;
+
+		INIT_LIST_HEAD(&action_list);
+		list_add_tail(&action->head, &action_list);
+		vmw_fences_perform_actions(fman, &action_list);
+	} else {
+		list_add_tail(&action->head, &fence->seq_passed_actions);
+
+		/*
+		 * This function may set fman::seqno_valid, so it must
+		 * be run with the goal_irq_mutex held.
+		 */
+		run_update = vmw_fence_goal_check_locked(fence);
+	}
+
+	spin_unlock(&fman->lock);
+
+	if (run_update) {
+		if (!fman->goal_irq_on) {
+			fman->goal_irq_on = true;
+			vmw_goal_waiter_add(fman->dev_priv);
+		}
+		vmw_fences_update(fman);
+	}
+	mutex_unlock(&fman->goal_irq_mutex);
+
+}
+
+/**
+ * vmw_event_fence_action_queue - Queue an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should've been allocated with
+ * k[mz]alloc and completely initialized.
+ * @tv_sec: If non-NULL, assigned the seconds part of the signal time when the
+ * fence signals.
+ * @tv_usec: Must be set if @tv_sec is set; assigned the microseconds part of
+ * the signal time.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+				 struct vmw_fence_obj *fence,
+				 struct drm_pending_event *event,
+				 uint32_t *tv_sec,
+				 uint32_t *tv_usec,
+				 bool interruptible)
+{
+	struct vmw_event_fence_action *eaction;
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+	if (unlikely(!eaction))
+		return -ENOMEM;
+
+	eaction->event = event;
+
+	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+	eaction->action.cleanup = vmw_event_fence_action_cleanup;
+	eaction->action.type = VMW_ACTION_EVENT;
+
+	eaction->fence = vmw_fence_obj_reference(fence);
+	eaction->dev = fman->dev_priv->dev;
+	eaction->tv_sec = tv_sec;
+	eaction->tv_usec = tv_usec;
+
+	vmw_fence_obj_add_action(fence, &eaction->action);
+
+	return 0;
+}
+
+struct vmw_event_fence_pending {
+	struct drm_pending_event base;
+	struct drm_vmw_event_fence event;
+};
+
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
+				  struct vmw_fence_obj *fence,
+				  uint32_t flags,
+				  uint64_t user_data,
+				  bool interruptible)
+{
+	struct vmw_event_fence_pending *event;
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	struct drm_device *dev = fman->dev_priv->dev;
+	int ret;
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (unlikely(!event)) {
+		DRM_ERROR("Failed to allocate an event.\n");
+		ret = -ENOMEM;
+		goto out_no_space;
+	}
+
+	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+	event->event.base.length = sizeof(event->event);
+	event->event.user_data = user_data;
+
+	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		kfree(event);
+		goto out_no_space;
+	}
+
+	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   interruptible);
+	else
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   NULL,
+						   NULL,
+						   interruptible);
+	if (ret != 0)
+		goto out_no_queue;
+
+	return 0;
+
+out_no_queue:
+	drm_event_cancel_free(dev, &event->base);
+out_no_space:
+	return ret;
+}
+
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_fence_event_arg *arg =
+		(struct drm_vmw_fence_event_arg *) data;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct ttm_object_file *tfile = vmw_fp->tfile;
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)(unsigned long)
+		arg->fence_rep;
+	uint32_t handle;
+	int ret;
+
+	/*
+	 * Look up an existing fence object,
+	 * and if user-space wants a new reference,
+	 * add one.
+	 */
+	if (arg->handle) {
+		struct ttm_base_object *base =
+			vmw_fence_obj_lookup(tfile, arg->handle);
+
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
+		fence = &(container_of(base, struct vmw_user_fence,
+				       base)->fence);
+		(void) vmw_fence_obj_reference(fence);
+
+		if (user_fence_rep != NULL) {
+			ret = ttm_ref_object_add(vmw_fp->tfile, base,
+						 TTM_REF_USAGE, NULL, false);
+			if (unlikely(ret != 0)) {
+				DRM_ERROR("Failed to reference a fence "
+					  "object.\n");
+				goto out_no_ref_obj;
+			}
+			handle = base->handle;
+		}
+		ttm_base_object_unref(&base);
+	}
+
+	/*
+	 * Create a new fence object.
+	 */
+	if (!fence) {
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+						 &fence,
+						 (user_fence_rep) ?
+						 &handle : NULL);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Fence event failed to create fence.\n");
+			return ret;
+		}
+	}
+
+	BUG_ON(fence == NULL);
+
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Failed to attach event to fence.\n");
+		goto out_no_create;
+	}
+
+	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+				    handle, -1);
+	vmw_fence_obj_unreference(&fence);
+	return 0;
+out_no_create:
+	if (user_fence_rep != NULL)
+		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+out_no_ref_obj:
+	vmw_fence_obj_unreference(&fence);
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 0000000..50e9fdd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_FENCE_H_
+#define _VMWGFX_FENCE_H_
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+
+#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
+
+struct drm_device;
+struct drm_file;
+struct drm_pending_event;
+
+struct vmw_private;
+struct vmw_fence_manager;
+
+/**
+ * enum vmw_action_type - Types of actions that can be attached to a fence
+ *
+ * @VMW_ACTION_EVENT: Send a DRM event when the fence seqno passes.
+ * @VMW_ACTION_MAX: Number of action types. Must be last.
+ */
+enum vmw_action_type {
+	VMW_ACTION_EVENT = 0,
+	VMW_ACTION_MAX
+};
+
+struct vmw_fence_action {
+	struct list_head head;
+	enum vmw_action_type type;
+	void (*seq_passed) (struct vmw_fence_action *action);
+	void (*cleanup) (struct vmw_fence_action *action);
+};
+
+struct vmw_fence_obj {
+	struct dma_fence base;
+
+	struct list_head head;
+	struct list_head seq_passed_actions;
+	void (*destroy)(struct vmw_fence_obj *fence);
+};
+
+extern struct vmw_fence_manager *
+vmw_fence_manager_init(struct vmw_private *dev_priv);
+
+extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
+
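+/*
+ * vmw_fence_obj_unreference - Drop a fence reference and clear the pointer.
+ * The underlying dma_fence is released when its last reference goes away.
+ */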
+static inline void
+vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+	struct vmw_fence_obj *fence = *fence_p;
+
+	*fence_p = NULL;
+	if (fence)
+		dma_fence_put(&fence->base);
+}
+
+static inline struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+	if (fence)
+		dma_fence_get(&fence->base);
+	return fence;
+}
+
+extern void vmw_fences_update(struct vmw_fence_manager *fman);
+
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
+			      bool lazy,
+			      bool interruptible, unsigned long timeout);
+
+extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_create(struct vmw_fence_manager *fman,
+			    uint32_t seqno,
+			    struct vmw_fence_obj **p_fence);
+
+extern int vmw_user_fence_create(struct drm_file *file_priv,
+				 struct vmw_fence_manager *fman,
+				 uint32_t sequence,
+				 struct vmw_fence_obj **p_fence,
+				 uint32_t *p_handle);
+
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+			      struct dma_fence *fence);
+
+extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
+
+extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+
+extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
+					struct vmw_fence_obj *fence,
+					struct drm_pending_event *event,
+					uint32_t *tv_sec,
+					uint32_t *tv_usec,
+					bool interruptible);
+#endif /* _VMWGFX_FENCE_H_ */
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 0000000..e5252ef
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/sched/signal.h>
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t fifo_min, hwversion;
+	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_3D))
+		return false;
+
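+	/*
+	 * On devices with guest-backed objects, 3D support is queried
+	 * through the SVGA_REG_DEV_CAP register rather than the legacy
+	 * FIFO 3D version registers.
+	 */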
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		uint32_t result;
+
+		if (!dev_priv->has_mob)
+			return false;
+
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+
+		return (result != 0);
+	}
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	fifo_min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
+	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+		return false;
+
+	hwversion = vmw_mmio_read(fifo_mem +
+				  ((fifo->capabilities &
+				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				   SVGA_FIFO_3D_HWVERSION_REVISED :
+				   SVGA_FIFO_3D_HWVERSION));
+
+	if (hwversion == 0)
+		return false;
+
+	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+		return false;
+
+	/* Legacy Display Unit does not support surfaces */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return false;
+
+	return true;
+}
+
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+	u32  *fifo_mem = dev_priv->mmio_virt;
+	uint32_t caps;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+		return true;
+
+	return false;
+}
+
+int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+	u32  *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max;
+	uint32_t min;
+
+	fifo->dx = false;
+	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
+	if (unlikely(fifo->static_buffer == NULL))
+		return -ENOMEM;
+
+	fifo->dynamic_buffer = NULL;
+	fifo->reserved_size = 0;
+	fifo->using_bounce_buffer = false;
+
+	mutex_init(&fifo->fifo_mutex);
+	init_rwsem(&fifo->rwsem);
+
+	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+
+	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+		  SVGA_REG_ENABLE_HIDE);
+	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+
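+	/*
+	 * SVGA_REG_MEM_REGS is the number of 32-bit FIFO registers that make
+	 * up the FIFO header; shift by 2 to convert that count to a byte
+	 * offset and keep at least one page for the header area.
+	 */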
+	min = 4;
+	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
+		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
+	min <<= 2;
+
+	if (min < PAGE_SIZE)
+		min = PAGE_SIZE;
+
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
+	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+	wmb();
+	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
+	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_STOP);
+	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
+	mb();
+
+	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
+	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+
+	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
+		 (unsigned int) max,
+		 (unsigned int) min,
+		 (unsigned int) fifo->capabilities);
+
+	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_marker_queue_init(&fifo->marker_queue);
+
+	return 0;
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+	u32 *fifo_mem = dev_priv->mmio_virt;
+
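+	/*
+	 * Only poke SVGA_REG_SYNC if the FIFO wasn't already marked busy;
+	 * the cmpxchg makes the check-and-set atomic.
+	 */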
+	preempt_disable();
+	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
+		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+	preempt_enable();
+}
+
+void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+	u32  *fifo_mem = dev_priv->mmio_virt;
+
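+	/* Kick the device and busy-wait until it reports idle. */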
+	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+		;
+
+	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+
+	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
+		  dev_priv->config_done_state);
+	vmw_write(dev_priv, SVGA_REG_ENABLE,
+		  dev_priv->enable_state);
+	vmw_write(dev_priv, SVGA_REG_TRACES,
+		  dev_priv->traces_state);
+
+	vmw_marker_queue_takedown(&fifo->marker_queue);
+
+	if (likely(fifo->static_buffer != NULL)) {
+		vfree(fifo->static_buffer);
+		fifo->static_buffer = NULL;
+	}
+
+	if (likely(fifo->dynamic_buffer != NULL)) {
+		vfree(fifo->dynamic_buffer);
+		fifo->dynamic_buffer = NULL;
+	}
+}
+
+static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	u32  *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+
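+	/*
+	 * The command area is a ring between min and max: next_cmd is the
+	 * guest's write pointer and stop is the host's read pointer.
+	 */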
+	return ((max - next_cmd) + (stop - min) <= bytes);
+}
+
+static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+			       uint32_t bytes, bool interruptible,
+			       unsigned long timeout)
+{
+	int ret = 0;
+	unsigned long end_jiffies = jiffies + timeout;
+	DEFINE_WAIT(__wait);
+
+	DRM_INFO("Fifo wait noirq.\n");
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
+				(interruptible) ?
+				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (!vmw_fifo_is_full(dev_priv, bytes))
+			break;
+		if (time_after_eq(jiffies, end_jiffies)) {
+			ret = -EBUSY;
+			DRM_ERROR("SVGA device lockup.\n");
+			break;
+		}
+		schedule_timeout(1);
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&dev_priv->fifo_queue, &__wait);
+	wake_up_all(&dev_priv->fifo_queue);
+	DRM_INFO("Fifo noirq exit.\n");
+	return ret;
+}
+
+static int vmw_fifo_wait(struct vmw_private *dev_priv,
+			 uint32_t bytes, bool interruptible,
+			 unsigned long timeout)
+{
+	long ret = 1L;
+
+	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
+		return 0;
+
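+	/*
+	 * Wake the host and, if the device can't raise FIFO-progress
+	 * interrupts, fall back to a polling wait.
+	 */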
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return vmw_fifo_wait_noirq(dev_priv, bytes,
+					   interruptible, timeout);
+
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+			       &dev_priv->fifo_queue_waiters);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+		    (dev_priv->fifo_queue,
+		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
+	else
+		ret = wait_event_timeout
+		    (dev_priv->fifo_queue,
+		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+				  &dev_priv->fifo_queue_waiters);
+
+	return ret;
+}
+
+/**
+ * vmw_local_fifo_reserve - Reserve @bytes number of bytes in the fifo.
+ *
+ * This function returns NULL (error) on two conditions:
+ * if it times out waiting for fifo space, or if @bytes is larger than the
+ * available fifo space.
+ *
+ * Returns:
+ *   Pointer to the fifo, or NULL on error (possible hardware hang).
+ */
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+				    uint32_t bytes)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	u32  *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max;
+	uint32_t min;
+	uint32_t next_cmd;
+	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+	int ret;
+
+	mutex_lock(&fifo_state->fifo_mutex);
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+
+	if (unlikely(bytes >= (max - min)))
+		goto out_err;
+
+	BUG_ON(fifo_state->reserved_size != 0);
+	BUG_ON(fifo_state->dynamic_buffer != NULL);
+
+	fifo_state->reserved_size = bytes;
+
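+	/*
+	 * Loop until the request can either be placed directly in the ring
+	 * (possibly after waiting for space) or must be diverted to a bounce
+	 * buffer that is copied into the ring at commit time.
+	 */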
+	while (1) {
+		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+		bool need_bounce = false;
+		bool reserve_in_place = false;
+
+		if (next_cmd >= stop) {
+			if (likely((next_cmd + bytes < max ||
+				    (next_cmd + bytes == max && stop > min))))
+				reserve_in_place = true;
+
+			else if (vmw_fifo_is_full(dev_priv, bytes)) {
+				ret = vmw_fifo_wait(dev_priv, bytes,
+						    false, 3 * HZ);
+				if (unlikely(ret != 0))
+					goto out_err;
+			} else
+				need_bounce = true;
+
+		} else {
+
+			if (likely((next_cmd + bytes < stop)))
+				reserve_in_place = true;
+			else {
+				ret = vmw_fifo_wait(dev_priv, bytes,
+						    false, 3 * HZ);
+				if (unlikely(ret != 0))
+					goto out_err;
+			}
+		}
+
+		if (reserve_in_place) {
+			if (reserveable || bytes <= sizeof(uint32_t)) {
+				fifo_state->using_bounce_buffer = false;
+
+				if (reserveable)
+					vmw_mmio_write(bytes, fifo_mem +
+						       SVGA_FIFO_RESERVED);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
+			} else {
+				need_bounce = true;
+			}
+		}
+
+		if (need_bounce) {
+			fifo_state->using_bounce_buffer = true;
+			if (bytes < fifo_state->static_buffer_size)
+				return fifo_state->static_buffer;
+			else {
+				fifo_state->dynamic_buffer = vmalloc(bytes);
+				if (!fifo_state->dynamic_buffer)
+					goto out_err;
+				return fifo_state->dynamic_buffer;
+			}
+		}
+	}
+out_err:
+	fifo_state->reserved_size = 0;
+	mutex_unlock(&fifo_state->fifo_mutex);
+
+	return NULL;
+}
+
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
+{
+	void *ret;
+
+	if (dev_priv->cman)
+		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
+		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN(1, "Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
+	if (IS_ERR_OR_NULL(ret))
+		return NULL;
+
+	return ret;
+}
+
+static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
+			      u32  *fifo_mem,
+			      uint32_t next_cmd,
+			      uint32_t max, uint32_t min, uint32_t bytes)
+{
+	uint32_t chunk_size = max - next_cmd;
+	uint32_t rest;
+	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+	if (bytes < chunk_size)
+		chunk_size = bytes;
+
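+	/*
+	 * Copy the bounce buffer into the ring, wrapping around to min when
+	 * the data doesn't fit before max. SVGA_FIFO_RESERVED marks the
+	 * region so the host doesn't process it until it is complete.
+	 */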
+	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+	mb();
+	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+	rest = bytes - chunk_size;
+	if (rest)
+		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
+}
+
+static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
+			       u32  *fifo_mem,
+			       uint32_t next_cmd,
+			       uint32_t max, uint32_t min, uint32_t bytes)
+{
+	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
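+	/*
+	 * Without the reserve capability, copy one 32-bit word at a time,
+	 * wrapping from max back to min and advancing SVGA_FIFO_NEXT_CMD
+	 * after every word.
+	 */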
+	while (bytes > 0) {
+		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
+		next_cmd += sizeof(uint32_t);
+		if (unlikely(next_cmd == max))
+			next_cmd = min;
+		mb();
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		mb();
+		bytes -= sizeof(uint32_t);
+	}
+}
+
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	u32  *fifo_mem = dev_priv->mmio_virt;
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
+	BUG_ON((bytes & 3) != 0);
+	BUG_ON(bytes > fifo_state->reserved_size);
+
+	fifo_state->reserved_size = 0;
+
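+	/*
+	 * If a bounce buffer was used during reserve, its contents are
+	 * copied into the ring now; otherwise the commands were already
+	 * written in place.
+	 */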
+	if (fifo_state->using_bounce_buffer) {
+		if (reserveable)
+			vmw_fifo_res_copy(fifo_state, fifo_mem,
+					  next_cmd, max, min, bytes);
+		else
+			vmw_fifo_slow_copy(fifo_state, fifo_mem,
+					   next_cmd, max, min, bytes);
+
+		if (fifo_state->dynamic_buffer) {
+			vfree(fifo_state->dynamic_buffer);
+			fifo_state->dynamic_buffer = NULL;
+		}
+
+	}
+
+	down_write(&fifo_state->rwsem);
+	if (fifo_state->using_bounce_buffer || reserveable) {
+		next_cmd += bytes;
+		if (next_cmd >= max)
+			next_cmd -= max - min;
+		mb();
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+	}
+
+	if (reserveable)
+		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
+	mb();
+	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
+}
+
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+	might_sleep();
+
+	if (dev_priv->cman)
+		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+	else
+		return 0;
+}
+
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	struct svga_fifo_cmd_fence *cmd_fence;
+	u32 *fm;
+	int ret = 0;
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
+
+	fm = VMW_FIFO_RESERVE(dev_priv, bytes);
+	if (unlikely(fm == NULL)) {
+		*seqno = atomic_read(&dev_priv->marker_seq);
+		ret = -ENOMEM;
+		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+					false, 3*HZ);
+		goto out_err;
+	}
+
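+	/* Seqno 0 is never used, so skip it if the counter wraps to zero. */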
+	do {
+		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+	} while (*seqno == 0);
+
+	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+
+		/*
+		 * Don't request hardware to send a fence. The
+		 * waiting code in vmwgfx_irq.c will emulate this.
+		 */
+
+		vmw_fifo_commit(dev_priv, 0);
+		return 0;
+	}
+
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
+	vmw_fifo_commit_flush(dev_priv, bytes);
+	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_update_seqno(dev_priv, fifo_state);
+
+out_err:
+	return ret;
+}
+
+/**
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+					    uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+		cmd->body.guestResult.offset = bo->offset;
+	} else {
+		cmd->body.guestResult.gmrId = bo->mem.start;
+		cmd->body.guestResult.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+					uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offset = 0;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	if (dev_priv->has_mob)
+		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 0000000..83c0d5a
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include "vmwgfx_drv.h"
+
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
+
+static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+			 struct vmw_piter *iter,
+			 unsigned long num_pages,
+			 int gmr_id)
+{
+	SVGAFifoCmdDefineGMR2 define_cmd;
+	SVGAFifoCmdRemapGMR2 remap_cmd;
+	uint32_t *cmd;
+	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
+	uint32_t i;
+
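+	/*
+	 * One DEFINE_GMR2 command is followed by remap_num REMAP_GMR2
+	 * commands, each carrying at most VMW_PPN_PER_REMAP page numbers,
+	 * so the whole binding fits in a single FIFO reservation.
+	 */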
+	cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	define_cmd.gmrId = gmr_id;
+	define_cmd.numPages = num_pages;
+
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that go into the GMR.
+	 */
+
+	remap_cmd.gmrId = gmr_id;
+	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
+
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
+
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
+			else
+				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+					PAGE_SHIFT;
+
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			vmw_piter_next(iter);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
+	}
+
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
+
+	return 0;
+}
+
+static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
+			    int gmr_id)
+{
+	SVGAFifoCmdDefineGMR2 define_cmd;
+	uint32_t define_size = sizeof(define_cmd) + 4;
+	uint32_t *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+	if (unlikely(cmd == NULL))
+		return;
+
+	define_cmd.gmrId = gmr_id;
+	define_cmd.numPages = 0;
+
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+
+	vmw_fifo_commit(dev_priv, define_size);
+}
+
+
+int vmw_gmr_bind(struct vmw_private *dev_priv,
+		 const struct vmw_sg_table *vsgt,
+		 unsigned long num_pages,
+		 int gmr_id)
+{
+	struct vmw_piter data_iter;
+
+	vmw_piter_start(&data_iter, vsgt, 0);
+
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
+		return -EINVAL;
+
+	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
+}
+
+
+void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
+{
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
+		vmw_gmr2_unbind(dev_priv, gmr_id);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 0000000..7da752c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+	spinlock_t lock;
+	struct ida gmr_ida;
+	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t used_gmr_pages;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+				  struct ttm_buffer_object *bo,
+				  const struct ttm_place *place,
+				  struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+	int id;
+
+	mem->mm_node = NULL;
+
+	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
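+	/*
+	 * Any ida failure other than -ENOMEM is reported as "no space":
+	 * mm_node stays NULL and 0 is returned so TTM can try another
+	 * placement.
+	 */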
+	if (id < 0)
+		return (id != -ENOMEM ? 0 : id);
+
+	spin_lock(&gman->lock);
+
+	if (gman->max_gmr_pages > 0) {
+		gman->used_gmr_pages += bo->num_pages;
+		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+			goto nospace;
+	}
+
+	mem->mm_node = gman;
+	mem->start = id;
+	mem->num_pages = bo->num_pages;
+
+	spin_unlock(&gman->lock);
+	return 0;
+
+nospace:
+	gman->used_gmr_pages -= bo->num_pages;
+	spin_unlock(&gman->lock);
+	ida_free(&gman->gmr_ida, id);
+	return 0;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+				   struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (mem->mm_node) {
+		ida_free(&gman->gmr_ida, mem->start);
+		spin_lock(&gman->lock);
+		gman->used_gmr_pages -= mem->num_pages;
+		spin_unlock(&gman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+			      unsigned long p_size)
+{
+	struct vmw_private *dev_priv =
+		container_of(man->bdev, struct vmw_private, bdev);
+	struct vmwgfx_gmrid_man *gman =
+		kzalloc(sizeof(*gman), GFP_KERNEL);
+
+	if (unlikely(!gman))
+		return -ENOMEM;
+
+	spin_lock_init(&gman->lock);
+	gman->used_gmr_pages = 0;
+	ida_init(&gman->gmr_ida);
+
+	switch (p_size) {
+	case VMW_PL_GMR:
+		gman->max_gmr_ids = dev_priv->max_gmr_ids;
+		gman->max_gmr_pages = dev_priv->max_gmr_pages;
+		break;
+	case VMW_PL_MOB:
+		gman->max_gmr_ids = VMWGFX_NUM_MOB;
+		gman->max_gmr_pages = dev_priv->max_mob_pages;
+		break;
+	default:
+		BUG();
+	}
+	man->priv = (void *) gman;
+	return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (gman) {
+		ida_destroy(&gman->gmr_ida);
+		kfree(gman);
+	}
+	return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+				struct drm_printer *printer)
+{
+	drm_printf(printer, "No debug info available for the GMR id manager\n");
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+	.init = vmw_gmrid_man_init,
+	.takedown = vmw_gmrid_man_takedown,
+	.get_node = vmw_gmrid_man_get_node,
+	.put_node = vmw_gmrid_man_put_node,
+	.debug = vmw_gmrid_man_debug
+};
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 0000000..a15375e
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/vmwgfx_drm.h>
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
+
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
+int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_getparam_arg *param =
+	    (struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+
+	switch (param->param) {
+	case DRM_VMW_PARAM_NUM_STREAMS:
+		param->value = vmw_overlay_num_overlays(dev_priv);
+		break;
+	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
+		param->value = vmw_overlay_num_free_overlays(dev_priv);
+		break;
+	case DRM_VMW_PARAM_3D:
+		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+		break;
+	case DRM_VMW_PARAM_HW_CAPS:
+		param->value = dev_priv->capabilities;
+		break;
+	case DRM_VMW_PARAM_HW_CAPS2:
+		param->value = dev_priv->capabilities2;
+		break;
+	case DRM_VMW_PARAM_FIFO_CAPS:
+		param->value = dev_priv->fifo.capabilities;
+		break;
+	case DRM_VMW_PARAM_MAX_FB_SIZE:
+		param->value = dev_priv->prim_bb_mem;
+		break;
+	case DRM_VMW_PARAM_FIFO_HW_VERSION:
+	{
+		u32 *fifo_mem = dev_priv->mmio_virt;
+		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
+		param->value =
+			vmw_mmio_read(fifo_mem +
+				      ((fifo->capabilities &
+					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				       SVGA_FIFO_3D_HWVERSION_REVISED :
+				       SVGA_FIFO_3D_HWVERSION));
+		break;
+	}
+	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
+		break;
+	case DRM_VMW_PARAM_3D_CAPS_SIZE:
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
+		else
+			param->value = (SVGA_FIFO_3D_CAPS_LAST -
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
+		break;
+	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
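+		/*
+		 * Querying this parameter marks the client as guest-backed
+		 * object aware, which changes how surface memory and 3D caps
+		 * are reported to it.
+		 */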
+		vmw_fp->gb_aware = true;
+		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+		break;
+	case DRM_VMW_PARAM_MAX_MOB_SIZE:
+		param->value = dev_priv->max_mob_size;
+		break;
+	case DRM_VMW_PARAM_SCREEN_TARGET:
+		param->value =
+			(dev_priv->active_display_unit == vmw_du_screen_target);
+		break;
+	case DRM_VMW_PARAM_DX:
+		param->value = dev_priv->has_dx;
+		break;
+	case DRM_VMW_PARAM_SM4_1:
+		param->value = dev_priv->has_sm4_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
+{
+	/*
+	 * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
+	 * to check the sample count supported by the virtual device. Since there
+	 * never was support for multisample counts for backing MOBs, return 0.
+	 */
+	if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+		return 0;
+
+	return fmt_value;
+}
+
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	spin_lock(&dev_priv->cap_lock);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_mask_multisample
+			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
+	}
+	spin_unlock(&dev_priv->cap_lock);
+
+	return 0;
+}
+
+
+int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_vmw_get_3d_cap_arg *arg =
+		(struct drm_vmw_get_3d_cap_arg *) data;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t size;
+	u32 *fifo_mem;
+	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
+	void *bounce;
+	int ret;
+	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+
+	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
+		VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
+		return -EINVAL;
+	}
+
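+	/*
+	 * The cap format depends on the device and the client: raw DEVCAP
+	 * values for gb-aware clients, a compat record on guest-backed
+	 * devices otherwise, or the legacy FIFO 3D caps block.
+	 */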
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
+	else
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
+
+	if (arg->max_size < size)
+		size = arg->max_size;
+
+	bounce = vzalloc(size);
+	if (unlikely(bounce == NULL)) {
+		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
+		return -ENOMEM;
+	}
+
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
+		uint32_t *bounce32 = (uint32_t *) bounce;
+
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
+		spin_lock(&dev_priv->cap_lock);
+		for (i = 0; i < num; ++i) {
+			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+			*bounce32++ = vmw_mask_multisample
+				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
+		}
+		spin_unlock(&dev_priv->cap_lock);
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
+	} else {
+		fifo_mem = dev_priv->mmio_virt;
+		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+	}
+
+	ret = copy_to_user(buffer, bounce, size);
+	if (ret)
+		ret = -EFAULT;
+out_err:
+	vfree(bounce);
+
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to report 3D caps info.\n");
+
+	return ret;
+}
+
+int vmw_present_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_arg *arg =
+		(struct drm_vmw_present_arg *)data;
+	struct vmw_surface *surface;
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_framebuffer *fb;
+	struct vmw_framebuffer *vfb;
+	struct vmw_resource *res;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
+	if (!fb) {
+		VMW_DEBUG_USER("Invalid framebuffer id.\n");
+		ret = -ENOENT;
+		goto out_no_fb;
+	}
+	vfb = vmw_framebuffer_to_vfb(fb);
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+					      user_surface_converter,
+					      &res);
+	if (ret)
+		goto out_no_surface;
+
+	surface = vmw_res_to_srf(res);
+	ret = vmw_kms_present(dev_priv, file_priv,
+			      vfb, surface, arg->sid,
+			      arg->dest_x, arg->dest_y,
+			      clips, num_clips);
+
+	/* vmw_user_resource_lookup_handle took a reference; drop it here. */
+	vmw_surface_unreference(&surface);
+
+out_no_surface:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+out_no_ttm_lock:
+	drm_framebuffer_put(fb);
+out_no_fb:
+	drm_modeset_unlock_all(dev);
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}
+
+int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_readback_arg *arg =
+		(struct drm_vmw_present_readback_arg *)data;
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)
+		(unsigned long)arg->fence_rep;
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_framebuffer *fb;
+	struct vmw_framebuffer *vfb;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
+	if (!fb) {
+		VMW_DEBUG_USER("Invalid framebuffer id.\n");
+		ret = -ENOENT;
+		goto out_no_fb;
+	}
+
+	vfb = vmw_framebuffer_to_vfb(fb);
+	if (!vfb->bo) {
+		VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
+		ret = -EINVAL;
+		goto out_no_ttm_lock;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_kms_readback(dev_priv, file_priv,
+			       vfb, user_fence_rep,
+			       clips, num_clips);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+out_no_ttm_lock:
+	drm_framebuffer_put(fb);
+out_no_fb:
+	drm_modeset_unlock_all(dev);
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}
+
+
+/**
+ * vmw_fops_poll - wrapper around the drm_poll function
+ *
+ * @filp: See the linux fops poll documentation.
+ * @wait: See the linux fops poll documentation.
+ *
+ * Wrapper around the drm_poll function that makes sure the device is
+ * processing the fifo if drm_poll decides to wait.
+ */
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv =
+		vmw_priv(file_priv->minor->dev);
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	return drm_poll(filp, wait);
+}
+
+
+/**
+ * vmw_fops_read - wrapper around the drm_read function
+ *
+ * @filp: See the linux fops read documentation.
+ * @buffer: See the linux fops read documentation.
+ * @count: See the linux fops read documentation.
+ * @offset: See the linux fops read documentation.
+ *
+ * Wrapper around the drm_read function that makes sure the device is
+ * processing the fifo if drm_read decides to wait.
+ */
+ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+		      size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv =
+		vmw_priv(file_priv->minor->dev);
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	return drm_read(filp, buffer, count, offset);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 0000000..75f3efe
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/sched/signal.h>
+
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 24)
+
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	irqreturn_t ret = IRQ_NONE;
+
+	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+			       dev_priv->irqthread_pending)) {
+		vmw_fences_update(dev_priv->fman);
+		wake_up_all(&dev_priv->fence_queue);
+		ret = IRQ_HANDLED;
+	}
+
+	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+			       dev_priv->irqthread_pending)) {
+		vmw_cmdbuf_irqthread(dev_priv->cman);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_irq_handler - irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status, masked_status;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	masked_status = status & READ_ONCE(dev_priv->irq_mask);
+
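+	/* Acknowledge all pending interrupt bits before acting on the masked set. */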
+	if (likely(status))
+		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+	if (!status)
+		return IRQ_NONE;
+
+	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
+		wake_up_all(&dev_priv->fifo_queue);
+
+	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+			      SVGA_IRQFLAG_FENCE_GOAL)) &&
+	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+		ret = IRQ_WAKE_THREAD;
+
+	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+			      SVGA_IRQFLAG_ERROR)) &&
+	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+			      dev_priv->irqthread_pending))
+		ret = IRQ_WAKE_THREAD;
+
+	return ret;
+}
+
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
+{
+
+	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
+}
+
+void vmw_update_seqno(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo_state)
+{
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+
+	if (dev_priv->last_read_seqno != seqno) {
+		dev_priv->last_read_seqno = seqno;
+		vmw_marker_pull(&fifo_state->marker_queue, seqno);
+		vmw_fences_update(dev_priv->fman);
+	}
+}
+
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+			 uint32_t seqno)
+{
+	struct vmw_fifo_state *fifo_state;
+	bool ret;
+
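+	/*
+	 * Unsigned 32-bit arithmetic handles seqno wrap: the fence has passed
+	 * if @seqno lies within VMW_FENCE_WRAP of the last read seqno.
+	 */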
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return true;
+
+	fifo_state = &dev_priv->fifo;
+	vmw_update_seqno(dev_priv, fifo_state);
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return true;
+
+	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+	    vmw_fifo_idle(dev_priv, seqno))
+		return true;
+
+	/*
+	 * Finally, check whether the seqno is higher than anything we have
+	 * actually emitted. In that case the fence is stale and counts as
+	 * signaled.
+	 */
+
+	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
+	       > VMW_FENCE_WRAP);
+
+	return ret;
+}
+
+int vmw_fallback_wait(struct vmw_private *dev_priv,
+		      bool lazy,
+		      bool fifo_idle,
+		      uint32_t seqno,
+		      bool interruptible,
+		      unsigned long timeout)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+
+	uint32_t count = 0;
+	uint32_t signal_seq;
+	int ret;
+	unsigned long end_jiffies = jiffies + timeout;
+	bool (*wait_condition)(struct vmw_private *, uint32_t);
+	DEFINE_WAIT(__wait);
+
+	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
+		&vmw_seqno_passed;
+
+	/*
+	 * Block command submission while waiting for idle.
+	 */
+
+	if (fifo_idle) {
+		down_read(&fifo_state->rwsem);
+		if (dev_priv->cman) {
+			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+					      10*HZ);
+			if (ret)
+				goto out_err;
+		}
+	}
+
+	signal_seq = atomic_read(&dev_priv->marker_seq);
+	ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->fence_queue, &__wait,
+				(interruptible) ?
+				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (wait_condition(dev_priv, seqno))
+			break;
+		if (time_after_eq(jiffies, end_jiffies)) {
+			DRM_ERROR("SVGA device lockup.\n");
+			break;
+		}
+		if (lazy)
+			schedule_timeout(1);
+		else if ((++count & 0x0F) == 0) {
+			/*
+			 * FIXME: Use schedule_hr_timeout here for
+			 * newer kernels and lower CPU utilization.
+			 */
+
+			__set_current_state(TASK_RUNNING);
+			schedule();
+			__set_current_state((interruptible) ?
+					    TASK_INTERRUPTIBLE :
+					    TASK_UNINTERRUPTIBLE);
+		}
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&dev_priv->fence_queue, &__wait);
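+	/*
+	 * After waiting for idle, publish the latest emitted seqno to the
+	 * FIFO fence slot so that subsequent seqno reads observe it.
+	 */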
+	if (ret == 0 && fifo_idle) {
+		u32 *fifo_mem = dev_priv->mmio_virt;
+
+		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+	}
+	wake_up_all(&dev_priv->fence_queue);
+out_err:
+	if (fifo_idle)
+		up_read(&fifo_state->rwsem);
+
+	return ret;
+}
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
+{
+	spin_lock_bh(&dev_priv->waiter_lock);
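+	/*
+	 * The first waiter acks any stale pending flag and unmasks the
+	 * irq source.
+	 */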
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_bh(&dev_priv->waiter_lock);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
+{
+	spin_lock_bh(&dev_priv->waiter_lock);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_bh(&dev_priv->waiter_lock);
+}
+
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+			       &dev_priv->fence_queue_waiters);
+}
+
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+				  &dev_priv->fence_queue_waiters);
+}
+
+void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+			       &dev_priv->goal_queue_waiters);
+}
+
+void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+				  &dev_priv->goal_queue_waiters);
+}
+
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+		      bool lazy, uint32_t seqno,
+		      bool interruptible, unsigned long timeout)
+{
+	long ret;
+	struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return 0;
+
+	if (likely(vmw_seqno_passed(dev_priv, seqno)))
+		return 0;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
+		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
+					 interruptible, timeout);
+
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
+					 interruptible, timeout);
+
+	vmw_seqno_waiter_add(dev_priv);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+		    (dev_priv->fence_queue,
+		     vmw_seqno_passed(dev_priv, seqno),
+		     timeout);
+	else
+		ret = wait_event_timeout
+		    (dev_priv->fence_queue,
+		     vmw_seqno_passed(dev_priv, seqno),
+		     timeout);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	return ret;
+}
+
+static void vmw_irq_preinstall(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status;
+
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+void vmw_irq_uninstall(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return;
+
+	if (!dev->irq_enabled)
+		return;
+
+	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+	dev->irq_enabled = false;
+	free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev:  Pointer to the drm device.
+ * @irq:  The irq number.
+ * Return:  Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+	int ret;
+
+	if (dev->irq_enabled)
+		return -EBUSY;
+
+	vmw_irq_preinstall(dev);
+
+	ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+				   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+	if (ret < 0)
+		return ret;
+
+	dev->irq_enabled = true;
+	dev->irq = irq;
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 0000000..85783ef
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,2907 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_sysfs.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+
+/* Might need a hrtimer here? */
+#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+void vmw_du_cleanup(struct vmw_display_unit *du)
+{
+	drm_plane_cleanup(&du->primary);
+	drm_plane_cleanup(&du->cursor);
+
+	drm_connector_unregister(&du->connector);
+	drm_crtc_cleanup(&du->crtc);
+	drm_encoder_cleanup(&du->encoder);
+	drm_connector_cleanup(&du->connector);
+}
+
+/*
+ * Display Unit Cursor functions
+ */
+
+static int vmw_cursor_update_image(struct vmw_private *dev_priv,
+				   u32 *image, u32 width, u32 height,
+				   u32 hotspotX, u32 hotspotY)
+{
+	struct {
+		u32 cmd;
+		SVGAFifoCmdDefineAlphaCursor cursor;
+	} *cmd;
+	u32 image_size = width * height * 4;
+	u32 cmd_size = sizeof(*cmd) + image_size;
+
+	if (!image)
+		return -EINVAL;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	memset(cmd, 0, sizeof(*cmd));
+
+	memcpy(&cmd[1], image, image_size);
+
+	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+	cmd->cursor.id = 0;
+	cmd->cursor.width = width;
+	cmd->cursor.height = height;
+	cmd->cursor.hotspotX = hotspotX;
+	cmd->cursor.hotspotY = hotspotY;
+
+	vmw_fifo_commit_flush(dev_priv, cmd_size);
+
+	return 0;
+}
+
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *bo,
+				u32 width, u32 height,
+				u32 hotspotX, u32 hotspotY)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	void *virtual;
+	bool dummy;
+	int ret;
+
+	kmap_offset = 0;
+	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+				      hotspotX, hotspotY);
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(&bo->base);
+
+	return ret;
+}
+
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+				       bool show, int x, int y)
+{
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t count;
+
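+	/*
+	 * Bumping SVGA_FIFO_CURSOR_COUNT tells the device that new cursor
+	 * state is available.
+	 */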
+	spin_lock(&dev_priv->cursor_lock);
+	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	spin_unlock(&dev_priv->cursor_lock);
+}
+
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+			  struct ttm_object_file *tfile,
+			  struct ttm_buffer_object *bo,
+			  SVGA3dCmdHeader *header)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	SVGA3dCopyBox *box;
+	unsigned box_count;
+	void *virtual;
+	bool dummy;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int i, ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+
+	/* No snooper installed */
+	if (!srf->snooper.image)
+		return;
+
+	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+		DRM_ERROR("face and mipmap for cursors should never != 0\n");
+		return;
+	}
+
+	if (cmd->header.size < 64) {
+		DRM_ERROR("at least one full copy box must be given\n");
+		return;
+	}
+
+	box = (SVGA3dCopyBox *)&cmd[1];
+	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+			sizeof(SVGA3dCopyBox);
+
+	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+	    box->x != 0    || box->y != 0    || box->z != 0    ||
+	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+	    box->d != 1    || box_count != 1 ||
+	    box->w > 64 || box->h > 64) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle more dst & src != 0 */
+		/* TODO handle more than one copy box */
+		DRM_ERROR("Can't snoop dma request for cursor!\n");
+		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+			  box->srcx, box->srcy, box->srcz,
+			  box->x, box->y, box->z,
+			  box->w, box->h, box->d, box_count,
+			  cmd->dma.guest.ptr.offset);
+		return;
+	}
+
+	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+	kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(bo, true, false, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+
+	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
+		memcpy(srf->snooper.image, virtual, 64*64*4);
+	} else {
+		/* Image is unsigned pointer. */
+		for (i = 0; i < box->h; i++)
+			memcpy(srf->snooper.image + i * 64,
+			       virtual + i * cmd->dma.guest.pitch,
+			       box->w * 4);
+	}
+
+	srf->snooper.age++;
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(bo);
+}
+
+/**
+ * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
+ *
+ * @dev_priv: Pointer to the device private struct.
+ *
+ * Clears all legacy hotspots.
+ */
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_crtc *crtc;
+
+	drm_modeset_lock_all(dev);
+	drm_for_each_crtc(crtc, dev) {
+		du = vmw_crtc_to_du(crtc);
+
+		du->hotspot_x = 0;
+		du->hotspot_y = 0;
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_crtc *crtc;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		du = vmw_crtc_to_du(crtc);
+		if (!du->cursor_surface ||
+		    du->cursor_age == du->cursor_surface->snooper.age)
+			continue;
+
+		du->cursor_age = du->cursor_surface->snooper.age;
+		vmw_cursor_update_image(dev_priv,
+					du->cursor_surface->snooper.image,
+					64, 64,
+					du->hotspot_x + du->core_hotspot_x,
+					du->hotspot_y + du->core_hotspot_y);
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+
+void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
+{
+	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
+
+	drm_plane_cleanup(plane);
+}
+
+
+void vmw_du_primary_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+
+	/* Planes are static in our case so we don't free it */
+}
+
+
+/**
+ * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
+ *
+ * @vps: plane state associated with the display surface
+ * @unreference: true if we also want to unreference the surface.
+ */
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
+			     bool unreference)
+{
+	if (vps->surf) {
+		if (vps->pinned) {
+			vmw_resource_unpin(&vps->surf->res);
+			vps->pinned--;
+		}
+
+		if (unreference) {
+			if (vps->pinned)
+				DRM_ERROR("Surface still pinned\n");
+			vmw_surface_unreference(&vps->surf);
+		}
+	}
+}
+
+
+/**
+ * vmw_du_plane_cleanup_fb - Unpins the surface backing the framebuffer
+ *
+ * @plane:  display plane
+ * @old_state: Contains the FB to clean up
+ *
+ * Unpins the framebuffer surface.
+ */
+void
+vmw_du_plane_cleanup_fb(struct drm_plane *plane,
+			struct drm_plane_state *old_state)
+{
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+	vmw_du_plane_unpin_surf(vps, false);
+}
+
+
+/**
+ * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane:  display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int
+vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
+			       struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+
+
+	if (vps->surf)
+		vmw_surface_unreference(&vps->surf);
+
+	if (vps->bo)
+		vmw_bo_unreference(&vps->bo);
+
+	if (fb) {
+		if (vmw_framebuffer_to_vfb(fb)->bo) {
+			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+			vmw_bo_reference(vps->bo);
+		} else {
+			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
+			vmw_surface_reference(vps->surf);
+		}
+	}
+
+	return 0;
+}
+
+
+void
+vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+				  struct drm_plane_state *old_state)
+{
+	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
+	s32 hotspot_x, hotspot_y;
+	int ret = 0;
+
+
+	hotspot_x = du->hotspot_x;
+	hotspot_y = du->hotspot_y;
+
+	if (plane->state->fb) {
+		hotspot_x += plane->state->fb->hot_x;
+		hotspot_y += plane->state->fb->hot_y;
+	}
+
+	du->cursor_surface = vps->surf;
+	du->cursor_bo = vps->bo;
+
+	if (vps->surf) {
+		du->cursor_age = du->cursor_surface->snooper.age;
+
+		ret = vmw_cursor_update_image(dev_priv,
+					      vps->surf->snooper.image,
+					      64, 64, hotspot_x,
+					      hotspot_y);
+	} else if (vps->bo) {
+		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+					   plane->state->crtc_w,
+					   plane->state->crtc_h,
+					   hotspot_x, hotspot_y);
+	} else {
+		vmw_cursor_update_position(dev_priv, false, 0, 0);
+		return;
+	}
+
+	if (!ret) {
+		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
+		du->cursor_y = plane->state->crtc_y + du->set_gui_y;
+
+		vmw_cursor_update_position(dev_priv, true,
+					   du->cursor_x + hotspot_x,
+					   du->cursor_y + hotspot_y);
+
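+		/*
+		 * Remember the framebuffer-supplied part of the hotspot
+		 * separately from the legacy cursor-bypass offset.
+		 */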
+		du->core_hotspot_x = hotspot_x - du->hotspot_x;
+		du->core_hotspot_y = hotspot_y - du->hotspot_y;
+	} else {
+		DRM_ERROR("Failed to update cursor image\n");
+	}
+}
+
+
+/**
+ * vmw_du_primary_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: display plane
+ * @state: info on the new plane state, including the FB
+ *
+ * Check if the new state is settable given the current state.  Other
+ * than what the atomic helper checks, we care about crtc fitting
+ * the FB and maintaining one active framebuffer.
+ *
+ * Returns 0 on success
+ */
+int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
+				      struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state = NULL;
+	struct drm_framebuffer *new_fb = state->fb;
+	int ret;
+
+	if (state->crtc)
+		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, true);
+
+	if (!ret && new_fb) {
+		struct drm_crtc *crtc = state->crtc;
+		struct vmw_connector_state *vcs;
+		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+		vcs = vmw_connector_state_to_vcs(du->connector.state);
+	}
+
+
+	return ret;
+}
+
+
+/**
+ * vmw_du_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @new_state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
+				     struct drm_plane_state *new_state)
+{
+	int ret = 0;
+	struct drm_crtc_state *crtc_state = NULL;
+	struct vmw_surface *surface = NULL;
+	struct drm_framebuffer *fb = new_state->fb;
+
+	if (new_state->crtc)
+		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+							   new_state->crtc);
+
+	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, true);
+	if (ret)
+		return ret;
+
+	/* Turning off */
+	if (!fb)
+		return 0;
+
+	/* A lot of the code assumes this */
+	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
+		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
+			  new_state->crtc_w, new_state->crtc_h);
+		ret = -EINVAL;
+	}
+
+	if (!vmw_framebuffer_to_vfb(fb)->bo)
+		surface = vmw_framebuffer_to_vfbs(fb)->surface;
+
+	if (surface && !surface->snooper.image) {
+		DRM_ERROR("surface not suitable for cursor\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+
+int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+			     struct drm_crtc_state *new_state)
+{
+	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
+	int connector_mask = drm_connector_mask(&du->connector);
+	bool has_primary = new_state->plane_mask &
+			   drm_plane_mask(crtc->primary);
+
+	/* We always want to have an active plane with an active CRTC */
+	if (has_primary != new_state->enable)
+		return -EINVAL;
+
+
+	if (new_state->connector_mask != connector_mask &&
+	    new_state->connector_mask != 0) {
+		DRM_ERROR("Invalid connectors configuration\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Our virtual device does not have a dot clock, so use the logical
+	 * clock value as the dot clock.
+	 */
+	if (new_state->mode.crtc_clock == 0)
+		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
+
+	return 0;
+}
+
+
+void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state)
+{
+}
+
+
+void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_pending_vblank_event *event = crtc->state->event;
+
+	if (event) {
+		crtc->state->event = NULL;
+
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
+	}
+}
+
+
+/**
+ * vmw_du_crtc_duplicate_state - duplicate crtc state
+ * @crtc: DRM crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * vmw-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *state;
+	struct vmw_crtc_state *vcs;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
+
+	if (!vcs)
+		return NULL;
+
+	state = &vcs->base;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
+
+	return state;
+}
+
+
+/**
+ * vmw_du_crtc_reset - creates a blank vmw crtc state
+ * @crtc: DRM crtc
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void vmw_du_crtc_reset(struct drm_crtc *crtc)
+{
+	struct vmw_crtc_state *vcs;
+
+
+	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+		kfree(vmw_crtc_state_to_vcs(crtc->state));
+	}
+
+	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
+
+	if (!vcs) {
+		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
+		return;
+	}
+
+	crtc->state = &vcs->base;
+	crtc->state->crtc = crtc;
+}
+
+
+/**
+ * vmw_du_crtc_destroy_state - destroy crtc state
+ * @crtc: DRM crtc
+ * @state: state object to destroy
+ *
+ * Destroys the crtc state (both common and vmw-specific) for the
+ * specified crtc.
+ */
+void
+vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
+			  struct drm_crtc_state *state)
+{
+	drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
+
+/**
+ * vmw_du_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * vmw-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+vmw_du_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state;
+	struct vmw_plane_state *vps;
+
+	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
+
+	if (!vps)
+		return NULL;
+
+	vps->pinned = 0;
+	vps->cpp = 0;
+
+	/* Each ref counted resource needs to be acquired again */
+	if (vps->surf)
+		(void) vmw_surface_reference(vps->surf);
+
+	if (vps->bo)
+		(void) vmw_bo_reference(vps->bo);
+
+	state = &vps->base;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, state);
+
+	return state;
+}
+
+
+/**
+ * vmw_du_plane_reset - creates a blank vmw plane state
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void vmw_du_plane_reset(struct drm_plane *plane)
+{
+	struct vmw_plane_state *vps;
+
+
+	if (plane->state)
+		vmw_du_plane_destroy_state(plane, plane->state);
+
+	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
+
+	if (!vps) {
+		DRM_ERROR("Cannot allocate vmw_plane_state\n");
+		return;
+	}
+
+	__drm_atomic_helper_plane_reset(plane, &vps->base);
+}
+
+
+/**
+ * vmw_du_plane_destroy_state - destroy plane state
+ * @plane: DRM plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and vmw-specific) for the
+ * specified plane.
+ */
+void
+vmw_du_plane_destroy_state(struct drm_plane *plane,
+			   struct drm_plane_state *state)
+{
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
+
+
+	/* Should have been freed by cleanup_fb */
+	if (vps->surf)
+		vmw_surface_unreference(&vps->surf);
+
+	if (vps->bo)
+		vmw_bo_unreference(&vps->bo);
+
+	drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+
+/**
+ * vmw_du_connector_duplicate_state - duplicate connector state
+ * @connector: DRM connector
+ *
+ * Allocates and returns a copy of the connector state (both common and
+ * vmw-specific) for the specified connector.
+ *
+ * Returns: The newly allocated connector state, or NULL on failure.
+ */
+struct drm_connector_state *
+vmw_du_connector_duplicate_state(struct drm_connector *connector)
+{
+	struct drm_connector_state *state;
+	struct vmw_connector_state *vcs;
+
+	if (WARN_ON(!connector->state))
+		return NULL;
+
+	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
+
+	if (!vcs)
+		return NULL;
+
+	state = &vcs->base;
+
+	__drm_atomic_helper_connector_duplicate_state(connector, state);
+
+	return state;
+}
+
+
+/**
+ * vmw_du_connector_reset - creates a blank vmw connector state
+ * @connector: DRM connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void vmw_du_connector_reset(struct drm_connector *connector)
+{
+	struct vmw_connector_state *vcs;
+
+
+	if (connector->state) {
+		__drm_atomic_helper_connector_destroy_state(connector->state);
+
+		kfree(vmw_connector_state_to_vcs(connector->state));
+	}
+
+	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
+
+	if (!vcs) {
+		DRM_ERROR("Cannot allocate vmw_connector_state\n");
+		return;
+	}
+
+	__drm_atomic_helper_connector_reset(connector, &vcs->base);
+}
+
+
+/**
+ * vmw_du_connector_destroy_state - destroy connector state
+ * @connector: DRM connector
+ * @state: state object to destroy
+ *
+ * Destroys the connector state (both common and vmw-specific) for the
+ * specified connector.
+ */
+void
+vmw_du_connector_destroy_state(struct drm_connector *connector,
+			  struct drm_connector_state *state)
+{
+	drm_atomic_helper_connector_destroy_state(connector, state);
+}
+
+/*
+ * Generic framebuffer code
+ */
+
+/*
+ * Surface framebuffer code
+ */
+
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(framebuffer);
+
+	drm_framebuffer_cleanup(framebuffer);
+	vmw_surface_unreference(&vfbs->surface);
+	if (vfbs->base.user_obj)
+		ttm_base_object_unref(&vfbs->base.user_obj);
+
+	kfree(vfbs);
+}
+
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a buffer-object backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips)
+{
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_object:
+		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+					    user_fence_rep, vclips, num_clips,
+					    NULL);
+	case vmw_du_screen_target:
+		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+					user_fence_rep, NULL, vclips, num_clips,
+					1, false, true, NULL);
+	default:
+		WARN_ONCE(true,
+			  "Readback called with invalid display system.\n");
+	}
+
+	return -ENOSYS;
+}
+
+
+static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+	.destroy = vmw_framebuffer_surface_destroy,
+	.dirty = drm_atomic_helper_dirtyfb,
+};
+
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+					   struct vmw_surface *surface,
+					   struct vmw_framebuffer **out,
+					   const struct drm_mode_fb_cmd2
+					   *mode_cmd,
+					   bool is_bo_proxy)
+
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_framebuffer_surface *vfbs;
+	enum SVGA3dSurfaceFormat format;
+	int ret;
+	struct drm_format_name_buf format_name;
+
+	/* 3D is only supported on HWv8 and newer hosts */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return -ENOSYS;
+
+	/*
+	 * Sanity checks.
+	 */
+
+	/* Surface must be marked as a scanout. */
+	if (unlikely(!surface->scanout))
+		return -EINVAL;
+
+	if (unlikely(surface->mip_levels[0] != 1 ||
+		     surface->num_sizes != 1 ||
+		     surface->base_size.width < mode_cmd->width ||
+		     surface->base_size.height < mode_cmd->height ||
+		     surface->base_size.depth != 1)) {
+		DRM_ERROR("Incompatible surface dimensions "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_ARGB8888:
+		format = SVGA3D_A8R8G8B8;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		format = SVGA3D_X8R8G8B8;
+		break;
+	case DRM_FORMAT_RGB565:
+		format = SVGA3D_R5G6B5;
+		break;
+	case DRM_FORMAT_XRGB1555:
+		format = SVGA3D_A1R5G5B5;
+		break;
+	default:
+		DRM_ERROR("Invalid pixel format: %s\n",
+			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
+		return -EINVAL;
+	}
+
+	/*
+	 * For DX, surface format validation is done when surface->scanout
+	 * is set.
+	 */
+	if (!dev_priv->has_dx && format != surface->format) {
+		DRM_ERROR("Invalid surface format for requested mode.\n");
+		return -EINVAL;
+	}
+
+	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+	if (!vfbs) {
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+
+	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+	vfbs->surface = vmw_surface_reference(surface);
+	vfbs->base.user_handle = mode_cmd->handles[0];
+	vfbs->is_bo_proxy = is_bo_proxy;
+
+	*out = &vfbs->base;
+
+	ret = drm_framebuffer_init(dev, &vfbs->base.base,
+				   &vmw_framebuffer_surface_funcs);
+	if (ret)
+		goto out_err2;
+
+	return 0;
+
+out_err2:
+	vmw_surface_unreference(&surface);
+	kfree(vfbs);
+out_err1:
+	return ret;
+}
+
+/*
+ * Buffer-object framebuffer code
+ */
+
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct vmw_framebuffer_bo *vfbd =
+		vmw_framebuffer_to_vfbd(framebuffer);
+
+	drm_framebuffer_cleanup(framebuffer);
+	vmw_bo_unreference(&vfbd->buffer);
+	if (vfbd->base.user_obj)
+		ttm_base_object_unref(&vfbd->base.user_obj);
+
+	kfree(vfbd);
+}
+
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+				    struct drm_file *file_priv,
+				    unsigned int flags, unsigned int color,
+				    struct drm_clip_rect *clips,
+				    unsigned int num_clips)
+{
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_framebuffer_bo *vfbd =
+		vmw_framebuffer_to_vfbd(framebuffer);
+	struct drm_clip_rect norect;
+	int ret, increment = 1;
+
+	drm_modeset_lock_all(dev_priv->dev);
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0)) {
+		drm_modeset_unlock_all(dev_priv->dev);
+		return ret;
+	}
+
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = framebuffer->width;
+		norect.y2 = framebuffer->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		increment = 2;
+	}
+
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_legacy:
+		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+					      clips, num_clips, increment);
+		break;
+	default:
+		ret = -EINVAL;
+		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+		break;
+	}
+
+	vmw_fifo_flush(dev_priv, false);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+
+	drm_modeset_unlock_all(dev_priv->dev);
+
+	return ret;
+}
+
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+					struct drm_file *file_priv,
+					unsigned int flags, unsigned int color,
+					struct drm_clip_rect *clips,
+					unsigned int num_clips)
+{
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+						color, clips, num_clips);
+
+	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+					 clips, num_clips);
+}
+
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+	.destroy = vmw_framebuffer_bo_destroy,
+	.dirty = vmw_framebuffer_bo_dirty_ext,
+};
+
+/**
+ * vmw_framebuffer_pin - Pin the buffer in a location suitable for access by
+ * the display system.
+ */
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_buffer_object *buf;
+	struct ttm_placement *placement;
+	int ret;
+
+	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+	if (!buf)
+		return 0;
+
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_legacy:
+		vmw_overlay_pause_all(dev_priv);
+		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
+		vmw_overlay_resume_all(dev_priv);
+		break;
+	case vmw_du_screen_object:
+	case vmw_du_screen_target:
+		if (vfb->bo) {
+			if (dev_priv->capabilities & SVGA_CAP_3D) {
+				/*
+				 * Use surface DMA to get content to
+				 * screen target surface.
+				 */
+				placement = &vmw_vram_gmr_placement;
+			} else {
+				/* Use CPU blit. */
+				placement = &vmw_sys_placement;
+			}
+		} else {
+			/* Use surface / image update */
+			placement = &vmw_mob_placement;
+		}
+
+		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_buffer_object *buf;
+
+	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+	if (WARN_ON(!buf))
+		return 0;
+
+	return vmw_bo_unpin(dev_priv, buf, false);
+}
+
+/**
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @bo_mob: MOB backing the buffer object
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a buffer object, we create a surface as a proxy to the
+ * same buffer.  This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_bo_proxy(struct drm_device *dev,
+			       const struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct vmw_buffer_object *bo_mob,
+			       struct vmw_surface **srf_out)
+{
+	uint32_t format;
+	struct drm_vmw_size content_base_size = {0};
+	struct vmw_resource *res;
+	unsigned int bytes_pp;
+	struct drm_format_name_buf format_name;
+	int ret;
+
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB8888:
+		format = SVGA3D_X8R8G8B8;
+		bytes_pp = 4;
+		break;
+
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB1555:
+		format = SVGA3D_R5G6B5;
+		bytes_pp = 2;
+		break;
+
+	case 8:
+		format = SVGA3D_P8;
+		bytes_pp = 1;
+		break;
+
+	default:
+		DRM_ERROR("Invalid framebuffer format %s\n",
+			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
+		return -EINVAL;
+	}
+
+	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
+	content_base_size.height = mode_cmd->height;
+	content_base_size.depth  = 1;
+
+	ret = vmw_surface_gb_priv_define(dev,
+					 0, /* kernel visible only */
+					 0, /* flags */
+					 format,
+					 true, /* can be a scanout buffer */
+					 1, /* num of mip levels */
+					 0,
+					 0,
+					 content_base_size,
+					 SVGA3D_MS_PATTERN_NONE,
+					 SVGA3D_MS_QUALITY_NONE,
+					 srf_out);
+	if (ret) {
+		DRM_ERROR("Failed to allocate proxy content buffer\n");
+		return ret;
+	}
+
+	res = &(*srf_out)->res;
+
+	/* Reserve and switch the backing mob. */
+	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+	(void) vmw_resource_reserve(res, false, true);
+	vmw_bo_unreference(&res->backup);
+	res->backup = vmw_bo_reference(bo_mob);
+	res->backup_offset = 0;
+	vmw_resource_unreserve(res, false, false, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+
+	return 0;
+}
+
+
+
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+				      struct vmw_buffer_object *bo,
+				      struct vmw_framebuffer **out,
+				      const struct drm_mode_fb_cmd2
+				      *mode_cmd)
+
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_framebuffer_bo *vfbd;
+	unsigned int requested_size;
+	struct drm_format_name_buf format_name;
+	int ret;
+
+	requested_size = mode_cmd->height * mode_cmd->pitches[0];
+	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+		DRM_ERROR("Screen buffer object size is too small "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
+	/* Limited framebuffer color depth support for screen objects */
+	if (dev_priv->active_display_unit == vmw_du_screen_object) {
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_XRGB8888:
+		case DRM_FORMAT_ARGB8888:
+			break;
+		case DRM_FORMAT_XRGB1555:
+		case DRM_FORMAT_RGB565:
+			break;
+		default:
+			DRM_ERROR("Invalid pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			return -EINVAL;
+		}
+	}
+
+	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
+	if (!vfbd) {
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+
+	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
+	vfbd->base.bo = true;
+	vfbd->buffer = vmw_bo_reference(bo);
+	vfbd->base.user_handle = mode_cmd->handles[0];
+	*out = &vfbd->base;
+
+	ret = drm_framebuffer_init(dev, &vfbd->base.base,
+				   &vmw_framebuffer_bo_funcs);
+	if (ret)
+		goto out_err2;
+
+	return 0;
+
+out_err2:
+	vmw_bo_unreference(&bo);
+	kfree(vfbd);
+out_err1:
+	return ret;
+}
+
+
+/**
+ * vmw_kms_srf_ok - check if a surface can be created
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @width: requested width
+ * @height: requested height
+ *
+ * Surfaces must not exceed the device's maximum texture dimensions.
+ */
+static bool
+vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
+{
+	if (width  > dev_priv->texture_max_width ||
+	    height > dev_priv->texture_max_height)
+		return false;
+
+	return true;
+}
+
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_buffer_object *bo,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct vmw_framebuffer *vfb = NULL;
+	bool is_bo_proxy = false;
+	int ret;
+
+	/*
+	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
+	 * therefore, wrap the buffer object in a surface so we can use the
+	 * SurfaceCopy command.
+	 */
+	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
+	    bo && only_2d &&
+	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
+	    dev_priv->active_display_unit == vmw_du_screen_target) {
+		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+					  bo, &surface);
+		if (ret)
+			return ERR_PTR(ret);
+
+		is_bo_proxy = true;
+	}
+
+	/* Create the new framebuffer depending on what we have */
+	if (surface) {
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+						      mode_cmd,
+						      is_bo_proxy);
+
+		/*
+		 * vmw_create_bo_proxy() adds a reference that is no longer
+		 * needed
+		 */
+		if (is_bo_proxy)
+			vmw_surface_unreference(&surface);
+	} else if (bo) {
+		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+						 mode_cmd);
+	} else {
+		BUG();
+	}
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	vfb->pin = vmw_framebuffer_pin;
+	vfb->unpin = vmw_framebuffer_unpin;
+
+	return vfb;
+}
+
+/*
+ * Generic Kernel modesetting functions
+ */
+
+static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+						 struct drm_file *file_priv,
+						 const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_framebuffer *vfb = NULL;
+	struct vmw_surface *surface = NULL;
+	struct vmw_buffer_object *bo = NULL;
+	struct ttm_base_object *user_obj;
+	int ret;
+
+	/*
+	 * Take a reference on the user object of the resource
+	 * backing the kms fb. This ensures that user-space handle
+	 * lookups on that resource will always work as long as
+	 * it's registered with a kms framebuffer. This is important,
+	 * since vmw_execbuf_process identifies resources in the
+	 * command stream using user-space handles.
+	 */
+
+	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
+	if (unlikely(user_obj == NULL)) {
+		DRM_ERROR("Could not locate requested kms frame buffer.\n");
+		return ERR_PTR(-ENOENT);
+	}
+
+	/* returns either a bo or surface */
+	ret = vmw_user_lookup_handle(dev_priv, tfile,
+				     mode_cmd->handles[0],
+				     &surface, &bo);
+	if (ret)
+		goto err_out;
+
+
+	if (!bo &&
+	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
+		DRM_ERROR("Surface size cannot exceed %dx%d",
+			dev_priv->texture_max_width,
+			dev_priv->texture_max_height);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+
+	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+				      !(dev_priv->capabilities & SVGA_CAP_3D),
+				      mode_cmd);
+	if (IS_ERR(vfb)) {
+		ret = PTR_ERR(vfb);
+		goto err_out;
+	}
+
+err_out:
+	/* vmw_user_lookup_handle takes one ref so does new_fb */
+	if (bo)
+		vmw_bo_unreference(&bo);
+	if (surface)
+		vmw_surface_unreference(&surface);
+
+	if (ret) {
+		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+		ttm_base_object_unref(&user_obj);
+		return ERR_PTR(ret);
+	} else
+		vfb->user_obj = user_obj;
+
+	return &vfb->base;
+}
+
+/**
+ * vmw_kms_check_display_memory - Validates display memory required for a
+ * topology
+ * @dev: DRM device
+ * @num_rects: number of drm_rect in rects
+ * @rects: array of drm_rect representing the topology to validate indexed by
+ * crtc index.
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_display_memory(struct drm_device *dev,
+					uint32_t num_rects,
+					struct drm_rect *rects)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_rect bounding_box = {0};
+	u64 total_pixels = 0, pixel_mem, bb_mem;
+	int i;
+
+	for (i = 0; i < num_rects; i++) {
+		/*
+		 * For STDU, only the individual screen (screen target) is
+		 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
+		 */
+		if (dev_priv->active_display_unit == vmw_du_screen_target &&
+		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+			VMW_DEBUG_KMS("Screen size not supported.\n");
+			return -EINVAL;
+		}
+
+		/* Bounding box upper left is at (0,0). */
+		if (rects[i].x2 > bounding_box.x2)
+			bounding_box.x2 = rects[i].x2;
+
+		if (rects[i].y2 > bounding_box.y2)
+			bounding_box.y2 = rects[i].y2;
+
+		total_pixels += (u64) drm_rect_width(&rects[i]) *
+			(u64) drm_rect_height(&rects[i]);
+	}
+
+	/* Virtual svga device primary limits are always in 32-bpp. */
+	pixel_mem = total_pixels * 4;
+
+	/*
+	 * For HV10 and below, prim_bb_mem is the vram size. When
+	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
+	 * is the limit on the primary bounding box.
+	 */
+	if (pixel_mem > dev_priv->prim_bb_mem) {
+		VMW_DEBUG_KMS("Combined output size too large.\n");
+		return -EINVAL;
+	}
+
+	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
+	if (dev_priv->active_display_unit != vmw_du_screen_target ||
+	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
+		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
+
+		if (bb_mem > dev_priv->prim_bb_mem) {
+			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update.
+ * Otherwise returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return an
+ * error pointer, in particular -EDEADLK if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	if (crtc_state) {
+		lockdep_assert_held(&crtc->mutex.mutex.base);
+	} else {
+		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+		if (ret != 0 && ret != -EALREADY)
+			return ERR_PTR(ret);
+
+		crtc_state = crtc->state;
+	}
+
+	return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ *   Zero on success,
+ *   -EINVAL on invalid state,
+ *   -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+				  struct drm_atomic_state *state)
+{
+	struct drm_framebuffer *implicit_fb = NULL;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *plane_state;
+
+	drm_for_each_crtc(crtc, dev) {
+		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+		if (!du->is_implicit)
+			continue;
+
+		crtc_state = vmw_crtc_state_and_lock(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state || !crtc_state->enable)
+			continue;
+
+		/*
+		 * Can't move primary planes across crtcs, so this is OK.
+		 * It also means we don't need to take the plane mutex.
+		 */
+		plane_state = du->primary.state;
+		if (plane_state->crtc != crtc)
+			continue;
+
+		if (!implicit_fb)
+			implicit_fb = plane_state->fb;
+		else if (implicit_fb != plane_state->fb)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_kms_check_topology - Validates topology in drm_atomic_state
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_topology(struct drm_device *dev,
+				  struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_rect *rects;
+	struct drm_crtc *crtc;
+	uint32_t i;
+	int ret = 0;
+
+	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
+			GFP_KERNEL);
+	if (!rects)
+		return -ENOMEM;
+
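+	/* Build the proposed layout: one rect per crtc, empty if disabled. */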
+	drm_for_each_crtc(crtc, dev) {
+		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+		struct drm_crtc_state *crtc_state;
+
+		i = drm_crtc_index(crtc);
+
+		crtc_state = vmw_crtc_state_and_lock(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			goto clean;
+		}
+
+		if (!crtc_state)
+			continue;
+
+		if (crtc_state->enable) {
+			rects[i].x1 = du->gui_x;
+			rects[i].y1 = du->gui_y;
+			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
+			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+		} else {
+			rects[i].x1 = 0;
+			rects[i].y1 = 0;
+			rects[i].x2 = 0;
+			rects[i].y2 = 0;
+		}
+	}
+
+	/* Determine change to topology due to new atomic state */
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+				      new_crtc_state, i) {
+		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+		struct drm_connector *connector;
+		struct drm_connector_state *conn_state;
+		struct vmw_connector_state *vmw_conn_state;
+
+		if (!du->pref_active && new_crtc_state->enable) {
+			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
+			ret = -EINVAL;
+			goto clean;
+		}
+
+		/*
+		 * For vmwgfx each crtc has only one connector attached and it
+		 * is not changed, so we don't really need to check the
+		 * crtc->connector_mask and iterate over it.
+		 */
+		connector = &du->connector;
+		conn_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(conn_state)) {
+			ret = PTR_ERR(conn_state);
+			goto clean;
+		}
+
+		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+		vmw_conn_state->gui_x = du->gui_x;
+		vmw_conn_state->gui_y = du->gui_y;
+	}
+
+	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
+					   rects);
+
+clean:
+	kfree(rects);
+	return ret;
+}
+
+/**
+ * vmw_kms_atomic_check_modeset - validate state object for modeset changes
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This is a wrapper around drm_atomic_helper_check() that additionally
+ * validates implicit placement and, when a modeset is required, the display
+ * topology. The per-crtc check assigns a crtc_clock so that
+ * drm_calc_timestamping_constants() won't throw an error message.
+ *
+ * Returns:
+ * Zero for success or -errno
+ */
+static int
+vmw_kms_atomic_check_modeset(struct drm_device *dev,
+			     struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	bool need_modeset = false;
+	int i, ret;
+
+	ret = drm_atomic_helper_check(dev, state);
+	if (ret)
+		return ret;
+
+	ret = vmw_kms_check_implicit(dev, state);
+	if (ret) {
+		VMW_DEBUG_KMS("Invalid implicit state\n");
+		return ret;
+	}
+
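+	/* Only re-validate the topology when a modeset is actually needed. */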
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		if (drm_atomic_crtc_needs_modeset(crtc_state))
+			need_modeset = true;
+	}
+
+	if (need_modeset)
+		return vmw_kms_check_topology(dev, state);
+
+	return ret;
+}
+
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
+	.fb_create = vmw_kms_fb_create,
+	.atomic_check = vmw_kms_atomic_check_modeset,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+				   struct drm_file *file_priv,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_surface *surface,
+				   uint32_t sid,
+				   int32_t destX, int32_t destY,
+				   struct drm_vmw_rect *clips,
+				   uint32_t num_clips)
+{
+	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+					    &surface->res, destX, destY,
+					    num_clips, 1, NULL, NULL);
+}
+
+
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid,
+		    int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips)
+{
+	int ret;
+
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+						 &surface->res, destX, destY,
+						 num_clips, 1, NULL, NULL);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+					      sid, destX, destY, clips,
+					      num_clips);
+		break;
+	default:
+		WARN_ONCE(true,
+			  "Present called with invalid display system.\n");
+		ret = -ENOSYS;
+		break;
+	}
+	if (ret)
+		return ret;
+
+	vmw_fifo_flush(dev_priv, false);
+
+	return 0;
+}
+
+static void
+vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
+{
+	if (dev_priv->hotplug_mode_update_property)
+		return;
+
+	dev_priv->hotplug_mode_update_property =
+		drm_property_create_range(dev_priv->dev,
+					  DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+
+	if (!dev_priv->hotplug_mode_update_property)
+		return;
+
+}
+
+int vmw_kms_init(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int ret;
+
+	drm_mode_config_init(dev);
+	dev->mode_config.funcs = &vmw_kms_funcs;
+	dev->mode_config.min_width = 1;
+	dev->mode_config.min_height = 1;
+	dev->mode_config.max_width = dev_priv->texture_max_width;
+	dev->mode_config.max_height = dev_priv->texture_max_height;
+
+	drm_mode_create_suggested_offset_properties(dev);
+	vmw_kms_create_hotplug_mode_update_property(dev_priv);
+
+	ret = vmw_kms_stdu_init_display(dev_priv);
+	if (ret) {
+		ret = vmw_kms_sou_init_display(dev_priv);
+		if (ret) /* Fallback */
+			ret = vmw_kms_ldu_init_display(dev_priv);
+	}
+
+	return ret;
+}
+
+int vmw_kms_close(struct vmw_private *dev_priv)
+{
+	int ret = 0;
+
+	/*
+	 * Docs say we should take the lock before calling this function,
+	 * but since it destroys encoders and our destructor calls
+	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
+	 */
+	drm_mode_config_cleanup(dev_priv->dev);
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		ret = vmw_kms_ldu_close_display(dev_priv);
+
+	return ret;
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_vmw_cursor_bypass_arg *arg = data;
+	struct vmw_display_unit *du;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+
+	mutex_lock(&dev->mode_config.mutex);
+	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			du = vmw_crtc_to_du(crtc);
+			du->hotspot_x = arg->xhot;
+			du->hotspot_y = arg->yhot;
+		}
+
+		mutex_unlock(&dev->mode_config.mutex);
+		return 0;
+	}
+
+	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+	if (!crtc) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	du = vmw_crtc_to_du(crtc);
+
+	du->hotspot_x = arg->xhot;
+	du->hotspot_y = arg->yhot;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bpp, unsigned depth)
+{
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
+			       SVGA_FIFO_PITCHLOCK);
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+
+	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
+		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
+			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
+	struct vmw_vga_topology_state *save;
+	uint32_t i;
+
+	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
+	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_priv->vga_pitchlock =
+		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
+							SVGA_FIFO_PITCHLOCK);
+
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+		return 0;
+
+	vmw_priv->num_displays = vmw_read(vmw_priv,
+					  SVGA_REG_NUM_GUEST_DISPLAYS);
+
+	if (vmw_priv->num_displays == 0)
+		vmw_priv->num_displays = 1;
+
+	for (i = 0; i < vmw_priv->num_displays; ++i) {
+		save = &vmw_priv->vga_save[i];
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
+		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
+		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
+		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
+		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		if (i == 0 && vmw_priv->num_displays == 1 &&
+		    save->width == 0 && save->height == 0) {
+
+			/*
+			 * It should be fairly safe to assume that these
+			 * values are uninitialized.
+			 */
+
+			save->width = vmw_priv->vga_width - save->pos_x;
+			save->height = vmw_priv->vga_height - save->pos_y;
+		}
+	}
+
+	return 0;
+}
+
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
+{
+	struct vmw_vga_topology_state *save;
+	uint32_t i;
+
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+			  vmw_priv->vga_pitchlock);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_mmio_write(vmw_priv->vga_pitchlock,
+			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+		return 0;
+
+	for (i = 0; i < vmw_priv->num_displays; ++i) {
+		save = &vmw_priv->vga_save[i];
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+	}
+
+	return 0;
+}
+
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height)
+{
+	return ((u64) pitch * (u64) height) < (u64)
+		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+		 dev_priv->prim_bb_mem : dev_priv->vram_size);
+}
+
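+/*
+ * Worked example for vmw_kms_validate_mode_vram(): a 1920x1200 mode at an
+ * assumed 32 bpp gives a pitch of 1920 * 4 = 7680 bytes, so the check is
+ * 7680 * 1200 = 9216000 bytes (~8.8 MiB), which must be smaller than
+ * vram_size (or prim_bb_mem when screen targets are the active display unit).
+ */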
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+	return 0;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	return -EINVAL;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+}
+
+/**
+ * vmw_du_update_layout - Update the display unit with topology from resolution
+ * plugin and generate DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
+ */
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+				unsigned int num_rects, struct drm_rect *rects)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_connector *con;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_crtc *crtc;
+	int ret;
+
+	/* Currently gui_x/y are protected by the crtc mutex */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	drm_for_each_crtc(crtc, dev) {
+		ret = drm_modeset_lock(&crtc->mutex, &ctx);
+		if (ret < 0) {
+			if (ret == -EDEADLK) {
+				drm_modeset_backoff(&ctx);
+				goto retry;
+			}
+			goto out_fini;
+		}
+	}
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(con, &conn_iter) {
+		du = vmw_connector_to_du(con);
+		if (num_rects > du->unit) {
+			du->pref_width = drm_rect_width(&rects[du->unit]);
+			du->pref_height = drm_rect_height(&rects[du->unit]);
+			du->pref_active = true;
+			du->gui_x = rects[du->unit].x1;
+			du->gui_y = rects[du->unit].y1;
+		} else {
+			du->pref_width = 800;
+			du->pref_height = 600;
+			du->pref_active = false;
+			du->gui_x = 0;
+			du->gui_y = 0;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+		du = vmw_connector_to_du(con);
+		if (num_rects > du->unit) {
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_x_property,
+			   du->gui_x);
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_y_property,
+			   du->gui_y);
+		} else {
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_x_property,
+			   0);
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_y_property,
+			   0);
+		}
+		con->status = vmw_du_connector_detect(con, true);
+	}
+
+	drm_sysfs_hotplug_event(dev);
+out_fini:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			  u16 *r, u16 *g, u16 *b,
+			  uint32_t size,
+			  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	int i;
+
+	for (i = 0; i < size; i++) {
+		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
+			  r[i], g[i], b[i]);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
+	}
+
+	return 0;
+}
+
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+{
+	return 0;
+}
+
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force)
+{
+	uint32_t num_displays;
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+
+	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+
+	return ((vmw_connector_to_du(connector)->unit < num_displays &&
+		 du->pref_active) ?
+		connector_status_connected : connector_status_disconnected);
+}
+
+static struct drm_display_mode vmw_kms_connector_builtin[] = {
+	/* 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 489, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 800x600@60Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+		   968, 1056, 0, 600, 601, 605, 628, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@60Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0, 768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1152x864@75Hz */
+	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+		   1344, 1600, 0, 864, 865, 868, 900, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@60Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+		   1472, 1664, 0, 768, 771, 778, 798, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800@60Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+		   1480, 1680, 0, 800, 803, 809, 831, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x960@60Hz */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+		   1488, 1800, 0, 960, 961, 964, 1000, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@60Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1360x768@60Hz */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+		   1536, 1792, 0, 768, 771, 777, 795, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@60Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+		   1672, 1904, 0, 900, 903, 909, 934, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@60Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@60Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1792x1344@60Hz */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@60Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440@60Hz */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@60Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* Terminate */
+	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+/**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+ *
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * members filled in.
+ */
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
+{
+	mode->hsync_start = mode->hdisplay + 50;
+	mode->hsync_end = mode->hsync_start + 50;
+	mode->htotal = mode->hsync_end + 50;
+
+	mode->vsync_start = mode->vdisplay + 50;
+	mode->vsync_end = mode->vsync_start + 50;
+	mode->vtotal = mode->vsync_end + 50;
+
+	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+}
+
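+/*
+ * Worked example for vmw_guess_mode_timing(): for a 1024x768 request,
+ * htotal = 1024 + 3 * 50 = 1174 and vtotal = 768 + 3 * 50 = 918, so
+ * clock = 1174 * 918 / 100 * 6 = 64662 kHz and the resulting vrefresh is
+ * 64662000 / (1174 * 918) ~= 60 Hz, which is what the "/ 100 * 6" scaling
+ * is aiming for.
+ */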
+
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height)
+{
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *bmode;
+	struct drm_display_mode prefmode = { DRM_MODE("preferred",
+		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	int i;
+	u32 assumed_bpp = 4;
+
+	if (dev_priv->assume_16bpp)
+		assumed_bpp = 2;
+
+	max_width  = min(max_width,  dev_priv->texture_max_width);
+	max_height = min(max_height, dev_priv->texture_max_height);
+
+	/*
+	 * For STDU, a mode is additionally limited by the
+	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
+	 */
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		max_width  = min(max_width,  dev_priv->stdu_max_width);
+		max_height = min(max_height, dev_priv->stdu_max_height);
+	}
+
+	/* Add preferred mode */
+	mode = drm_mode_duplicate(dev, &prefmode);
+	if (!mode)
+		return 0;
+	mode->hdisplay = du->pref_width;
+	mode->vdisplay = du->pref_height;
+	vmw_guess_mode_timing(mode);
+
+	if (vmw_kms_validate_mode_vram(dev_priv,
+					mode->hdisplay * assumed_bpp,
+					mode->vdisplay)) {
+		drm_mode_probed_add(connector, mode);
+	} else {
+		drm_mode_destroy(dev, mode);
+		mode = NULL;
+	}
+
+	if (du->pref_mode) {
+		list_del_init(&du->pref_mode->head);
+		drm_mode_destroy(dev, du->pref_mode);
+	}
+
+	/* mode might be null here, this is intended */
+	du->pref_mode = mode;
+
+	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+		bmode = &vmw_kms_connector_builtin[i];
+		if (bmode->hdisplay > max_width ||
+		    bmode->vdisplay > max_height)
+			continue;
+
+		if (!vmw_kms_validate_mode_vram(dev_priv,
+						bmode->hdisplay * assumed_bpp,
+						bmode->vdisplay))
+			continue;
+
+		mode = drm_mode_duplicate(dev, bmode);
+		if (!mode)
+			return 0;
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
+		drm_mode_probed_add(connector, mode);
+	}
+
+	drm_connector_list_update(connector);
+	/* Move the preferred mode first, to help apps pick the right mode. */
+	drm_mode_sort(&connector->modes);
+
+	return 1;
+}
+
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update preferred topology of display unit as per ioctl request. The topology
+ * is expressed as array of drm_vmw_rect.
+ * e.g.
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limit on topology, x + w and y + h (lower right) cannot
+ * be greater than INT_MAX. A topology beyond these limits will return an
+ * error.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_vmw_update_layout_arg *arg =
+		(struct drm_vmw_update_layout_arg *)data;
+	void __user *user_rects;
+	struct drm_vmw_rect *rects;
+	struct drm_rect *drm_rects;
+	unsigned rects_size;
+	int ret, i;
+
+	if (!arg->num_outputs) {
+		struct drm_rect def_rect = {0, 0, 800, 600};
+		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
+			      def_rect.x1, def_rect.y1,
+			      def_rect.x2, def_rect.y2);
+		vmw_du_update_layout(dev_priv, 1, &def_rect);
+		return 0;
+	}
+
+	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+			GFP_KERNEL);
+	if (unlikely(!rects))
+		return -ENOMEM;
+
+	user_rects = (void __user *)(unsigned long)arg->rects;
+	ret = copy_from_user(rects, user_rects, rects_size);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to get rects.\n");
+		ret = -EFAULT;
+		goto out_free;
+	}
+
+	drm_rects = (struct drm_rect *)rects;
+
+	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
+	for (i = 0; i < arg->num_outputs; i++) {
+		struct drm_vmw_rect curr_rect;
+
+		/* Verify user-space rects for overflow as the kernel uses drm_rect */
+		if ((rects[i].x + rects[i].w > INT_MAX) ||
+		    (rects[i].y + rects[i].h > INT_MAX)) {
+			ret = -ERANGE;
+			goto out_free;
+		}
+
+		curr_rect = rects[i];
+		drm_rects[i].x1 = curr_rect.x;
+		drm_rects[i].y1 = curr_rect.y;
+		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
+			      drm_rects[i].x1, drm_rects[i].y1,
+			      drm_rects[i].x2, drm_rects[i].y2);
+
+		/*
+		 * Currently this check limits the topology to
+		 * mode_config->max (which actually is the max texture size
+		 * supported by the virtual device). This limit is here to
+		 * address window managers that create a big framebuffer for
+		 * the whole topology.
+		 */
+		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
+		    drm_rects[i].x2 > mode_config->max_width ||
+		    drm_rects[i].y2 > mode_config->max_height) {
+			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
+				      drm_rects[i].x1, drm_rects[i].y1,
+				      drm_rects[i].x2, drm_rects[i].y2);
+			ret = -EINVAL;
+			goto out_free;
+		}
+	}
+
+	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+	if (ret == 0)
+		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
+
+out_free:
+	kfree(rects);
+	return ret;
+}
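+
+/*
+ * Example of the drm_vmw_rect to drm_rect conversion above: a user rect of
+ * x = 640, y = 0, w = 800, h = 600 becomes x1 = 640, y1 = 0, x2 = 1440,
+ * y2 = 600, i.e. the second monitor in the "[640 0 800 600]" topology
+ * example from the kernel-doc.
+ */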
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	u32 num_units = 0;
+	u32 i, k;
+
+	dirty->dev_priv = dev_priv;
+
+	/* If crtc is passed, no need to iterate over other display units */
+	if (dirty->crtc) {
+		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
+	} else {
+		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+				    head) {
+			struct drm_plane *plane = crtc->primary;
+
+			if (plane->state->fb == &framebuffer->base)
+				units[num_units++] = vmw_crtc_to_du(crtc);
+		}
+	}
+
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		s32 crtc_x = unit->crtc.x;
+		s32 crtc_y = unit->crtc.y;
+		s32 crtc_width = unit->crtc.mode.hdisplay;
+		s32 crtc_height = unit->crtc.mode.vdisplay;
+		const struct drm_clip_rect *clips_ptr = clips;
+		const struct drm_vmw_rect *vclips_ptr = vclips;
+
+		dirty->unit = unit;
+		if (dirty->fifo_reserve_size > 0) {
+			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
+						      dirty->fifo_reserve_size);
+			if (!dirty->cmd)
+				return -ENOMEM;
+
+			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+		}
+		dirty->num_hits = 0;
+		for (i = 0; i < num_clips; i++, clips_ptr += increment,
+		       vclips_ptr += increment) {
+			s32 clip_left;
+			s32 clip_top;
+
+			/*
+			 * Select clip array type. Note that integer type
+			 * in @clips is unsigned short, whereas in @vclips
+			 * it's 32-bit.
+			 */
+			if (clips) {
+				dirty->fb_x = (s32) clips_ptr->x1;
+				dirty->fb_y = (s32) clips_ptr->y1;
+				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+					crtc_x;
+				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+					crtc_y;
+			} else {
+				dirty->fb_x = vclips_ptr->x;
+				dirty->fb_y = vclips_ptr->y;
+				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+					dest_x - crtc_x;
+				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+					dest_y - crtc_y;
+			}
+
+			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+			/* Skip this clip if it's outside the crtc region */
+			if (dirty->unit_x1 >= crtc_width ||
+			    dirty->unit_y1 >= crtc_height ||
+			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+				continue;
+
+			/* Clip right and bottom to crtc limits */
+			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+					       crtc_width);
+			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+					       crtc_height);
+
+			/* Clip left and top to crtc limits */
+			clip_left = min_t(s32, dirty->unit_x1, 0);
+			clip_top = min_t(s32, dirty->unit_y1, 0);
+			dirty->unit_x1 -= clip_left;
+			dirty->unit_y1 -= clip_top;
+			dirty->fb_x -= clip_left;
+			dirty->fb_y -= clip_top;
+
+			dirty->clip(dirty);
+		}
+
+		dirty->fifo_commit(dirty);
+	}
+
+	return 0;
+}
+
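+/*
+ * Minimal usage sketch for vmw_kms_helper_dirty(). The callback names
+ * my_unit_clip() and my_unit_commit(), and the cmd/num_clips variables, are
+ * placeholders for a display unit's own implementation: @clip encodes one
+ * clip rect into the reserved fifo space, @fifo_commit commits that space
+ * once per display unit, and setting @crtc restricts the walk to a single
+ * unit.
+ *
+ *	struct vmw_kms_dirty dirty = { 0 };
+ *
+ *	dirty.clip = my_unit_clip;
+ *	dirty.fifo_commit = my_unit_commit;
+ *	dirty.fifo_reserve_size = sizeof(*cmd) * num_clips;
+ *	dirty.crtc = crtc;
+ *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
+ *				   0, 0, num_clips, 1, &dirty);
+ */
+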
+/**
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
+ */
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep)
+{
+	struct vmw_fence_obj *fence = NULL;
+	uint32_t handle = 0;
+	int ret = 0;
+
+	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+	    out_fence)
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+						 file_priv ? &handle : NULL);
+	vmw_validation_done(ctx, fence);
+	if (file_priv)
+		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+					    ret, user_fence_rep, fence,
+					    handle, -1);
+	if (out_fence)
+		*out_fence = fence;
+	else
+		vmw_fence_obj_unreference(&fence);
+}
+
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB need to be reserved and validated when this function is called.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+	SVGA3dBox *box;
+	size_t copy_size = 0;
+	int i;
+
+	if (!clips)
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+	if (!cmd)
+		return -ENOMEM;
+
+	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+		box = &cmd->body.box;
+
+		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.image.sid = res->id;
+		cmd->body.image.face = 0;
+		cmd->body.image.mipmap = 0;
+
+		if (clips->x1 > size->width || clips->x2 > size->width ||
+		    clips->y1 > size->height || clips->y2 > size->height) {
+			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+			return -EINVAL;
+		}
+
+		box->x = clips->x1;
+		box->y = clips->y1;
+		box->z = 0;
+		box->w = clips->x2 - clips->x1;
+		box->h = clips->y2 - clips->y1;
+		box->d = 1;
+
+		copy_size += sizeof(*cmd);
+	}
+
+	vmw_fifo_commit(dev_priv, copy_size);
+
+	return 0;
+}
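+
+/*
+ * Example for vmw_kms_update_proxy(): a single clip covering a 640x480 proxy
+ * surface ((0, 0)-(640, 480)) emits one SVGA_3D_CMD_UPDATE_GB_IMAGE command
+ * with box x = 0, y = 0, z = 0, w = 640, h = 480, d = 1.
+ */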
+
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode)
+{
+	struct drm_connector *con;
+	struct vmw_display_unit *du;
+	struct drm_display_mode *mode;
+	int i = 0;
+	int ret = 0;
+
+	mutex_lock(&dev_priv->dev->mode_config.mutex);
+	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+			    head) {
+		if (i == unit)
+			break;
+
+		++i;
+	}
+
+	if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+		DRM_ERROR("Could not find initial display unit.\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (list_empty(&con->modes))
+		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+	if (list_empty(&con->modes)) {
+		DRM_ERROR("Could not find initial display mode.\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	du = vmw_connector_to_du(con);
+	*p_con = con;
+	*p_crtc = &du->crtc;
+
+	list_for_each_entry(mode, &con->modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			break;
+	}
+
+	if (&mode->head == &con->modes) {
+		WARN_ONCE(true, "Could not find initial preferred mode.\n");
+		*p_mode = list_first_entry(&con->modes,
+					   struct drm_display_mode,
+					   head);
+	} else {
+		*p_mode = mode;
+	}
+
+ out_unlock:
+	mutex_unlock(&dev_priv->dev->mode_config.mutex);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+ * property.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ *
+ * Sets up the implicit placement property unless it's already set up.
+ */
+void
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
+{
+	if (dev_priv->implicit_placement_property)
+		return;
+
+	dev_priv->implicit_placement_property =
+		drm_property_create_range(dev_priv->dev,
+					  DRM_MODE_PROP_IMMUTABLE,
+					  "implicit_placement", 0, 1);
+}
+
+/**
+ * vmw_kms_suspend - Save modesetting state and turn modesetting off.
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ */
+int vmw_kms_suspend(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
+	if (IS_ERR(dev_priv->suspend_state)) {
+		int ret = PTR_ERR(dev_priv->suspend_state);
+
+		DRM_ERROR("Failed kms suspend: %d\n", ret);
+		dev_priv->suspend_state = NULL;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+
+/**
+ * vmw_kms_resume - Re-enable modesetting and restore state
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ *
+ * State is resumed from a previous vmw_kms_suspend(). It's illegal
+ * to call this function without a previous vmw_kms_suspend().
+ */
+int vmw_kms_resume(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
+
+	if (WARN_ON(!dev_priv->suspend_state))
+		return 0;
+
+	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
+	dev_priv->suspend_state = NULL;
+
+	return ret;
+}
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+	drm_atomic_helper_shutdown(dev);
+}
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting callbacks in &vmw_du_update_plane to do a
+ * plane update on a display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_plane_state *old_state = update->old_state;
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_rect clip;
+	struct drm_rect bb;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	uint32_t reserved_size = 0;
+	uint32_t submit_size = 0;
+	uint32_t curr_size = 0;
+	uint32_t num_hits = 0;
+	void *cmd_start;
+	char *cmd_next;
+	int ret;
+
+	/*
+	 * Iterate in advance to check whether a plane update is really needed
+	 * and to find the number of clips that actually are in the plane src,
+	 * for fifo allocation.
+	 */
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	if (num_hits == 0)
+		return 0;
+
+	if (update->vfb->bo) {
+		struct vmw_framebuffer_bo *vfbbo =
+			container_of(update->vfb, typeof(*vfbbo), base);
+
+		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+					    update->cpu_blit);
+	} else {
+		struct vmw_framebuffer_surface *vfbs =
+			container_of(update->vfb, typeof(*vfbs), base);
+
+		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+						  0, VMW_RES_DIRTY_NONE, NULL,
+						  NULL);
+	}
+
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+	if (ret)
+		goto out_unref;
+
+	reserved_size = update->calc_fifo_size(update, num_hits);
+	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
+	if (!cmd_start) {
+		ret = -ENOMEM;
+		goto out_revert;
+	}
+
+	cmd_next = cmd_start;
+
+	if (update->post_prepare) {
+		curr_size = update->post_prepare(update, cmd_next);
+		cmd_next += curr_size;
+		submit_size += curr_size;
+	}
+
+	if (update->pre_clip) {
+		curr_size = update->pre_clip(update, cmd_next, num_hits);
+		cmd_next += curr_size;
+		submit_size += curr_size;
+	}
+
+	bb.x1 = INT_MAX;
+	bb.y1 = INT_MAX;
+	bb.x2 = INT_MIN;
+	bb.y2 = INT_MIN;
+
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		uint32_t fb_x = clip.x1;
+		uint32_t fb_y = clip.y1;
+
+		vmw_du_translate_to_crtc(state, &clip);
+		if (update->clip) {
+			curr_size = update->clip(update, cmd_next, &clip, fb_x,
+						 fb_y);
+			cmd_next += curr_size;
+			submit_size += curr_size;
+		}
+		bb.x1 = min_t(int, bb.x1, clip.x1);
+		bb.y1 = min_t(int, bb.y1, clip.y1);
+		bb.x2 = max_t(int, bb.x2, clip.x2);
+		bb.y2 = max_t(int, bb.y2, clip.y2);
+	}
+
+	curr_size = update->post_clip(update, cmd_next, &bb);
+	submit_size += curr_size;
+
+	if (reserved_size < submit_size)
+		submit_size = 0;
+
+	vmw_fifo_commit(update->dev_priv, submit_size);
+
+	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+					 update->out_fence, NULL);
+	return ret;
+
+out_revert:
+	vmw_validation_revert(&val_ctx);
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 0000000..3ee0322
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_KMS_H_
+#define VMWGFX_KMS_H_
+
+#include <drm/drm_encoder.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if a cpu blit is needed.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer defines that functionality
+ * through the function callbacks of this structure. In brief it involves
+ * surface/buffer object validation, populating FIFO commands and command
+ * submission to the device.
+ */
+struct vmw_du_update_plane {
+	/**
+	 * @calc_fifo_size: Calculate fifo size.
+	 *
+	 * Determine the fifo size for the commands needed for the update. The
+	 * number of damage clips on the display unit, @num_hits, is passed so
+	 * that sufficient fifo space can be allocated.
+	 *
+	 * Return: Fifo size needed
+	 */
+	uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+				   uint32_t num_hits);
+
+	/**
+	 * @post_prepare: Populate fifo for resource preparation.
+	 *
+	 * Some surface resources or buffer objects need extra command
+	 * submission, like updating the GB image for a proxy surface or
+	 * defining a GMRFB for a screen object. That should be done here, as
+	 * this callback is called after FIFO allocation with the address of
+	 * the command buffer.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+	/**
+	 * @pre_clip: Populate fifo before clip.
+	 *
+	 * This is where pre-clip related commands should be populated, like
+	 * surface copy/DMA, etc.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+			     uint32_t num_hits);
+
+	/**
+	 * @clip: Populate fifo for clip.
+	 *
+	 * This is where to populate clips for surface copy/dma or blit commands
+	 * if needed. It is called once for each damage clip on the display
+	 * unit, which is once in total when doing a full update. @clip is the
+	 * damage in destination (crtc/DU) coordinates and @src_x, @src_y is the
+	 * damage clip source in framebuffer coordinates.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+			 struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+	/**
+	 * @post_clip: Populate fifo after clip.
+	 *
+	 * This is where to populate display unit update commands or blit
+	 * commands.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+				    struct drm_rect *bb);
+
+	struct drm_plane *plane;
+	struct drm_plane_state *old_state;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *du;
+	struct vmw_framebuffer *vfb;
+	struct vmw_fence_obj **out_fence;
+	struct mutex *mutex;
+	bool cpu_blit;
+	bool intr;
+};
+
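+/*
+ * Call order used by vmw_du_helper_plane_update() for the callbacks in
+ * struct vmw_du_update_plane above: calc_fifo_size() sizes the fifo
+ * reservation, post_prepare() and pre_clip() (both optional) then run once
+ * each, clip() runs once per damage rect, and post_clip() runs once with the
+ * bounding box of all clips before the accumulated commands are committed.
+ */
+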
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+	struct vmw_du_update_plane base;
+	/* This member is to handle special case SOU surface update */
+	void *cmd_start;
+};
+
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+	struct vmw_du_update_plane base;
+	int fb_left, fb_top;
+};
+
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @crtc: The crtc for which to build dirty commands.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be derived from (embedded in a larger
+ * structure) if more info needs to be passed between the helper caller and
+ * the helper callbacks.
+ */
+struct vmw_kms_dirty {
+	void (*fifo_commit)(struct vmw_kms_dirty *);
+	void (*clip)(struct vmw_kms_dirty *);
+	size_t fifo_reserve_size;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *unit;
+	void *cmd;
+	struct drm_crtc *crtc;
+	u32 num_hits;
+	s32 fb_x;
+	s32 fb_y;
+	s32 unit_x1;
+	s32 unit_y1;
+	s32 unit_x2;
+	s32 unit_y2;
+};
+
+#define VMWGFX_NUM_DISPLAY_UNITS 8
+
+
+#define vmw_framebuffer_to_vfb(x) \
+	container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+	container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+	container_of(x, struct vmw_framebuffer_bo, base.base)
+
+/**
+ * Base class for framebuffers
+ *
+ * @pin is called whenever a crtc uses this framebuffer
+ * @unpin is called when no crtc uses this framebuffer any longer
+ */
+struct vmw_framebuffer {
+	struct drm_framebuffer base;
+	int (*pin)(struct vmw_framebuffer *fb);
+	int (*unpin)(struct vmw_framebuffer *fb);
+	bool bo;
+	struct ttm_base_object *user_obj;
+	uint32_t user_handle;
+};
+
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
+
+struct vmw_framebuffer_surface {
+	struct vmw_framebuffer base;
+	struct vmw_surface *surface;
+	struct vmw_buffer_object *buffer;
+	struct list_head head;
+	bool is_bo_proxy;  /* true if this is proxy surface for DMA buf */
+};
+
+
+struct vmw_framebuffer_bo {
+	struct vmw_framebuffer base;
+	struct vmw_buffer_object *buffer;
+};
+
+
+static const uint32_t vmw_primary_plane_formats[] = {
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vmw_cursor_plane_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+
+#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
+#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
+#define vmw_connector_state_to_vcs(x) \
+		container_of(x, struct vmw_connector_state, base)
+
+/**
+ * Derived class for crtc state object
+ *
+ * @base: DRM crtc state object
+ */
+struct vmw_crtc_state {
+	struct drm_crtc_state base;
+};
+
+/**
+ * Derived class for plane state object
+ *
+ * @base: DRM plane state object
+ * @surf: Display surface for STDU
+ * @bo: Display bo for SOU
+ * @content_fb_type: Used by STDU.
+ * @bo_size: Size of the bo, used by the Screen Object Display Unit
+ * @pinned: Pin count for the STDU display surface
+ */
+struct vmw_plane_state {
+	struct drm_plane_state base;
+	struct vmw_surface *surf;
+	struct vmw_buffer_object *bo;
+
+	int content_fb_type;
+	unsigned long bo_size;
+
+	int pinned;
+
+	/* For CPU Blit */
+	unsigned int cpp;
+};
+
+
+/**
+ * Derived class for connector state object
+ *
+ * @base: DRM connector state object
+ * @is_implicit: connector property
+ *
+ */
+struct vmw_connector_state {
+	struct drm_connector_state base;
+
+	/**
+	 * @gui_x:
+	 *
+	 * vmwgfx connector property representing the x position of this display
+	 * unit (connector is synonymous with display unit) in the overall
+	 * topology. This is what the device expects as xRoot when creating the
+	 * screen.
+	 */
+	int gui_x;
+
+	/**
+	 * @gui_y:
+	 *
+	 * vmwgfx connector property representing the y position of this display
+	 * unit (connector is synonymous with display unit) in the overall
+	 * topology. This is what the device expects as yRoot when creating the
+	 * screen.
+	 */
+	int gui_y;
+};
+
+/**
+ * Base class display unit.
+ *
+ * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
+ * the display unit is all of them at the same time. This is true for both
+ * legacy multimon and screen objects.
+ */
+struct vmw_display_unit {
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+	struct drm_plane primary;
+	struct drm_plane cursor;
+
+	struct vmw_surface *cursor_surface;
+	struct vmw_buffer_object *cursor_bo;
+	size_t cursor_age;
+
+	int cursor_x;
+	int cursor_y;
+
+	int hotspot_x;
+	int hotspot_y;
+	s32 core_hotspot_x;
+	s32 core_hotspot_y;
+
+	unsigned unit;
+
+	/*
+	 * Preferred mode tracking.
+	 */
+	unsigned pref_width;
+	unsigned pref_height;
+	bool pref_active;
+	struct drm_display_mode *pref_mode;
+
+	/*
+	 * Gui positioning
+	 */
+	int gui_x;
+	int gui_y;
+	bool is_implicit;
+	int set_gui_x;
+	int set_gui_y;
+};
+
+struct vmw_validation_ctx {
+	struct vmw_resource *res;
+	struct vmw_buffer_object *buf;
+};
+
+#define vmw_crtc_to_du(x) \
+	container_of(x, struct vmw_display_unit, crtc)
+#define vmw_connector_to_du(x) \
+	container_of(x, struct vmw_display_unit, connector)
+
+
+/*
+ * Shared display unit functions - vmwgfx_kms.c
+ */
+void vmw_du_cleanup(struct vmw_display_unit *du);
+void vmw_du_crtc_save(struct drm_crtc *crtc);
+void vmw_du_crtc_restore(struct drm_crtc *crtc);
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			   u16 *r, u16 *g, u16 *b,
+			   uint32_t size,
+			   struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_connector_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val);
+int vmw_du_connector_atomic_set_property(struct drm_connector *connector,
+					 struct drm_connector_state *state,
+					 struct drm_property *property,
+					 uint64_t val);
+int
+vmw_du_connector_atomic_get_property(struct drm_connector *connector,
+				     const struct drm_connector_state *state,
+				     struct drm_property *property,
+				     uint64_t *val);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+void vmw_du_connector_save(struct drm_connector *connector);
+void vmw_du_connector_restore(struct drm_connector *connector);
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force);
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty);
+
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_buffer_object *bo,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd2 *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
+
+/* Universal Plane Helpers */
+void vmw_du_primary_plane_destroy(struct drm_plane *plane);
+void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
+
+/* Atomic Helpers */
+int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
+				      struct drm_plane_state *state);
+int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
+				     struct drm_plane_state *state);
+void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+				       struct drm_plane_state *old_state);
+int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
+				   struct drm_plane_state *new_state);
+void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
+			     struct drm_plane_state *old_state);
+void vmw_du_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
+void vmw_du_plane_destroy_state(struct drm_plane *plane,
+				struct drm_plane_state *state);
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
+			     bool unreference);
+
+int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+			     struct drm_crtc_state *state);
+void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state);
+void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state);
+void vmw_du_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
+void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
+				struct drm_crtc_state *state);
+void vmw_du_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+vmw_du_connector_duplicate_state(struct drm_connector *connector);
+
+void vmw_du_connector_destroy_state(struct drm_connector *connector,
+				    struct drm_connector_state *state);
+
+/*
+ * Legacy display unit functions - vmwgfx_ldu.c
+ */
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+			    struct vmw_framebuffer *framebuffer,
+			    unsigned int flags, unsigned int color,
+			    struct drm_clip_rect *clips,
+			    unsigned int num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment);
+
+/*
+ * Screen Objects display functions - vmwgfx_scrn.c
+ */
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence,
+				 struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+			    struct vmw_framebuffer *framebuffer,
+			    struct drm_clip_rect *clips,
+			    struct drm_vmw_rect *vclips,
+			    unsigned int num_clips, int increment,
+			    bool interruptible,
+			    struct vmw_fence_obj **out_fence,
+			    struct drm_crtc *crtc);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips,
+			 struct drm_crtc *crtc);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence,
+			       struct drm_crtc *crtc);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible,
+		     struct drm_crtc *crtc);
+
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+					    struct drm_rect *r)
+{
+	int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+	int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+	drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
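+
+/*
+ * Example: with state->src_x == state->src_y == 0 (16.16 fixed point) and the
+ * plane placed at crtc_x = 100, crtc_y = 50, a framebuffer rect
+ * (10, 10)-(20, 20) is translated to (110, 60)-(120, 70) in crtc coordinates.
+ */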
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 0000000..7b54c1f
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+
+#define vmw_crtc_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.crtc)
+#define vmw_encoder_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.encoder)
+#define vmw_connector_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.connector)
+
+struct vmw_legacy_display {
+	struct list_head active;
+
+	unsigned num_active;
+	unsigned last_num_active;
+
+	struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using the legacy register interface.
+ */
+struct vmw_legacy_display_unit {
+	struct vmw_display_unit base;
+
+	struct list_head active;
+};
+
+static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
+{
+	list_del_init(&ldu->active);
+	vmw_du_cleanup(&ldu->base);
+	kfree(ldu);
+}
+
+
+/*
+ * Legacy Display Unit CRTC functions
+ */
+
+static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
+}
+
+static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+{
+	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
+	struct vmw_legacy_display_unit *entry;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_crtc *crtc = NULL;
+	int i;
+
+	/* If there is no display topology the host just assumes
+	 * that the guest will set the same layout as the host.
+	 */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+		int w = 0, h = 0;
+		list_for_each_entry(entry, &lds->active, active) {
+			crtc = &entry->base.crtc;
+			w = max(w, crtc->x + crtc->mode.hdisplay);
+			h = max(h, crtc->y + crtc->mode.vdisplay);
+		}
+
+		if (crtc == NULL)
+			return 0;
+		fb = crtc->primary->state->fb;
+
+		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
+					  fb->format->cpp[0] * 8,
+					  fb->format->depth);
+	}
+
+	if (!list_empty(&lds->active)) {
+		entry = list_entry(lds->active.next, typeof(*entry), active);
+		fb = entry->base.crtc.primary->state->fb;
+
+		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
+				   fb->format->cpp[0] * 8, fb->format->depth);
+	}
+
+	/* Make sure we always show something. */
+	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+		  lds->num_active ? lds->num_active : 1);
+
+	i = 0;
+	list_for_each_entry(entry, &lds->active, active) {
+		crtc = &entry->base.crtc;
+
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+		i++;
+	}
+
+	BUG_ON(i != lds->num_active);
+
+	lds->last_num_active = lds->num_active;
+
+	return 0;
+}
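+
+/*
+ * Example of the register programming above: with two active units, unit 0 at
+ * (0, 0) 1024x768 and unit 1 at (1024, 0) 800x600, the loop writes display id
+ * 0 and 1 with those positions and sizes and flags display 0 as the primary
+ * one (SVGA_REG_DISPLAY_IS_PRIMARY = !i).
+ */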
+
+static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
+			      struct vmw_legacy_display_unit *ldu)
+{
+	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+	if (list_empty(&ldu->active))
+		return 0;
+
+	/* Must init otherwise list_empty(&ldu->active) will not work. */
+	list_del_init(&ldu->active);
+	if (--(ld->num_active) == 0) {
+		BUG_ON(!ld->fb);
+		if (ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		ld->fb = NULL;
+	}
+
+	return 0;
+}
+
+static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
+			      struct vmw_legacy_display_unit *ldu,
+			      struct vmw_framebuffer *vfb)
+{
+	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+	struct vmw_legacy_display_unit *entry;
+	struct list_head *at;
+
+	BUG_ON(!ld->num_active && ld->fb);
+	if (vfb != ld->fb) {
+		if (ld->fb && ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		vmw_svga_enable(vmw_priv);
+		if (vfb->pin)
+			vfb->pin(vfb);
+		ld->fb = vfb;
+	}
+
+	if (!list_empty(&ldu->active))
+		return 0;
+
+	at = &ld->active;
+	list_for_each_entry(entry, &ld->active, active) {
+		if (entry->base.unit > ldu->base.unit)
+			break;
+
+		at = &entry->active;
+	}
+
+	list_add(&ldu->active, at);
+
+	ld->num_active++;
+
+	return 0;
+}
+
+/**
+ * vmw_ldu_crtc_mode_set_nofb - Noop
+ *
+ * @crtc: CRTC associated with the new screen
+ *
+ * For LDU this is a noop; svga is enabled at plane update time
+ * (see vmw_ldu_add_active()).
+ */
+static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+}
+
+/**
+ * vmw_ldu_crtc_atomic_enable - Noop
+ *
+ * @crtc: CRTC associated with the new screen
+ *
+ * This is called after a mode set has been completed. This is usually
+ * a good place to call vmw_ldu_add_active/vmw_ldu_del_active,
+ * but since for LDU the display plane is closely tied to the
+ * CRTC, it makes more sense to do those at plane update time.
+ */
+static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
+				       struct drm_crtc_state *old_state)
+{
+}
+
+/**
+ * vmw_ldu_crtc_atomic_disable - Turns off CRTC
+ *
+ * @crtc: CRTC to be turned off
+ */
+static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
+					struct drm_crtc_state *old_state)
+{
+}
+
+static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_ldu_crtc_destroy,
+	.reset = vmw_du_crtc_reset,
+	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
+	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+	.set_config = drm_atomic_helper_set_config,
+};
+
+
+/*
+ * Legacy Display Unit encoder functions
+ */
+
+static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
+}
+
+static const struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+	.destroy = vmw_ldu_encoder_destroy,
+};
+
+/*
+ * Legacy Display Unit connector functions
+ */
+
+static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
+}
+
+static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.destroy = vmw_ldu_connector_destroy,
+	.reset = vmw_du_connector_reset,
+	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+	.atomic_destroy_state = vmw_du_connector_destroy_state,
+};
+
+static const struct
+drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
+};
+
+/*
+ * Legacy Display Plane Functions
+ */
+
+static void
+vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
+				    struct drm_plane_state *old_state)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_legacy_display_unit *ldu;
+	struct vmw_framebuffer *vfb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+
+
+	ldu = vmw_crtc_to_ldu(crtc);
+	dev_priv = vmw_priv(plane->dev);
+	fb       = plane->state->fb;
+
+	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
+
+	if (vfb)
+		vmw_ldu_add_active(dev_priv, ldu, vfb);
+	else
+		vmw_ldu_del_active(dev_priv, ldu);
+
+	vmw_ldu_commit_list(dev_priv);
+}
+
+
+static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_primary_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_cursor_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+/*
+ * Atomic Helpers
+ */
+static const struct
+drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
+	.atomic_check = vmw_du_cursor_plane_atomic_check,
+	.atomic_update = vmw_du_cursor_plane_atomic_update,
+	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
+	.cleanup_fb = vmw_du_plane_cleanup_fb,
+};
+
+static const struct
+drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
+	.atomic_check = vmw_du_primary_plane_atomic_check,
+	.atomic_update = vmw_ldu_primary_plane_atomic_update,
+};
+
+static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
+	.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
+	.atomic_check = vmw_du_crtc_atomic_check,
+	.atomic_begin = vmw_du_crtc_atomic_begin,
+	.atomic_flush = vmw_du_crtc_atomic_flush,
+	.atomic_enable = vmw_ldu_crtc_atomic_enable,
+	.atomic_disable = vmw_ldu_crtc_atomic_disable,
+};
+
+
+static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_legacy_display_unit *ldu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_plane *primary, *cursor;
+	struct drm_crtc *crtc;
+	int ret;
+
+	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
+	if (!ldu)
+		return -ENOMEM;
+
+	ldu->base.unit = unit;
+	crtc = &ldu->base.crtc;
+	encoder = &ldu->base.encoder;
+	connector = &ldu->base.connector;
+	primary = &ldu->base.primary;
+	cursor = &ldu->base.cursor;
+
+	INIT_LIST_HEAD(&ldu->active);
+
+	ldu->base.pref_active = (unit == 0);
+	ldu->base.pref_width = dev_priv->initial_width;
+	ldu->base.pref_height = dev_priv->initial_height;
+	ldu->base.pref_mode = NULL;
+
+	/*
+	 * Remove this after enabling atomic because property values can
+	 * only exist in a state object
+	 */
+	ldu->base.is_implicit = true;
+
+	/* Initialize primary plane */
+	vmw_du_plane_reset(primary);
+
+	ret = drm_universal_plane_init(dev, &ldu->base.primary,
+				       0, &vmw_ldu_plane_funcs,
+				       vmw_primary_plane_formats,
+				       ARRAY_SIZE(vmw_primary_plane_formats),
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize primary plane\n");
+		goto err_free;
+	}
+
+	drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
+
+	/* Initialize cursor plane */
+	vmw_du_plane_reset(cursor);
+
+	ret = drm_universal_plane_init(dev, &ldu->base.cursor,
+			0, &vmw_ldu_cursor_funcs,
+			vmw_cursor_plane_formats,
+			ARRAY_SIZE(vmw_cursor_plane_formats),
+			NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize cursor plane\n");
+		drm_plane_cleanup(&ldu->base.primary);
+		goto err_free;
+	}
+
+	drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+
+	vmw_du_connector_reset(connector);
+	ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+				 DRM_MODE_CONNECTOR_VIRTUAL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		goto err_free;
+	}
+
+	drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
+	connector->status = vmw_du_connector_detect(connector, true);
+
+	ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
+			       DRM_MODE_ENCODER_VIRTUAL, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize encoder\n");
+		goto err_free_connector;
+	}
+
+	(void) drm_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	ret = drm_connector_register(connector);
+	if (ret) {
+		DRM_ERROR("Failed to register connector\n");
+		goto err_free_encoder;
+	}
+
+	vmw_du_crtc_reset(crtc);
+	ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
+					&ldu->base.cursor,
+					&vmw_legacy_crtc_funcs, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize CRTC\n");
+		goto err_free_unregister;
+	}
+
+	drm_crtc_helper_add(crtc, &vmw_ldu_crtc_helper_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
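+	/* Properties used by the host to suggest display placement and
+	 * mode updates.
+	 */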
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	if (dev_priv->implicit_placement_property)
+		drm_object_attach_property
+			(&connector->base,
+			 dev_priv->implicit_placement_property,
+			 1);
+
+	return 0;
+
+err_free_unregister:
+	drm_connector_unregister(connector);
+err_free_encoder:
+	drm_encoder_cleanup(encoder);
+err_free_connector:
+	drm_connector_cleanup(connector);
+err_free:
+	kfree(ldu);
+	return ret;
+}
+
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+	if (dev_priv->ldu_priv) {
+		DRM_INFO("ldu system already on\n");
+		return -EINVAL;
+	}
+
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+	if (!dev_priv->ldu_priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
+	dev_priv->ldu_priv->num_active = 0;
+	dev_priv->ldu_priv->last_num_active = 0;
+	dev_priv->ldu_priv->fb = NULL;
+
+	/* for old hardware without multimon only enable one display */
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	else
+		ret = drm_vblank_init(dev, 1);
+	if (ret != 0)
+		goto err_free;
+
+	vmw_kms_create_implicit_placement_property(dev_priv);
+
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+			vmw_ldu_init(dev_priv, i);
+	else
+		vmw_ldu_init(dev_priv, 0);
+
+	dev_priv->active_display_unit = vmw_du_legacy;
+
+	DRM_INFO("Legacy Display Unit initialized\n");
+
+	return 0;
+
+err_free:
+	kfree(dev_priv->ldu_priv);
+	dev_priv->ldu_priv = NULL;
+	return ret;
+}
+
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
+{
+	if (!dev_priv->ldu_priv)
+		return -ENOSYS;
+
+	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
+
+	kfree(dev_priv->ldu_priv);
+
+	return 0;
+}
+
+
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+			    struct vmw_framebuffer *framebuffer,
+			    unsigned int flags, unsigned int color,
+			    struct drm_clip_rect *clips,
+			    unsigned int num_clips, int increment)
+{
+	size_t fifo_size;
+	int i;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	memset(cmd, 0, fifo_size);
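+	/* Emit one SVGA_CMD_UPDATE per dirty clip rectangle. */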
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		cmd[i].header = SVGA_CMD_UPDATE;
+		cmd[i].body.x = clips->x1;
+		cmd[i].body.y = clips->y1;
+		cmd[i].body.width = clips->x2 - clips->x1;
+		cmd[i].body.height = clips->y2 - clips->y1;
+	}
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
new file mode 100644
index 0000000..e53bc63
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_marker {
+	struct list_head head;
+	uint32_t seqno;
+	u64 submitted;
+};
+
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = 0;
+	queue->lag_time = ktime_get_raw_ns();
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
+{
+	struct vmw_marker *marker, *next;
+
+	spin_lock(&queue->lock);
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		kfree(marker);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_marker_push(struct vmw_marker_queue *queue,
+		   uint32_t seqno)
+{
+	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
+
+	if (unlikely(!marker))
+		return -ENOMEM;
+
+	marker->seqno = seqno;
+	marker->submitted = ktime_get_raw_ns();
+	spin_lock(&queue->lock);
+	list_add_tail(&marker->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+		   uint32_t signaled_seqno)
+{
+	struct vmw_marker *marker, *next;
+	bool updated = false;
+	u64 now;
+
+	spin_lock(&queue->lock);
+	now = ktime_get_raw_ns();
+
+	if (list_empty(&queue->head)) {
+		queue->lag = 0;
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
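+	/* Free all markers the signaled seqno has passed, allowing for
+	 * 32-bit seqno wrap-around, and update the measured lag.
+	 */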
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		if (signaled_seqno - marker->seqno > (1 << 30))
+			continue;
+
+		queue->lag = now - marker->submitted;
+		queue->lag_time = now;
+		updated = true;
+		list_del(&marker->head);
+		kfree(marker);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
+{
+	u64 now;
+
+	spin_lock(&queue->lock);
+	now = ktime_get_raw_ns();
+	queue->lag += now - queue->lag_time;
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
+		       uint32_t us)
+{
+	u64 cond = (u64) us * NSEC_PER_USEC;
+
+	return vmw_fifo_lag(queue) <= cond;
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_marker_queue *queue, uint32_t us)
+{
+	struct vmw_marker *marker;
+	uint32_t seqno;
+	int ret;
+
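+	/* While the accumulated fifo lag exceeds @us microseconds, wait on
+	 * the oldest outstanding marker (or the latest emitted seqno if the
+	 * queue is empty) and then pull it off the queue.
+	 */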
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			seqno = atomic_read(&dev_priv->marker_seq);
+		else {
+			marker = list_first_entry(&queue->head,
+						 struct vmw_marker, head);
+			seqno = marker->seqno;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
+					3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_marker_pull(queue, seqno);
+	}
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 0000000..0a6bbac
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/highmem.h>
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @num_pages:       Number of pages that make up the page table.
+ * @pt_level:        The indirection level of the page table, 0-2.
+ * @pt_root_page:    DMA address of the level 0 page of the page table.
+ */
+struct vmw_mob {
+	struct ttm_buffer_object *pt_bo;
+	unsigned long num_pages;
+	unsigned pt_level;
+	dma_addr_t pt_root_page;
+	uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+static const struct vmw_otable pre_dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct vmw_piter data_iter,
+			     unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ * @otable_bo:      Buffer object holding the object tables
+ * @offset:         Start offset of the table into @otable_bo
+ * @otable:         Pointer to otable metadata
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+				 SVGAOTableType type,
+				 struct ttm_buffer_object *otable_bo,
+				 unsigned long offset,
+				 struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase64 body;
+	} *cmd;
+	struct vmw_mob *mob;
+	const struct vmw_sg_table *vsgt;
+	struct vmw_piter iter;
+	int ret;
+
+	BUG_ON(otable->page_table != NULL);
+
+	vsgt = vmw_bo_sg_table(otable_bo);
+	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+	WARN_ON(!vmw_piter_next(&iter));
+
+	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+	if (unlikely(mob == NULL)) {
+		DRM_ERROR("Failed creating OTable page table.\n");
+		return -ENOMEM;
+	}
+
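+	/*
+	 * Pick the simplest addressing mode the table allows: a single page
+	 * is referenced directly, a contiguous region uses the RANGE format,
+	 * and anything larger needs a multi-level page table.
+	 */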
+	if (otable->size <= PAGE_SIZE) {
+		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			goto out_no_populate;
+
+		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
+	cmd->body.sizeInBytes = otable->size;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = mob->pt_level;
+
+	/*
+	 * The device doesn't support this, but the otable size is
+	 * determined at compile-time, so this BUG shouldn't trigger
+	 * randomly.
+	 */
+	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	otable->page_table = mob;
+
+	return 0;
+
+out_no_fifo:
+out_no_populate:
+	vmw_mob_destroy(mob);
+	return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ * @otable:         Pointer to otable metadata
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+				     SVGAOTableType type,
+				     struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase body;
+	} *cmd;
+	struct ttm_buffer_object *bo;
+
+	if (otable->page_table == NULL)
+		return;
+
+	bo = otable->page_table->pt_bo;
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = 0;
+	cmd->body.sizeInBytes = 0;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	if (bo) {
+		int ret;
+
+		ret = ttm_bo_reserve(bo, false, true, NULL);
+		BUG_ON(ret != 0);
+
+		vmw_bo_fence_single(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+
+	vmw_mob_destroy(otable->page_table);
+	otable->page_table = NULL;
+}
+
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+				  struct vmw_otable_batch *batch)
+{
+	unsigned long offset;
+	unsigned long bo_size;
+	struct vmw_otable *otables = batch->otables;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+	SVGAOTableType i;
+	int ret;
+
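+	/* All enabled otables share a single backing buffer object;
+	 * page-align each table and accumulate the total size.
+	 */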
+	bo_size = 0;
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!otables[i].enabled)
+			continue;
+
+		otables[i].size =
+			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+		bo_size += otables[i].size;
+	}
+
+	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, &batch->otable_bo);
+
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(batch->otable_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(batch->otable_bo);
+
+	offset = 0;
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!batch->otables[i].enabled)
+			continue;
+
+		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+					    offset,
+					    &otables[i]);
+		if (unlikely(ret != 0))
+			goto out_no_setup;
+		offset += otables[i].size;
+	}
+
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(batch->otable_bo);
+out_no_setup:
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+	}
+
+	ttm_bo_put(batch->otable_bo);
+	batch->otable_bo = NULL;
+out_no_bo:
+	return ret;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+	int ret;
+
+	if (dev_priv->has_dx) {
+		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
+		if (!(*otables))
+			return -ENOMEM;
+
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+	} else {
+		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
+				   GFP_KERNEL);
+		if (!(*otables))
+			return -ENOMEM;
+
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+	}
+
+	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+	if (unlikely(ret != 0))
+		goto out_setup;
+
+	return 0;
+
+out_setup:
+	kfree(*otables);
+	return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+			       struct vmw_otable_batch *batch)
+{
+	SVGAOTableType i;
+	struct ttm_buffer_object *bo = batch->otable_bo;
+	int ret;
+
+	for (i = 0; i < batch->num_otables; ++i)
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+
+	ret = ttm_bo_reserve(bo, false, true, NULL);
+	BUG_ON(ret != 0);
+
+	vmw_bo_fence_single(bo, NULL);
+	ttm_bo_unreserve(bo);
+
+	ttm_bo_put(batch->otable_bo);
+	batch->otable_bo = NULL;
+}
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+	kfree(dev_priv->otable_batch.otables);
+}
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages:  Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+	unsigned long data_size = data_pages * PAGE_SIZE;
+	unsigned long tot_size = 0;
+
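+	/*
+	 * Each iteration adds one page-table level: the number of entries
+	 * needed to map the level below, rounded up to whole pages.
+	 */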
+	while (likely(data_size > PAGE_SIZE)) {
+		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+		data_size *= VMW_PPN_SIZE;
+		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages:  Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+	if (unlikely(!mob))
+		return NULL;
+
+	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+	return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob:         Pointer to the mob the pagetable of which we want to
+ *               populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
+ * memory resources aren't sufficient. It may cause TTM buffer objects
+ * to be swapped out through the TTM memory accounting.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob)
+{
+	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+
+	BUG_ON(mob->pt_bo != NULL);
+
+	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, &mob->pt_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
+
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(mob->pt_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(mob->pt_bo);
+
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(mob->pt_bo);
+	ttm_bo_put(mob->pt_bo);
+	mob->pt_bo = NULL;
+
+	return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The page table entry
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
+{
+	*((u64 *) *addr) = val >> PAGE_SHIFT;
+	*addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
+{
+	*(*addr)++ = val >> PAGE_SHIFT;
+}
+#endif
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_iter:      Page iterator over the underlying buffer
+ *                  object's data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_iter:        Page iterator over the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+				      unsigned long num_data_pages,
+				      struct vmw_piter *pt_iter)
+{
+	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+	unsigned long pt_page;
+	u32 *addr, *save_addr;
+	unsigned long i;
+	struct page *page;
+
+	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+		page = vmw_piter_page(pt_iter);
+
+		save_addr = addr = kmap_atomic(page);
+
+		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+			vmw_mob_assign_ppn(&addr,
+					   vmw_piter_dma_addr(data_iter));
+			if (unlikely(--num_data_pages == 0))
+				break;
+			WARN_ON(!vmw_piter_next(data_iter));
+		}
+		kunmap_atomic(save_addr);
+		vmw_piter_next(pt_iter);
+	}
+
+	return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob:            Pointer to a mob whose page table needs setting up.
+ * @data_iter:      Page iterator over the buffer object's data
+ *                  pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Iteratively builds the levels of a multilevel mob page table.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct vmw_piter data_iter,
+			     unsigned long num_data_pages)
+{
+	unsigned long num_pt_pages = 0;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+	struct vmw_piter save_pt_iter;
+	struct vmw_piter pt_iter;
+	const struct vmw_sg_table *vsgt;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, true, NULL);
+	BUG_ON(ret != 0);
+
+	vsgt = vmw_bo_sg_table(bo);
+	vmw_piter_start(&pt_iter, vsgt, 0);
+	BUG_ON(!vmw_piter_next(&pt_iter));
+	mob->pt_level = 0;
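+	/*
+	 * Build the page table bottom-up: each pass writes one level of
+	 * entries covering the pages produced by the previous pass, until
+	 * only a single root page remains.
+	 */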
+	while (likely(num_data_pages > 1)) {
+		++mob->pt_level;
+		BUG_ON(mob->pt_level > 2);
+		save_pt_iter = pt_iter;
+		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+						&pt_iter);
+		data_iter = save_pt_iter;
+		num_data_pages = num_pt_pages;
+	}
+
+	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+	ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob:            Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+	if (mob->pt_bo) {
+		ttm_bo_put(mob->pt_bo);
+		mob->pt_bo = NULL;
+	}
+	kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+		    struct vmw_mob *mob)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBMob body;
+	} *cmd;
+	int ret;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+
+	if (bo) {
+		ret = ttm_bo_reserve(bo, false, true, NULL);
+		/*
+		 * No one else should be using this buffer.
+		 */
+		BUG_ON(ret != 0);
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (cmd) {
+		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.mobid = mob->id;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	if (bo) {
+		vmw_bo_fence_single(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+	vmw_fifo_resource_dec(dev_priv);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ *                populating it if necessary.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob we're making visible.
+ * @vsgt:           Pointer to a struct vmw_sg_table describing the data pages
+ *                  of the underlying buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ *                  object.
+ * @mob_id:         Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+		 struct vmw_mob *mob,
+		 const struct vmw_sg_table *vsgt,
+		 unsigned long num_data_pages,
+		 int32_t mob_id)
+{
+	int ret;
+	bool pt_set_up = false;
+	struct vmw_piter data_iter;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBMob64 body;
+	} *cmd;
+
+	mob->id = mob_id;
+	vmw_piter_start(&data_iter, vsgt, 0);
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
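+	/*
+	 * As with otables, pick the simplest addressing mode: a single data
+	 * page is referenced directly, a contiguous region uses the RANGE
+	 * format, and anything else needs a (possibly pre-built) page table.
+	 */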
+	if (likely(num_data_pages == 1)) {
+		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (unlikely(mob->pt_bo == NULL)) {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			return ret;
+
+		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+		pt_set_up = true;
+		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+	}
+
+	vmw_fifo_resource_inc(dev_priv);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		goto out_no_cmd_space;
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.mobid = mob_id;
+	cmd->body.ptDepth = mob->pt_level;
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
+	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+
+out_no_cmd_space:
+	vmw_fifo_resource_dec(dev_priv);
+	if (pt_set_up) {
+		ttm_bo_put(mob->pt_bo);
+		mob->pt_bo = NULL;
+	}
+
+	return -ENOMEM;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
new file mode 100644
index 0000000..b6c5e4c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/frame.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/hypervisor.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_msg.h"
+
+#define MESSAGE_STATUS_SUCCESS  0x0001
+#define MESSAGE_STATUS_DORECV   0x0002
+#define MESSAGE_STATUS_CPT      0x0010
+#define MESSAGE_STATUS_HB       0x0080
+
+#define RPCI_PROTOCOL_NUM       0x49435052
+#define GUESTMSG_FLAG_COOKIE    0x80000000
+
+#define RETRIES                 3
+
+#define VMW_HYPERVISOR_MAGIC    0x564D5868
+
+#define VMW_PORT_CMD_MSG        30
+#define VMW_PORT_CMD_HB_MSG     0
+#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
+
+#define HIGH_WORD(X) (((X) & 0xFFFF0000) >> 16)
+
+static u32 vmw_msg_enabled = 1;
+
+enum rpc_msg_type {
+	MSG_TYPE_OPEN,
+	MSG_TYPE_SENDSIZE,
+	MSG_TYPE_SENDPAYLOAD,
+	MSG_TYPE_RECVSIZE,
+	MSG_TYPE_RECVPAYLOAD,
+	MSG_TYPE_RECVSTATUS,
+	MSG_TYPE_CLOSE,
+};
+
+struct rpc_channel {
+	u16 channel_id;
+	u32 cookie_high;
+	u32 cookie_low;
+};
+
+
+
+/**
+ * vmw_open_channel
+ *
+ * @channel: RPC channel
+ * @protocol: Protocol number to open the channel with, e.g. RPCI_PROTOCOL_NUM
+ *
+ * Returns: 0 on success
+ */
+static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
+{
+	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
+
+	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
+		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
+		0,
+		VMW_HYPERVISOR_MAGIC,
+		eax, ebx, ecx, edx, si, di);
+
+	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+		return -EINVAL;
+
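+	/* The channel id is returned in the high word of EDX, and the
+	 * session cookie halves in SI and DI.
+	 */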
+	channel->channel_id  = HIGH_WORD(edx);
+	channel->cookie_high = si;
+	channel->cookie_low  = di;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_close_channel
+ *
+ * @channel: RPC channel
+ *
+ * Returns: 0 on success
+ */
+static int vmw_close_channel(struct rpc_channel *channel)
+{
+	unsigned long eax, ebx, ecx, edx, si, di;
+
+	/* Set up additional parameters */
+	si  = channel->cookie_high;
+	di  = channel->cookie_low;
+
+	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
+		0, si, di,
+		channel->channel_id << 16,
+		VMW_HYPERVISOR_MAGIC,
+		eax, ebx, ecx, edx, si, di);
+
+	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * vmw_port_hb_out - Send the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @msg: NULL-terminated message.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
+				     const char *msg, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+	unsigned long msg_len = strlen(msg);
+
+	if (hb) {
+		unsigned long bp = channel->cookie_high;
+
+		si = (uintptr_t) msg;
+		di = channel->cookie_low;
+
+		VMW_PORT_HB_OUT(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			msg_len, si, di,
+			VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
+			VMWARE_HYPERVISOR_OUT,
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Send the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
+		unsigned int bytes = min_t(size_t, msg_len, 4);
+		unsigned long word = 0;
+
+		memcpy(&word, msg, bytes);
+		msg_len -= bytes;
+		msg += bytes;
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
+			 word, si, di,
+			 channel->channel_id << 16,
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+	}
+
+	return ecx;
+}
+
+/**
+ * vmw_port_hb_in - Receive the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @reply: Pointer to buffer holding reply.
+ * @reply_len: Length of the reply.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
+				    unsigned long reply_len, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+
+	if (hb) {
+		unsigned long bp = channel->cookie_low;
+
+		si = channel->cookie_high;
+		di = (uintptr_t) reply;
+
+		VMW_PORT_HB_IN(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			reply_len, si, di,
+			VMWARE_HYPERVISOR_HB | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Retrieve the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (reply_len) {
+		unsigned int bytes = min_t(unsigned long, reply_len, 4);
+
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
+			 MESSAGE_STATUS_SUCCESS, si, di,
+			 channel->channel_id << 16,
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+			break;
+
+		memcpy(reply, &ebx, bytes);
+		reply_len -= bytes;
+		reply += bytes;
+	}
+
+	return ecx;
+}
+
+
+/**
+ * vmw_send_msg: Sends a message to the host
+ *
+ * @channel: RPC channel
+ * @msg: NULL terminated message string
+ *
+ * Returns: 0 on success
+ */
+static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
+{
+	unsigned long eax, ebx, ecx, edx, si, di;
+	size_t msg_len = strlen(msg);
+	int retries = 0;
+
+	while (retries < RETRIES) {
+		retries++;
+
+		/* Set up additional parameters */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
+			msg_len, si, di,
+			channel->channel_id << 16,
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			/* Expected success but the host reported failure. Give up. */
+			return -EINVAL;
+		}
+
+		/* Send msg */
+		ebx = vmw_port_hb_out(channel, msg,
+				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+
+		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
+			return 0;
+		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+			/* A checkpoint occurred. Retry. */
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	return -EINVAL;
+}
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
+
+
+/**
+ * vmw_recv_msg: Receives a message from the host
+ *
+ * Note:  It is the caller's responsibility to call kfree() on msg.
+ *
+ * @channel:  channel opened by vmw_open_channel
+ * @msg:  [OUT] message received from the host
+ * @msg_len: message length
+ */
+static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+			size_t *msg_len)
+{
+	unsigned long eax, ebx, ecx, edx, si, di;
+	char *reply;
+	size_t reply_len;
+	int retries = 0;
+
+
+	*msg_len = 0;
+	*msg = NULL;
+
+	while (retries < RETRIES) {
+		retries++;
+
+		/* Set up additional parameters */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
+			0, si, di,
+			channel->channel_id << 16,
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			DRM_ERROR("Failed to get reply size for host message.\n");
+			return -EINVAL;
+		}
+
+		/* No reply available.  This is okay. */
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
+			return 0;
+
+		reply_len = ebx;
+		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
+		if (!reply) {
+			DRM_ERROR("Cannot allocate memory for host message reply.\n");
+			return -ENOMEM;
+		}
+
+
+		/* Receive buffer */
+		ebx = vmw_port_hb_in(channel, reply, reply_len,
+				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			kfree(reply);
+			reply = NULL;
+			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+				/* A checkpoint occurred. Retry. */
+				continue;
+			}
+
+			return -EINVAL;
+		}
+
+		reply[reply_len] = '\0';
+
+
+		/* Ack buffer */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
+			MESSAGE_STATUS_SUCCESS, si, di,
+			channel->channel_id << 16,
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			kfree(reply);
+			reply = NULL;
+			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
+				/* A checkpoint occurred. Retry. */
+				continue;
+			}
+
+			return -EINVAL;
+		}
+
+		break;
+	}
+
+	if (!reply)
+		return -EINVAL;
+
+	*msg_len = reply_len;
+	*msg     = reply;
+
+	return 0;
+}
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
+
+
+/**
+ * vmw_host_get_guestinfo: Gets a GuestInfo parameter
+ *
+ * Gets the value of a GuestInfo.* parameter.  The value returned will be in
+ * a string, and it is up to the caller to post-process.
+ *
+ * @guest_info_param:  Parameter to get, e.g. GuestInfo.svga.gl3
+ * @buffer: if NULL, *length will contain the reply size.
+ * @length: size of @buffer.  Set to the size of the reply upon return
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+			   char *buffer, size_t *length)
+{
+	struct rpc_channel channel;
+	char *msg, *reply = NULL;
+	size_t reply_len = 0;
+
+	if (!vmw_msg_enabled)
+		return -ENODEV;
+
+	if (!guest_info_param || !length)
+		return -EINVAL;
+
+	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
+	if (!msg) {
+		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+			  guest_info_param);
+		return -ENOMEM;
+	}
+
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+		goto out_open;
+
+	if (vmw_send_msg(&channel, msg) ||
+	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+		goto out_msg;
+
+	vmw_close_channel(&channel);
+	if (buffer && reply && reply_len > 0) {
+		/* Remove the reply code, which occupies the first 2
+		 * characters of the reply.
+		 */
+		reply_len = reply_len >= 2 ? reply_len - 2 : 0;
+		reply_len = min(reply_len, *length);
+
+		if (reply_len > 0)
+			memcpy(buffer, reply + 2, reply_len);
+	}
+
+	*length = reply_len;
+
+	kfree(reply);
+	kfree(msg);
+
+	return 0;
+
+out_msg:
+	vmw_close_channel(&channel);
+	kfree(reply);
+out_open:
+	*length = 0;
+	kfree(msg);
+	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
+
+	return -EINVAL;
+}
+
+
+
+/**
+ * vmw_host_log: Sends a log message to the host
+ *
+ * @log: NULL terminated string
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_log(const char *log)
+{
+	struct rpc_channel channel;
+	char *msg;
+	int ret = 0;
+
+
+	if (!vmw_msg_enabled)
+		return -ENODEV;
+
+	if (!log)
+		return ret;
+
+	msg = kasprintf(GFP_KERNEL, "log %s", log);
+	if (!msg) {
+		DRM_ERROR("Cannot allocate memory for host log message.\n");
+		return -ENOMEM;
+	}
+
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+		goto out_open;
+
+	if (vmw_send_msg(&channel, msg))
+		goto out_msg;
+
+	vmw_close_channel(&channel);
+	kfree(msg);
+
+	return 0;
+
+out_msg:
+	vmw_close_channel(&channel);
+out_open:
+	kfree(msg);
+	DRM_ERROR("Failed to send host log message.\n");
+
+	return -EINVAL;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
new file mode 100644
index 0000000..f685c70
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************
+ *
+ * Based on code from vmware.c and vmmouse.c.
+ * Author:
+ *   Sinclair Yeh <syeh@vmware.com>
+ */
+#ifndef _VMWGFX_MSG_H
+#define _VMWGFX_MSG_H
+
+#include <asm/vmware.h>
+
+/**
+ * Hypervisor-specific bi-directional communication channel.  Should never
+ * execute on bare metal hardware.  The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last two parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ebx: [IN] Message Len, through EBX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si:  [OUT]
+ * @di:  [OUT]
+ */
+#define VMW_PORT(cmd, in_ebx, in_si, in_di,	\
+		 flags, magic,		\
+		 eax, ebx, ecx, edx, si, di)	\
+({						\
+	asm volatile (VMWARE_HYPERCALL :	\
+		"=a"(eax),			\
+		"=b"(ebx),			\
+		"=c"(ecx),			\
+		"=d"(edx),			\
+		"=S"(si),			\
+		"=D"(di) :			\
+		"a"(magic),			\
+		"b"(in_ebx),			\
+		"c"(cmd),			\
+		"d"(flags),			\
+		"S"(in_si),			\
+		"D"(in_di) :			\
+		"memory");			\
+})
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel.  Should never
+ * execute on bare metal hardware.  The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last 3 parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ecx: [IN] Message Len, through ECX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @bp:  [IN]
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si:  [OUT]
+ * @di:  [OUT]
+ */
+#ifdef __x86_64__
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
+			flags, magic, bp,		\
+			eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%rbp;"			\
+		"mov %12, %%rbp;"			\
+		VMWARE_HYPERCALL_HB_OUT			\
+		"pop %%rbp;" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(flags),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"r"(bp) :				\
+		"memory", "cc");			\
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di,	\
+		       flags, magic, bp,		\
+		       eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%rbp;"			\
+		"mov %12, %%rbp;"			\
+		VMWARE_HYPERCALL_HB_IN			\
+		"pop %%rbp" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(flags),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"r"(bp) :				\
+		"memory", "cc");			\
+})
+
+#else
+
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we have run out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
+ */
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
+			flags, magic, bp,		\
+			eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
+		VMWARE_HYPERCALL_HB_OUT			\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(flags),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"m"(bp) :				\
+		"memory", "cc");			\
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di,	\
+		       flags, magic, bp,		\
+		       eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
+		VMWARE_HYPERCALL_HB_IN			\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(flags),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"m"(bp) :				\
+		"memory", "cc");			\
+})
+#endif /* __x86_64__ */
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 0000000..395614f
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
+
+#include "vmwgfx_drv.h"
+
+#define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
+
+struct vmw_stream {
+	struct vmw_buffer_object *buf;
+	bool claimed;
+	bool paused;
+	struct drm_vmw_control_stream_arg saved;
+};
+
+/**
+ * Overlay control
+ */
+struct vmw_overlay {
+	/*
+	 * Each stream is a single overlay. In Xv these are called ports.
+	 */
+	struct mutex mutex;
+	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
+};
+
+static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	return dev_priv ? dev_priv->overlay_priv : NULL;
+}
+
+struct vmw_escape_header {
+	uint32_t cmd;
+	SVGAFifoCmdEscape body;
+};
+
+struct vmw_escape_video_flush {
+	struct vmw_escape_header escape;
+	SVGAEscapeVideoFlush flush;
+};
+
+static inline void fill_escape(struct vmw_escape_header *header,
+			       uint32_t size)
+{
+	header->cmd = SVGA_CMD_ESCAPE;
+	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
+	header->body.size = size;
+}
+
+static inline void fill_flush(struct vmw_escape_video_flush *cmd,
+			      uint32_t stream_id)
+{
+	fill_escape(&cmd->escape, sizeof(cmd->flush));
+	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+	cmd->flush.streamId = stream_id;
+}
+
+/**
+ * Send put command to hw.
+ *
+ * Returns 0 on success, or -ENOMEM if FIFO space cannot be reserved.
+ */
+static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
+				struct drm_vmw_control_stream_arg *arg,
+				bool interruptible)
+{
+	struct vmw_escape_video_flush *flush;
+	size_t fifo_size;
+	bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
+	int i, num_items;
+	SVGAGuestPtr ptr;
+
+	struct {
+		struct vmw_escape_header escape;
+		struct {
+			uint32_t cmdType;
+			uint32_t streamId;
+		} header;
+	} *cmds;
+	struct {
+		uint32_t registerId;
+		uint32_t value;
+	} *items;
+
+	/* The defines are indices; we need highest index + 1 items. */
+	if (have_so)
+		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
+	else
+		num_items = SVGA_VIDEO_PITCH_3 + 1;
+
+	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+
+	cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	/* hardware has hung, can't do anything here */
+	if (!cmds)
+		return -ENOMEM;
+
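+	/* The reserved space holds the set-regs escape header, then the
+	 * register items, and finally the flush command.
+	 */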
+	items = (typeof(items))&cmds[1];
+	flush = (struct vmw_escape_video_flush *)&items[num_items];
+
+	/* the size is header + number of items */
+	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
+
+	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+	cmds->header.streamId = arg->stream_id;
+
+	/* the IDs are neatly numbered */
+	for (i = 0; i < num_items; i++)
+		items[i].registerId = i;
+
+	vmw_bo_get_guest_ptr(&buf->base, &ptr);
+	ptr.offset += arg->offset;
+
+	items[SVGA_VIDEO_ENABLED].value     = true;
+	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
+	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
+	items[SVGA_VIDEO_FORMAT].value      = arg->format;
+	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
+	items[SVGA_VIDEO_SIZE].value        = arg->size;
+	items[SVGA_VIDEO_WIDTH].value       = arg->width;
+	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
+	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
+	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
+	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
+	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
+	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
+	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
+	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
+	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
+	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
+	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
+	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
+	if (have_so) {
+		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
+		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
+	}
+
+	fill_flush(flush, arg->stream_id);
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	return 0;
+}
+
+/**
+ * Send stop command to hw.
+ *
+ * Returns
+ * 0 on success, or -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
+				 uint32_t stream_id,
+				 bool interruptible)
+{
+	struct {
+		struct vmw_escape_header escape;
+		SVGAEscapeVideoSetRegs body;
+		struct vmw_escape_video_flush flush;
+	} *cmds;
+	int ret;
+
+	for (;;) {
+		cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
+		if (cmds)
+			break;
+
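+		/*
+		 * The fifo is full; wait up to three seconds for space to
+		 * free up before retrying the reservation.
+		 */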
+		ret = vmw_fallback_wait(dev_priv, false, true, 0,
+					interruptible, 3*HZ);
+		if (interruptible && ret == -ERESTARTSYS)
+			return ret;
+		else
+			BUG_ON(ret != 0);
+	}
+
+	fill_escape(&cmds->escape, sizeof(cmds->body));
+	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+	cmds->body.header.streamId = stream_id;
+	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
+	cmds->body.items[0].value = false;
+	fill_flush(&cmds->flush, stream_id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+	return 0;
+}
+
+/**
+ * Move a buffer into vram or GMR memory if @pin is set, else unpin it.
+ *
+ * With the introduction of screen objects, buffers can now be placed
+ * in a GMR instead of being locked to vram.
+ */
+static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
+				   struct vmw_buffer_object *buf,
+				   bool pin, bool inter)
+{
+	if (!pin)
+		return vmw_bo_unpin(dev_priv, buf, inter);
+
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_bo_pin_in_vram(dev_priv, buf, inter);
+
+	return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
+}
+
+/**
+ * Stop or pause a stream.
+ *
+ * If the stream is paused, the no-evict flag is removed from the buffer,
+ * but the buffer itself is left in vram. This allows, for instance,
+ * mode_set to evict it should it need to.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * @stream_id: Which stream to stop/pause.
+ * @pause: True to pause, false to stop completely.
+ */
+static int vmw_overlay_stop(struct vmw_private *dev_priv,
+			    uint32_t stream_id, bool pause,
+			    bool interruptible)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct vmw_stream *stream = &overlay->stream[stream_id];
+	int ret;
+
+	/* With no buffer attached, the stream is already completely stopped */
+	if (!stream->buf)
+		return 0;
+
+	/* If the stream is paused this is already done */
+	if (!stream->paused) {
+		ret = vmw_overlay_send_stop(dev_priv, stream_id,
+					    interruptible);
+		if (ret)
+			return ret;
+
+		/* We only remove the NO_EVICT flag, so this can't return -ENOMEM */
+		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
+					      interruptible);
+		if (interruptible && ret == -ERESTARTSYS)
+			return ret;
+		else
+			BUG_ON(ret != 0);
+	}
+
+	if (!pause) {
+		vmw_bo_unreference(&stream->buf);
+		stream->paused = false;
+	} else {
+		stream->paused = true;
+	}
+
+	return 0;
+}
+
+/**
+ * Update a stream and send any put or stop fifo commands needed.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * Returns
+ * -ENOMEM if buffer doesn't fit in vram.
+ * -ERESTARTSYS if interrupted.
+ */
+static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
+				     struct vmw_buffer_object *buf,
+				     struct drm_vmw_control_stream_arg *arg,
+				     bool interruptible)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
+	int ret = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
+		  stream->buf, buf, stream->paused ? "" : "not ");
+
+	if (stream->buf != buf) {
+		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
+				       false, interruptible);
+		if (ret)
+			return ret;
+	} else if (!stream->paused) {
+		/* If the buffers match and not paused then just send
+		 * the put command, no need to do anything else.
+		 */
+		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+		if (ret == 0)
+			stream->saved = *arg;
+		else
+			BUG_ON(!interruptible);
+
+		return ret;
+	}
+
+	/* We don't start the old stream if we are interrupted.
+	 * Might return -ENOMEM if it can't fit the buffer in vram.
+	 */
+	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
+	if (ret)
+		return ret;
+
+	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+	if (ret) {
+		/* This one needs to happen no matter what. We only remove
+		 * the NO_EVICT flag so this is safe from -ENOMEM.
+		 */
+		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
+		       != 0);
+		return ret;
+	}
+
+	if (stream->buf != buf)
+		stream->buf = vmw_bo_reference(buf);
+	stream->saved = *arg;
+	/* stream is no longer stopped/paused */
+	stream->paused = false;
+
+	return 0;
+}
+
+/**
+ * Stop all streams.
+ *
+ * Used by the fb code when starting.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_stop_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		struct vmw_stream *stream = &overlay->stream[i];
+		if (!stream->buf)
+			continue;
+
+		ret = vmw_overlay_stop(dev_priv, i, false, false);
+		WARN_ON(ret != 0);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+/**
+ * Try to resume all paused streams.
+ *
+ * Used by the kms code after moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_resume_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		struct vmw_stream *stream = &overlay->stream[i];
+		if (!stream->paused)
+			continue;
+
+		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
+						&stream->saved, false);
+		if (ret != 0)
+			DRM_INFO("%s: *warning* failed to resume stream %i\n",
+				 __func__, i);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+/**
+ * Pauses all active streams.
+ *
+ * Used by the kms code when moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_pause_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		if (overlay->stream[i].paused)
+			DRM_INFO("%s: *warning* stream %i already paused\n",
+				 __func__, i);
+		ret = vmw_overlay_stop(dev_priv, i, true, false);
+		WARN_ON(ret != 0);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+
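+/**
+ * vmw_overlay_available - Check whether the device supports overlays.
+ *
+ * Requires an initialized overlay private as well as the video and
+ * escape fifo capabilities.
+ */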
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->overlay_priv != NULL &&
+		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+		 VMW_OVERLAY_CAP_MASK));
+}
+
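+/**
+ * vmw_overlay_ioctl - Userspace entry point for controlling a stream.
+ *
+ * Looks up the user stream and, depending on the ioctl argument, either
+ * stops it or updates it with the supplied buffer and parameters.
+ */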
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct drm_vmw_control_stream_arg *arg =
+	    (struct drm_vmw_control_stream_arg *)data;
+	struct vmw_buffer_object *buf;
+	struct vmw_resource *res;
+	int ret;
+
+	if (!vmw_overlay_available(dev_priv))
+		return -ENOSYS;
+
+	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
+	if (ret)
+		return ret;
+
+	mutex_lock(&overlay->mutex);
+
+	if (!arg->enabled) {
+		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
+		goto out_unlock;
+	}
+
+	ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
+	if (ret)
+		goto out_unlock;
+
+	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+	vmw_bo_unreference(&buf);
+
+out_unlock:
+	mutex_unlock(&overlay->mutex);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
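+/**
+ * vmw_overlay_num_overlays - Return the total number of overlay streams,
+ * or zero if overlays are unavailable.
+ */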
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
+{
+	if (!vmw_overlay_available(dev_priv))
+		return 0;
+
+	return VMW_MAX_NUM_STREAMS;
+}
+
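+/**
+ * vmw_overlay_num_free_overlays - Return the number of unclaimed overlay
+ * streams, or zero if overlays are unavailable.
+ */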
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, k;
+
+	if (!vmw_overlay_available(dev_priv))
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
+		if (!overlay->stream[i].claimed)
+			k++;
+
+	mutex_unlock(&overlay->mutex);
+
+	return k;
+}
+
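+/**
+ * vmw_overlay_claim - Claim a free overlay stream.
+ *
+ * @out: On success, set to the id of the claimed stream.
+ *
+ * Returns 0 on success, -ENOSYS if overlays are unavailable, or -ESRCH
+ * if all streams are already claimed.
+ */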
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i;
+
+	if (!overlay)
+		return -ENOSYS;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+
+		if (overlay->stream[i].claimed)
+			continue;
+
+		overlay->stream[i].claimed = true;
+		*out = i;
+		mutex_unlock(&overlay->mutex);
+		return 0;
+	}
+
+	mutex_unlock(&overlay->mutex);
+	return -ESRCH;
+}
+
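+/**
+ * vmw_overlay_unref - Release a previously claimed overlay stream.
+ *
+ * Stops the stream and marks it as unclaimed. Returns -ENOSYS if
+ * overlays are unavailable, otherwise 0.
+ */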
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+
+	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
+
+	if (!overlay)
+		return -ENOSYS;
+
+	mutex_lock(&overlay->mutex);
+
+	WARN_ON(!overlay->stream[stream_id].claimed);
+	vmw_overlay_stop(dev_priv, stream_id, false, false);
+	overlay->stream[stream_id].claimed = false;
+
+	mutex_unlock(&overlay->mutex);
+	return 0;
+}
+
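+/**
+ * vmw_overlay_init - Allocate and initialize the overlay private.
+ *
+ * Returns -EINVAL if an overlay private already exists, -ENOMEM on
+ * allocation failure, otherwise 0.
+ */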
+int vmw_overlay_init(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay;
+	int i;
+
+	if (dev_priv->overlay_priv)
+		return -EINVAL;
+
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+	if (!overlay)
+		return -ENOMEM;
+
+	mutex_init(&overlay->mutex);
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		overlay->stream[i].buf = NULL;
+		overlay->stream[i].paused = false;
+		overlay->stream[i].claimed = false;
+	}
+
+	dev_priv->overlay_priv = overlay;
+
+	return 0;
+}
+
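+/**
+ * vmw_overlay_close - Stop any remaining streams and free the overlay
+ * private. Warns if a stream still had a buffer attached.
+ */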
+int vmw_overlay_close(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	bool forgotten_buffer = false;
+	int i;
+
+	if (!overlay)
+		return -ENOSYS;
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		if (overlay->stream[i].buf) {
+			forgotten_buffer = true;
+			vmw_overlay_stop(dev_priv, i, false, false);
+		}
+	}
+
+	WARN_ON(forgotten_buffer);
+
+	dev_priv->overlay_priv = NULL;
+	kfree(overlay);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 0000000..e420675
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ *     Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm_object.h"
+#include <linux/dma-buf.h>
+
+/*
+ * DMA-BUF attach and mapping methods. No need to implement
+ * these until other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+				struct dma_buf_attachment *attach)
+{
+	return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+				 struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+					      enum dma_data_direction dir)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				    struct sg_table *sgb,
+				    enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+				 struct vm_area_struct *vma)
+{
+	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+	return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
+	.attach = vmw_prime_map_attach,
+	.detach = vmw_prime_map_detach,
+	.map_dma_buf = vmw_prime_map_dma_buf,
+	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
+	.release = NULL,
+	.map = vmw_prime_dmabuf_kmap,
+	.unmap = vmw_prime_dmabuf_kunmap,
+	.mmap = vmw_prime_dmabuf_mmap,
+	.vmap = vmw_prime_dmabuf_vmap,
+	.vunmap = vmw_prime_dmabuf_vunmap,
+};
+
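+/**
+ * vmw_prime_fd_to_handle - Convert a prime file descriptor to a TTM
+ * handle for the calling file, using the TTM prime implementation.
+ */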
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
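+/**
+ * vmw_prime_handle_to_fd - Export a TTM handle as a prime file
+ * descriptor, using the TTM prime implementation.
+ */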
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 0000000..e99f6cd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * This file contains virtual hardware defines for kernel space.
+ */
+
+#ifndef _VMWGFX_REG_H_
+#define _VMWGFX_REG_H_
+
+#include <linux/types.h>
+
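+/* I/O port offsets used to access SVGA device registers. */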
+#define VMWGFX_INDEX_PORT     0x0
+#define VMWGFX_VALUE_PORT     0x1
+#define VMWGFX_IRQSTATUS_PORT 0x8
+
+struct svga_guest_mem_descriptor {
+	u32 ppn;
+	u32 num_pages;
+};
+
+struct svga_fifo_cmd_fence {
+	u32 fence;
+};
+
+#define SVGA_SYNC_GENERIC         1
+#define SVGA_SYNC_FIFOFULL        2
+
+#include "device_include/svga3d_reg.h"
+
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 0000000..5581a78
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
+
+#define VMW_RES_EVICT_ERR_COUNT 10
+
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+
+	dma_resv_assert_held(res->backup->base.base.resv);
+	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+		res->func->prio;
+	list_add_tail(&res->mob_head, &backup->res_list);
+	vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+
+	dma_resv_assert_held(backup->base.base.resv);
+	if (vmw_resource_mob_attached(res)) {
+		list_del_init(&res->mob_head);
+		vmw_bo_prio_del(backup, res->used_prio);
+	}
+}
+
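+/**
+ * vmw_resource_reference - Take a reference on a resource.
+ *
+ * @res: The resource to reference.
+ *
+ * Returns @res with its refcount incremented.
+ */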
+struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
+{
+	kref_get(&res->kref);
+	return res;
+}
+
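+/**
+ * vmw_resource_reference_unless_doomed - Take a reference on a resource
+ * unless it is about to be destroyed.
+ *
+ * @res: The resource to reference.
+ *
+ * Returns @res if a reference could be taken, or NULL if the refcount
+ * had already dropped to zero.
+ */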
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set it to -1
+ */
+void vmw_resource_release_id(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	spin_lock(&dev_priv->resource_lock);
+	if (res->id != -1)
+		idr_remove(idr, res->id);
+	res->id = -1;
+	spin_unlock(&dev_priv->resource_lock);
+}
+
+static void vmw_resource_release(struct kref *kref)
+{
+	struct vmw_resource *res =
+	    container_of(kref, struct vmw_resource, kref);
+	struct vmw_private *dev_priv = res->dev_priv;
+	int id;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	spin_lock(&dev_priv->resource_lock);
+	list_del_init(&res->lru_head);
+	spin_unlock(&dev_priv->resource_lock);
+	if (res->backup) {
+		struct ttm_buffer_object *bo = &res->backup->base;
+
+		ttm_bo_reserve(bo, false, false, NULL);
+		if (vmw_resource_mob_attached(res) &&
+		    res->func->unbind != NULL) {
+			struct ttm_validate_buffer val_buf;
+
+			val_buf.bo = bo;
+			val_buf.num_shared = 0;
+			res->func->unbind(res, false, &val_buf);
+		}
+		res->backup_dirty = false;
+		vmw_resource_mob_detach(res);
+		ttm_bo_unreserve(bo);
+		vmw_bo_unreference(&res->backup);
+	}
+
+	if (likely(res->hw_destroy != NULL)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
+	}
+
+	id = res->id;
+	if (res->res_free != NULL)
+		res->res_free(res);
+	else
+		kfree(res);
+
+	spin_lock(&dev_priv->resource_lock);
+	if (id != -1)
+		idr_remove(idr, id);
+	spin_unlock(&dev_priv->resource_lock);
+}
+
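+/**
+ * vmw_resource_unreference - Drop a reference on a resource.
+ *
+ * @p_res: Pointer to the resource pointer; cleared on return.
+ *
+ * The resource is released when the last reference is dropped.
+ */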
+void vmw_resource_unreference(struct vmw_resource **p_res)
+{
+	struct vmw_resource *res = *p_res;
+
+	*p_res = NULL;
+	kref_put(&res->kref, vmw_resource_release);
+}
+
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource id from the id manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+int vmw_resource_alloc_id(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	BUG_ON(res->id != -1);
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&dev_priv->resource_lock);
+
+	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		res->id = ret;
+
+	spin_unlock(&dev_priv->resource_lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func)
+{
+	kref_init(&res->kref);
+	res->hw_destroy = NULL;
+	res->res_free = res_free;
+	res->dev_priv = dev_priv;
+	res->func = func;
+	INIT_LIST_HEAD(&res->lru_head);
+	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
+	res->id = -1;
+	res->backup = NULL;
+	res->backup_offset = 0;
+	res->backup_dirty = false;
+	res->res_dirty = false;
+	res->used_prio = 3;
+	if (delay_id)
+		return 0;
+	else
+		return vmw_resource_alloc_id(res);
+}
+
+
+/**
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
+ */
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+				    struct ttm_object_file *tfile,
+				    uint32_t handle,
+				    const struct vmw_user_resource_conv
+				    *converter,
+				    struct vmw_resource **p_res)
+{
+	struct ttm_base_object *base;
+	struct vmw_resource *res;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL))
+		return -EINVAL;
+
+	if (unlikely(ttm_base_object_type(base) != converter->object_type))
+		goto out_bad_resource;
+
+	res = converter->base_obj_to_res(base);
+	kref_get(&res->kref);
+
+	*p_res = res;
+	ret = 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle without taking a reference, and perform basic
+ * type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ *
+ * If the handle can't be found, ERR_PTR(-ESRCH) is returned. If it is
+ * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+				      struct ttm_object_file *tfile,
+				      uint32_t handle,
+				      const struct vmw_user_resource_conv
+				      *converter)
+{
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_noref_lookup(tfile, handle);
+	if (!base)
+		return ERR_PTR(-ESRCH);
+
+	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+		ttm_base_object_noref_release();
+		return ERR_PTR(-EINVAL);
+	}
+
+	return converter->base_obj_to_res(base);
+}
+
+/**
+ * Helper function that looks up either a surface or a buffer object.
+ *
+ * The pointers pointed at by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   struct vmw_surface **out_surf,
+			   struct vmw_buffer_object **out_buf)
+{
+	struct vmw_resource *res;
+	int ret;
+
+	BUG_ON(*out_surf || *out_buf);
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+					      user_surface_converter,
+					      &res);
+	if (!ret) {
+		*out_surf = vmw_res_to_srf(res);
+		return 0;
+	}
+
+	*out_surf = NULL;
+	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+	return ret;
+}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+				  bool interruptible)
+{
+	unsigned long size =
+		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	struct vmw_buffer_object *backup;
+	int ret;
+
+	if (likely(res->backup)) {
+		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		return 0;
+	}
+
+	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+	if (unlikely(!backup))
+		return -ENOMEM;
+
+	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
+			      res->func->backup_placement,
+			      interruptible,
+			      &vmw_bo_bo_free);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	res->backup = backup;
+
+out_no_bo:
+	return ret;
+}
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf)
+{
+	int ret = 0;
+	const struct vmw_res_func *func = res->func;
+
+	if (unlikely(res->id == -1)) {
+		ret = func->create(res);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	if (func->bind &&
+	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
+	      val_buf->bo != NULL) ||
+	     (!func->needs_backup && val_buf->bo != NULL))) {
+		ret = func->bind(res, val_buf);
+		if (unlikely(ret != 0))
+			goto out_bind_failed;
+		if (func->needs_backup)
+			vmw_resource_mob_attach(res);
+	}
+
+	return 0;
+
+out_bind_failed:
+	func->destroy(res);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @dirty_set:         Change dirty status of the resource.
+ * @dirty:             When changing dirty status indicates the new status.
+ * @switch_backup:     Backup buffer has been switched.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool dirty_set,
+			    bool dirty,
+			    bool switch_backup,
+			    struct vmw_buffer_object *new_backup,
+			    unsigned long new_backup_offset)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (!list_empty(&res->lru_head))
+		return;
+
+	if (switch_backup && new_backup != res->backup) {
+		if (res->backup) {
+			vmw_resource_mob_detach(res);
+			vmw_bo_unreference(&res->backup);
+		}
+
+		if (new_backup) {
+			res->backup = vmw_bo_reference(new_backup);
+			vmw_resource_mob_attach(res);
+		} else {
+			res->backup = NULL;
+		}
+	}
+	if (switch_backup)
+		res->backup_offset = new_backup_offset;
+
+	if (dirty_set)
+		res->res_dirty = dirty;
+
+	if (!res->func->may_evict || res->id == -1 || res->pin_count)
+		return;
+
+	spin_lock(&dev_priv->resource_lock);
+	list_add_tail(&res->lru_head,
+		      &res->dev_priv->res_lru[res->func->res_type]);
+	spin_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @ticket:         The ww acquire context to use, or NULL if trylocking.
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+static int
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+			  struct vmw_resource *res,
+			  bool interruptible,
+			  struct ttm_validate_buffer *val_buf)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct list_head val_list;
+	bool backup_dirty = false;
+	int ret;
+
+	if (unlikely(res->backup == NULL)) {
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	INIT_LIST_HEAD(&val_list);
+	ttm_bo_get(&res->backup->base);
+	val_buf->bo = &res->backup->base;
+	val_buf->num_shared = 0;
+	list_add_tail(&val_buf->head, &val_list);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+				     true);
+	if (unlikely(ret != 0))
+		goto out_no_reserve;
+
+	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+		return 0;
+
+	backup_dirty = res->backup_dirty;
+	ret = ttm_bo_validate(&res->backup->base,
+			      res->func->backup_placement,
+			      &ctx);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	ttm_eu_backoff_reservation(ticket, &val_list);
+out_no_reserve:
+	ttm_bo_put(val_buf->bo);
+	val_buf->bo = NULL;
+	if (backup_dirty)
+		vmw_bo_unreference(&res->backup);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ * @interruptible:  Whether any sleeps during backup buffer allocation
+ *                  should be performed while interruptible.
+ * @no_backup:      If true, skip allocating a backup buffer even if the
+ *                  resource type normally needs one.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+			 bool no_backup)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	spin_lock(&dev_priv->resource_lock);
+	list_del_init(&res->lru_head);
+	spin_unlock(&dev_priv->resource_lock);
+
+	if (res->func->needs_backup && res->backup == NULL &&
+	    !no_backup) {
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu. bytes\n",
+				  (unsigned long) res->backup_size);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @ticket:         The ww acquire ctx used for reservation.
+ * @val_buf:        Backup buffer information.
+ */
+static void
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+
+	if (likely(val_buf->bo == NULL))
+		return;
+
+	INIT_LIST_HEAD(&val_list);
+	list_add_tail(&val_buf->head, &val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_bo_put(val_buf->bo);
+	val_buf->bo = NULL;
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
+ * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
+ */
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+				 struct vmw_resource *res, bool interruptible)
+{
+	struct ttm_validate_buffer val_buf;
+	const struct vmw_res_func *func = res->func;
+	int ret;
+
+	BUG_ON(!func->may_evict);
+
+	val_buf.bo = NULL;
+	val_buf.num_shared = 0;
+	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(func->unbind != NULL &&
+		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+		ret = func->unbind(res, res->res_dirty, &val_buf);
+		if (unlikely(ret != 0))
+			goto out_no_unbind;
+		vmw_resource_mob_detach(res);
+	}
+	ret = func->destroy(res);
+	res->backup_dirty = true;
+	res->res_dirty = false;
+out_no_unbind:
+	vmw_resource_backoff_reservation(ticket, &val_buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
+ */
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
+{
+	int ret;
+	struct vmw_resource *evict_res;
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
+
+	if (!res->func->create)
+		return 0;
+
+	val_buf.bo = NULL;
+	val_buf.num_shared = 0;
+	if (res->backup)
+		val_buf.bo = &res->backup->base;
+	do {
+		ret = vmw_resource_do_validate(res, &val_buf);
+		if (likely(ret != -EBUSY))
+			break;
+
+		spin_lock(&dev_priv->resource_lock);
+		if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device device resources "
+				  "for %s.\n", res->func->type_name);
+			ret = -EBUSY;
+			spin_unlock(&dev_priv->resource_lock);
+			break;
+		}
+
+		evict_res = vmw_resource_reference
+			(list_first_entry(lru_list, struct vmw_resource,
+					  lru_head));
+		list_del_init(&evict_res->lru_head);
+
+		spin_unlock(&dev_priv->resource_lock);
+
+		/* Trylock backup buffers with a NULL ticket. */
+		ret = vmw_resource_do_evict(NULL, evict_res, intr);
+		if (unlikely(ret != 0)) {
+			spin_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			spin_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+	else if (!res->func->needs_backup && res->backup) {
+		WARN_ON_ONCE(vmw_resource_mob_attached(res));
+		vmw_bo_unreference(&res->backup);
+	}
+
+	return 0;
+
+out_no_validate:
+	return ret;
+}
+
+
+/**
+ * vmw_resource_unbind_list - Unbind all resources attached to the given MOB
+ *
+ * @vbo: Pointer to the current backing MOB.
+ *
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
+ */
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
+{
+
+	struct vmw_resource *res, *next;
+	struct ttm_validate_buffer val_buf = {
+		.bo = &vbo->base,
+		.num_shared = 0
+	};
+
+	dma_resv_assert_held(vbo->base.base.resv);
+	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+		if (!res->func->unbind)
+			continue;
+
+		(void) res->func->unbind(res, res->res_dirty, &val_buf);
+		res->backup_dirty = true;
+		res->res_dirty = false;
+		vmw_resource_mob_detach(res);
+	}
+
+	(void) ttm_bo_wait(&vbo->base, false, false);
+}
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist. This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
+{
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_private *dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackAllQuery body;
+	} *cmd;
+
+
+	/* No query bound, so do nothing */
+	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+		return 0;
+
+	dx_query_ctx = dx_query_mob->dx_query_ctx;
+	dev_priv     = dx_query_ctx->dev_priv;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid    = dx_query_ctx->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Triggers a rebind the next time the affected context is bound */
+	dx_query_mob->dx_query_ctx = NULL;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct vmw_buffer_object *dx_query_mob;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct vmw_private *dev_priv;
+
+
+	dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mutex_lock(&dev_priv->binding_mutex);
+
+	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
+	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return;
+	}
+
+	/* If BO is being moved from MOB to system memory */
+	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+		struct vmw_fence_obj *fence;
+
+		(void) vmw_query_readback_all(dx_query_mob);
+		mutex_unlock(&dev_priv->binding_mutex);
+
+		/* Create a fence and attach the BO to it */
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		vmw_bo_fence_single(bo, fence);
+
+		if (fence != NULL)
+			vmw_fence_obj_unreference(&fence);
+
+		(void) ttm_bo_wait(bo, false, false);
+	} else
+		mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+	return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * try to evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+				    enum vmw_res_type type)
+{
+	struct list_head *lru_list = &dev_priv->res_lru[type];
+	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
+	struct ww_acquire_ctx ticket;
+
+	do {
+		spin_lock(&dev_priv->resource_lock);
+
+		if (list_empty(lru_list))
+			goto out_unlock;
+
+		evict_res = vmw_resource_reference(
+			list_first_entry(lru_list, struct vmw_resource,
+					 lru_head));
+		list_del_init(&evict_res->lru_head);
+		spin_unlock(&dev_priv->resource_lock);
+
+		/* Wait lock backup buffers with a ticket. */
+		ret = vmw_resource_do_evict(&ticket, evict_res, false);
+		if (unlikely(ret != 0)) {
+			spin_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			spin_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+out_unlock:
+	spin_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+	enum vmw_res_type type;
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	for (type = 0; type < vmw_res_max; ++type)
+		vmw_resource_evict_type(dev_priv, type);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ * @interruptible: Whether any waits while pinning should be interruptible
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+	struct ttm_operation_ctx ctx = { interruptible, false };
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (res->pin_count == 0) {
+		struct vmw_buffer_object *vbo = NULL;
+
+		if (res->backup) {
+			vbo = res->backup;
+
+			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 &ctx);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
+			}
+
+			/* Do we really need to pin the MOB as well? */
+			vmw_bo_pin_reserved(vbo, true);
+		}
+		ret = vmw_resource_validate(res, interruptible);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
+		if (ret)
+			goto out_no_validate;
+	}
+	res->pin_count++;
+
+out_no_validate:
+	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
+out_no_reserve:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	ret = vmw_resource_reserve(res, false, true);
+	WARN_ON(ret);
+
+	WARN_ON(res->pin_count == 0);
+	if (--res->pin_count == 0 && res->backup) {
+		struct vmw_buffer_object *vbo = res->backup;
+
+		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
+	}
+
+	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 0000000..984e588
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per ida.
+ */
+#define VMW_IDA_ACC_SIZE 128
+
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+	enum ttm_object_type object_type;
+	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+	void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @prio:              Priority used for the backing buffer when the
+ *                     resource is clean.
+ * @dirty_prio:        Priority used for the backing buffer when the
+ *                     resource is dirty.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ * @commit_notify:     If the resource is a command buffer managed resource,
+ *                     callback to notify that a define or remove command
+ *                     has been committed to the device.
+ */
+struct vmw_res_func {
+	enum vmw_res_type res_type;
+	bool needs_backup;
+	const char *type_name;
+	struct ttm_placement *backup_placement;
+	bool may_evict;
+	u32 prio;
+	u32 dirty_prio;
+
+	int (*create) (struct vmw_resource *res);
+	int (*destroy) (struct vmw_resource *res);
+	int (*bind) (struct vmw_resource *res,
+		     struct ttm_validate_buffer *val_buf);
+	int (*unbind) (struct vmw_resource *res,
+		       bool readback,
+		       struct ttm_validate_buffer *val_buf);
+	void (*commit_notify)(struct vmw_resource *res,
+			      enum vmw_cmdbuf_res_state state);
+};
+
+/**
+ * struct vmw_simple_resource_func - members and functions common for the
+ * simple resource helpers.
+ * @res_func:  struct vmw_res_func as described above.
+ * @ttm_res_type:  TTM resource type used for handle recognition.
+ * @size:  Size of the simple resource information struct.
+ * @init:  Initialize the simple resource information.
+ * @hw_destroy:  A resource hw_destroy function.
+ * @set_arg_handle:  Set the handle output argument of the ioctl create struct.
+ */
+struct vmw_simple_resource_func {
+	const struct vmw_res_func res_func;
+	int ttm_res_type;
+	size_t size;
+	int (*init)(struct vmw_resource *res, void *data);
+	void (*hw_destroy)(struct vmw_resource *res);
+	void (*set_arg_handle)(void *data, u32 handle);
+};
+
+/**
+ * struct vmw_simple_resource - Kernel only side simple resource
+ * @res: The resource we derive from.
+ * @func: The method and member virtual table.
+ */
+struct vmw_simple_resource {
+	struct vmw_resource res;
+	const struct vmw_simple_resource_func *func;
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func);
+int
+vmw_simple_resource_create_ioctl(struct drm_device *dev,
+				 void *data,
+				 struct drm_file *file_priv,
+				 const struct vmw_simple_resource_func *func);
+struct vmw_resource *
+vmw_simple_resource_lookup(struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   const struct vmw_simple_resource_func *func);
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
new file mode 100644
index 0000000..e5a2832
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -0,0 +1,1393 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+
+#define vmw_crtc_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.crtc)
+#define vmw_encoder_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.encoder)
+#define vmw_connector_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.connector)
+
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: Difference between source clip rects and framebuffer coordinates.
+ * @dst_y: Difference between source clip rects and framebuffer coordinates.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+	struct vmw_kms_dirty base;
+	s32 left, right, top, bottom;
+	s32 dst_x, dst_y;
+	u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+	uint32 header;
+	SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_bo_blit {
+	uint32 header;
+	SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+struct vmw_kms_sou_define_gmrfb {
+	uint32_t header;
+	SVGAFifoCmdDefineGMRFB body;
+};
+
+/**
+ * Display unit using screen objects.
+ */
+struct vmw_screen_object_unit {
+	struct vmw_display_unit base;
+
+	unsigned long buffer_size; /**< Size of allocated buffer */
+	struct vmw_buffer_object *buffer; /**< Backing store buffer */
+
+	bool defined;
+};
+
+static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
+{
+	vmw_du_cleanup(&sou->base);
+	kfree(sou);
+}
+
+
+/*
+ * Screen Object Display Unit CRTC functions
+ */
+
+static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
+}
+
+/**
+ * Send the fifo command to create a screen.
+ */
+static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
+			       struct vmw_screen_object_unit *sou,
+			       int x, int y,
+			       struct drm_display_mode *mode)
+{
+	size_t fifo_size;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAScreenObject obj;
+	} *cmd;
+
+	BUG_ON(!sou->buffer);
+
+	fifo_size = sizeof(*cmd);
+	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
+	cmd->obj.structSize = sizeof(SVGAScreenObject);
+	cmd->obj.id = sou->base.unit;
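+	/* All screens have a root position; unit 0 is also primary. */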
+	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
+		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
+	cmd->obj.size.width = mode->hdisplay;
+	cmd->obj.size.height = mode->vdisplay;
+	cmd->obj.root.x = x;
+	cmd->obj.root.y = y;
+	sou->base.set_gui_x = cmd->obj.root.x;
+	sou->base.set_gui_y = cmd->obj.root.y;
+
+	/* Ok to assume that buffer is pinned in vram */
+	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+	cmd->obj.backingStore.pitch = mode->hdisplay * 4;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	sou->defined = true;
+
+	return 0;
+}
+
+/**
+ * vmw_sou_fifo_destroy - Send the fifo command to destroy a screen.
+ */
+static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
+				struct vmw_screen_object_unit *sou)
+{
+	size_t fifo_size;
+	int ret;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAFifoCmdDestroyScreen body;
+	} *cmd;
+
+	/* no need to do anything */
+	if (unlikely(!sou->defined))
+		return 0;
+
+	fifo_size = sizeof(*cmd);
+	cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
+	cmd->body.screenId = sou->base.unit;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW\n");
+	else
+		sou->defined = false;
+
+	return ret;
+}
+
+/**
+ * vmw_sou_crtc_mode_set_nofb - Create or destroy a screen
+ *
+ * @crtc: CRTC associated with the new screen
+ *
+ * This function creates or destroys a screen object.  It has no way of
+ * reporting failure to its caller, so on error it logs the problem and
+ * leaves the display unit in the most consistent state it can.
+ */
+static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_object_unit *sou;
+	struct vmw_framebuffer *vfb;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *ps;
+	struct vmw_plane_state *vps;
+	int ret;
+
+	sou = vmw_crtc_to_sou(crtc);
+	dev_priv = vmw_priv(crtc->dev);
+	ps = crtc->primary->state;
+	fb = ps->fb;
+	vps = vmw_plane_state_to_vps(ps);
+
+	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
+
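+	/* Destroy any previously defined screen object first. */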
+	if (sou->defined) {
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		if (ret) {
+			DRM_ERROR("Failed to destroy Screen Object\n");
+			return;
+		}
+	}
+
+	if (vfb) {
+		struct drm_connector_state *conn_state;
+		struct vmw_connector_state *vmw_conn_state;
+		int x, y;
+
+		sou->buffer = vps->bo;
+		sou->buffer_size = vps->bo_size;
+
+		conn_state = sou->base.connector.state;
+		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+		x = vmw_conn_state->gui_x;
+		y = vmw_conn_state->gui_y;
+
+		ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
+		if (ret)
+			DRM_ERROR("Failed to define Screen Object %dx%d\n",
+				  crtc->x, crtc->y);
+
+	} else {
+		sou->buffer = NULL;
+		sou->buffer_size = 0;
+	}
+}
+
+/**
+ * vmw_sou_crtc_helper_prepare - Noop
+ *
+ * @crtc: CRTC associated with the new screen
+ *
+ * Prepares the CRTC for a mode set, but we don't need to do anything here.
+ */
+static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
+{
+}
+
+/**
+ * vmw_sou_crtc_atomic_enable - Noop
+ *
+ * @crtc: CRTC associated with the new screen
+ *
+ * This is called after a mode set has been completed.
+ */
+static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
+				       struct drm_crtc_state *old_state)
+{
+}
+
+/**
+ * vmw_sou_crtc_atomic_disable - Turns off CRTC
+ *
+ * @crtc: CRTC to be turned off
+ */
+static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
+					struct drm_crtc_state *old_state)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_object_unit *sou;
+	int ret;
+
+
+	if (!crtc) {
+		DRM_ERROR("CRTC is NULL\n");
+		return;
+	}
+
+	sou = vmw_crtc_to_sou(crtc);
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (sou->defined) {
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		if (ret)
+			DRM_ERROR("Failed to destroy Screen Object\n");
+	}
+}
+
+static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_sou_crtc_destroy,
+	.reset = vmw_du_crtc_reset,
+	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
+	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+};
+
+/*
+ * Screen Object Display Unit encoder functions
+ */
+
+static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
+}
+
+static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+	.destroy = vmw_sou_encoder_destroy,
+};
+
+/*
+ * Screen Object Display Unit connector functions
+ */
+
+static void vmw_sou_connector_destroy(struct drm_connector *connector)
+{
+	vmw_sou_destroy(vmw_connector_to_sou(connector));
+}
+
+static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.destroy = vmw_sou_connector_destroy,
+	.reset = vmw_du_connector_reset,
+	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+	.atomic_destroy_state = vmw_du_connector_destroy_state,
+};
+
+
+static const struct
+drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
+};
+
+
+
+/*
+ * Screen Object Display Plane Functions
+ */
+
+/**
+ * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
+ *
+ * @plane:  display plane
+ * @old_state: Contains the FB to clean up
+ *
+ * Unpins and drops the reference on the backing buffer object.
+ */
+static void
+vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
+				 struct drm_plane_state *old_state)
+{
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+	struct drm_crtc *crtc = plane->state->crtc ?
+		plane->state->crtc : old_state->crtc;
+
+	if (vps->bo)
+		vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+	vmw_bo_unreference(&vps->bo);
+	vps->bo_size = 0;
+
+	vmw_du_plane_cleanup_fb(plane, old_state);
+}
+
+
+/**
+ * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
+ *
+ * @plane:  display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * The SOU backing buffer is our equivalent of the display plane.
+ *
+ * Returns 0 on success
+ */
+static int
+vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
+				 struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *new_fb = new_state->fb;
+	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+	struct vmw_private *dev_priv;
+	size_t size;
+	int ret;
+
+
+	if (!new_fb) {
+		vmw_bo_unreference(&vps->bo);
+		vps->bo_size = 0;
+
+		return 0;
+	}
+
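+	/* The backing store is 32 bpp, sized to the CRTC viewport. */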
+	size = new_state->crtc_w * new_state->crtc_h * 4;
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (vps->bo) {
+		if (vps->bo_size == size) {
+			/*
+			 * Note that this might temporarily up the pin-count
+			 * to 2, until cleanup_fb() is called.
+			 */
+			return vmw_bo_pin_in_vram(dev_priv, vps->bo,
+						      true);
+		}
+
+		vmw_bo_unreference(&vps->bo);
+		vps->bo_size = 0;
+	}
+
+	vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+	if (!vps->bo)
+		return -ENOMEM;
+
+	vmw_svga_enable(dev_priv);
+
+	/* After the backing store has been allocated, we might not be able to
+	 * resume the overlays; that is preferable to failing the allocation.
+	 */
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_bo_init(dev_priv, vps->bo, size,
+			      &vmw_vram_ne_placement,
+			      false, &vmw_bo_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+	if (ret) {
+		vps->bo = NULL; /* vmw_bo_init frees on error */
+		return ret;
+	}
+
+	vps->bo_size = size;
+
+	/*
+	 * TTM already thinks the buffer is pinned, but make sure the
+	 * pin_count is upped.
+	 */
+	return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
+}
+
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+				     uint32_t num_hits)
+{
+	return sizeof(struct vmw_kms_sou_define_gmrfb) +
+		sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+					void *cmd)
+{
+	struct vmw_framebuffer_bo *vfbbo =
+		container_of(update->vfb, typeof(*vfbbo), base);
+	struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+	int depth = update->vfb->base.format->depth;
+
+	/* Emulate RGBA support: contrary to svga_reg.h, a color depth of 32
+	 * is not supported by hosts. This only matters if we later read the
+	 * value back and expect to get what we uploaded.
+	 */
+	if (depth == 32)
+		depth = 24;
+
+	gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+	gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+	gmr->body.format.colorDepth = depth;
+	gmr->body.format.reserved = 0;
+	gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+	vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+	return sizeof(*gmr);
+}
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane  *update,
+					 void *cmd, struct drm_rect *clip,
+					 uint32_t fb_x, uint32_t fb_y)
+{
+	struct vmw_kms_sou_bo_blit *blit = cmd;
+
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = update->du->unit;
+	blit->body.srcOrigin.x = fb_x;
+	blit->body.srcOrigin.y = fb_y;
+	blit->body.destRect.left = clip->x1;
+	blit->body.destRect.top = clip->y1;
+	blit->body.destRect.right = clip->x2;
+	blit->body.destRect.bottom = clip->y2;
+
+	return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_bo_post_clip(struct vmw_du_update_plane  *update,
+				      void *cmd, struct drm_rect *bb)
+{
+	return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Display plane.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+				   struct drm_plane *plane,
+				   struct drm_plane_state *old_state,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_buffer bo_update;
+
+	memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+	bo_update.base.plane = plane;
+	bo_update.base.old_state = old_state;
+	bo_update.base.dev_priv = dev_priv;
+	bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	bo_update.base.vfb = vfb;
+	bo_update.base.out_fence = out_fence;
+	bo_update.base.mutex = NULL;
+	bo_update.base.cpu_blit = false;
+	bo_update.base.intr = true;
+
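+	/* Callbacks for FIFO sizing, GMRFB setup and clip encoding. */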
+	bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+	bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+	bo_update.base.clip = vmw_sou_bo_populate_clip;
+	bo_update.base.post_clip = vmw_sou_bo_post_clip;
+
+	return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+					  uint32_t num_hits)
+{
+	return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+		num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+					     void *cmd)
+{
+	struct vmw_du_update_plane_surface *srf_update;
+
+	srf_update = container_of(update, typeof(*srf_update), base);
+
+	/*
+	 * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+	 * its bounding box is filled before iterating over all the clips. So
+	 * store the FIFO start address and revisit to fill the details.
+	 */
+	srf_update->cmd_start = cmd;
+
+	return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_kms_sou_dirty_cmd *blit = cmd;
+	struct vmw_framebuffer_surface *vfbs;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+		num_hits;
+
+	blit->body.srcImage.sid = vfbs->surface->res.id;
+	blit->body.destScreenId = update->du->unit;
+
+	/* Update the source and destination bounding box later in post_clip */
+	blit->body.srcRect.left = 0;
+	blit->body.srcRect.top = 0;
+	blit->body.srcRect.right = 0;
+	blit->body.srcRect.bottom = 0;
+
+	blit->body.destRect.left = 0;
+	blit->body.destRect.top = 0;
+	blit->body.destRect.right = 0;
+	blit->body.destRect.bottom = 0;
+
+	return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+					  void *cmd, struct drm_rect *clip,
+					  uint32_t src_x, uint32_t src_y)
+{
+	SVGASignedRect *rect = cmd;
+
+	/*
+	 * rects are relative to dest bounding box rect on screen object, so
+	 * translate to it later in post_clip
+	 */
+	rect->left = clip->x1;
+	rect->top = clip->y1;
+	rect->right = clip->x2;
+	rect->bottom = clip->y2;
+
+	return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+					  void *cmd, struct drm_rect *bb)
+{
+	struct vmw_du_update_plane_surface *srf_update;
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_rect src_bb;
+	struct vmw_kms_sou_dirty_cmd *blit;
+	SVGASignedRect *rect;
+	uint32_t num_hits;
+	int translate_src_x;
+	int translate_src_y;
+	int i;
+
+	srf_update = container_of(update, typeof(*srf_update), base);
+
+	blit = srf_update->cmd_start;
+	rect = (SVGASignedRect *)&blit[1];
+
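+	/* The number of encoded clip rects follows from the header size. */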
+	num_hits = (blit->header.size - sizeof(blit->body))/
+		sizeof(SVGASignedRect);
+
+	src_bb = *bb;
+
+	/* To translate bb back to fb src coord */
+	translate_src_x = (state->src_x >> 16) - state->crtc_x;
+	translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+	drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+	blit->body.srcRect.left = src_bb.x1;
+	blit->body.srcRect.top = src_bb.y1;
+	blit->body.srcRect.right = src_bb.x2;
+	blit->body.srcRect.bottom = src_bb.y2;
+
+	blit->body.destRect.left = bb->x1;
+	blit->body.destRect.top = bb->y1;
+	blit->body.destRect.right = bb->x2;
+	blit->body.destRect.bottom = bb->y2;
+
+	/* rects are relative to dest bb rect */
+	for (i = 0; i < num_hits; i++) {
+		rect->left -= bb->x1;
+		rect->top -= bb->y1;
+		rect->right -= bb->x1;
+		rect->bottom -= bb->y1;
+		rect++;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Display plane.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+					struct drm_plane *plane,
+					struct drm_plane_state *old_state,
+					struct vmw_framebuffer *vfb,
+					struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_surface srf_update;
+
+	memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+	srf_update.base.plane = plane;
+	srf_update.base.old_state = old_state;
+	srf_update.base.dev_priv = dev_priv;
+	srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	srf_update.base.vfb = vfb;
+	srf_update.base.out_fence = out_fence;
+	srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+	srf_update.base.cpu_blit = false;
+	srf_update.base.intr = true;
+
+	srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+	srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+	srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+	srf_update.base.clip = vmw_sou_surface_clip_rect;
+	srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+	return vmw_du_helper_plane_update(&srf_update.base);
+}
+
+static void
+vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
+				    struct drm_plane_state *old_state)
+{
+	struct drm_crtc *crtc = plane->state->crtc;
+	struct drm_pending_vblank_event *event = NULL;
+	struct vmw_fence_obj *fence = NULL;
+	int ret;
+
+	/* In case of device error, maintain consistent atomic state */
+	if (crtc && plane->state->fb) {
+		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+		struct vmw_framebuffer *vfb =
+			vmw_framebuffer_to_vfb(plane->state->fb);
+
+		if (vfb->bo)
+			ret = vmw_sou_plane_update_bo(dev_priv, plane,
+						      old_state, vfb, &fence);
+		else
+			ret = vmw_sou_plane_update_surface(dev_priv, plane,
+							   old_state, vfb,
+							   &fence);
+		if (ret != 0)
+			DRM_ERROR("Failed to update screen.\n");
+	} else {
+		/* Do nothing when fb or crtc is NULL (blanked crtc) */
+		return;
+	}
+
+	/* In the error case the vblank event is sent from vmw_du_crtc_atomic_flush */
+	event = crtc->state->event;
+	if (event && fence) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv,
+						   fence,
+						   &event->base,
+						   &event->event.vbl.tv_sec,
+						   &event->event.vbl.tv_usec,
+						   true);
+
+		if (unlikely(ret != 0))
+			DRM_ERROR("Failed to queue event on fence.\n");
+		else
+			crtc->state->event = NULL;
+	}
+
+	if (fence)
+		vmw_fence_obj_unreference(&fence);
+}
+
+
+static const struct drm_plane_funcs vmw_sou_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_primary_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_cursor_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+/*
+ * Atomic Helpers
+ */
+static const struct
+drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
+	.atomic_check = vmw_du_cursor_plane_atomic_check,
+	.atomic_update = vmw_du_cursor_plane_atomic_update,
+	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
+	.cleanup_fb = vmw_du_plane_cleanup_fb,
+};
+
+static const struct
+drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
+	.atomic_check = vmw_du_primary_plane_atomic_check,
+	.atomic_update = vmw_sou_primary_plane_atomic_update,
+	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
+	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
+};
+
+static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
+	.prepare = vmw_sou_crtc_helper_prepare,
+	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
+	.atomic_check = vmw_du_crtc_atomic_check,
+	.atomic_begin = vmw_du_crtc_atomic_begin,
+	.atomic_flush = vmw_du_crtc_atomic_flush,
+	.atomic_enable = vmw_sou_crtc_atomic_enable,
+	.atomic_disable = vmw_sou_crtc_atomic_disable,
+};
+
+
+static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_object_unit *sou;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_plane *primary, *cursor;
+	struct drm_crtc *crtc;
+	int ret;
+
+	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
+	if (!sou)
+		return -ENOMEM;
+
+	sou->base.unit = unit;
+	crtc = &sou->base.crtc;
+	encoder = &sou->base.encoder;
+	connector = &sou->base.connector;
+	primary = &sou->base.primary;
+	cursor = &sou->base.cursor;
+
+	sou->base.pref_active = (unit == 0);
+	sou->base.pref_width = dev_priv->initial_width;
+	sou->base.pref_height = dev_priv->initial_height;
+	sou->base.pref_mode = NULL;
+
+	/*
+	 * Remove this after enabling atomic because property values can
+	 * only exist in a state object
+	 */
+	sou->base.is_implicit = false;
+
+	/* Initialize primary plane */
+	vmw_du_plane_reset(primary);
+
+	ret = drm_universal_plane_init(dev, &sou->base.primary,
+				       0, &vmw_sou_plane_funcs,
+				       vmw_primary_plane_formats,
+				       ARRAY_SIZE(vmw_primary_plane_formats),
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize primary plane\n");
+		goto err_free;
+	}
+
+	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+	drm_plane_enable_fb_damage_clips(primary);
+
+	/* Initialize cursor plane */
+	vmw_du_plane_reset(cursor);
+
+	ret = drm_universal_plane_init(dev, &sou->base.cursor,
+			0, &vmw_sou_cursor_funcs,
+			vmw_cursor_plane_formats,
+			ARRAY_SIZE(vmw_cursor_plane_formats),
+			NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize cursor plane\n");
+		drm_plane_cleanup(&sou->base.primary);
+		goto err_free;
+	}
+
+	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);
+
+	vmw_du_connector_reset(connector);
+	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
+				 DRM_MODE_CONNECTOR_VIRTUAL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		goto err_free;
+	}
+
+	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
+	connector->status = vmw_du_connector_detect(connector, true);
+
+	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
+			       DRM_MODE_ENCODER_VIRTUAL, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize encoder\n");
+		goto err_free_connector;
+	}
+
+	(void) drm_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	ret = drm_connector_register(connector);
+	if (ret) {
+		DRM_ERROR("Failed to register connector\n");
+		goto err_free_encoder;
+	}
+
+
+	vmw_du_crtc_reset(crtc);
+	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
+					&sou->base.cursor,
+					&vmw_screen_object_crtc_funcs, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize CRTC\n");
+		goto err_free_unregister;
+	}
+
+	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	return 0;
+
+err_free_unregister:
+	drm_connector_unregister(connector);
+err_free_encoder:
+	drm_encoder_cleanup(encoder);
+err_free_connector:
+	drm_connector_cleanup(connector);
+err_free:
+	kfree(sou);
+	return ret;
+}
+
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
+		DRM_INFO("Not using screen objects,"
+			 " missing cap SCREEN_OBJECT_2\n");
+		return -ENOSYS;
+	}
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		return ret;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+		vmw_sou_init(dev_priv, i);
+
+	dev_priv->active_display_unit = vmw_du_screen_object;
+
+	DRM_INFO("Screen Objects Display Unit initialized\n");
+
+	return 0;
+}
+
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
+{
+	struct vmw_buffer_object *buf =
+		container_of(framebuffer, struct vmw_framebuffer_bo,
+			     base)->buffer;
+	int depth = framebuffer->base.format->depth;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+
+	/* Emulate RGBA support: contrary to svga_reg.h, a color depth of 32
+	 * is not supported by hosts. This only matters if we later read the
+	 * value back and expect to get what we uploaded.
+	 */
+	if (depth == 32)
+		depth = 24;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	/* Buffer is reserved in vram or GMR */
+	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * so that they are relative to the encoded destination bounding box.
+ */
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+	int i;
+
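+	/* No clip rects were encoded; commit nothing. */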
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	cmd->header.size = sizeof(cmd->body) + region_size;
+
+	/*
+	 * Use the destination bounding box to specify the destination and
+	 * source bounding regions.
+	 */
+	cmd->body.destRect.left = sdirty->left;
+	cmd->body.destRect.right = sdirty->right;
+	cmd->body.destRect.top = sdirty->top;
+	cmd->body.destRect.bottom = sdirty->bottom;
+
+	cmd->body.srcRect.left = sdirty->left + trans_x;
+	cmd->body.srcRect.right = sdirty->right + trans_x;
+	cmd->body.srcRect.top = sdirty->top + trans_y;
+	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+	cmd->body.srcImage.sid = sdirty->sid;
+	cmd->body.destScreenId = dirty->unit->unit;
+
+	/* Blits are relative to the destination rect. Translate. */
+	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+		blit->left -= sdirty->left;
+		blit->right -= sdirty->left;
+		blit->top -= sdirty->top;
+		blit->bottom -= sdirty->top;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+	/* Destination rect. */
+	blit += dirty->num_hits;
+	blit->left = dirty->unit_x1;
+	blit->top = dirty->unit_y1;
+	blit->right = dirty->unit_x2;
+	blit->bottom = dirty->unit_y2;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence,
+				 struct drm_crtc *crtc)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_kms_sou_surface_dirty sdirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	int ret;
+
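+	/* Default to the surface backing the framebuffer. */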
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+					  NULL, NULL);
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
+	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+	sdirty.base.clip = vmw_sou_surface_clip;
+	sdirty.base.dev_priv = dev_priv;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+	  sizeof(SVGASignedRect) * num_clips;
+	sdirty.base.crtc = crtc;
+
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+	sdirty.dst_x = dest_x;
+	sdirty.dst_y = dest_y;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
+
+	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
+}
+
+/**
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of GMRFB-to-screen blits.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of GMRFB-to-screen blit clips.
+ */
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_bo_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_bo_clip - Callback to encode a GMRFB-to-screen blit cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = dirty->unit->unit;
+	blit->body.srcOrigin.x = dirty->fb_x;
+	blit->body.srcOrigin.y = dirty->fb_y;
+	blit->body.destRect.left = dirty->unit_x1;
+	blit->body.destRect.top = dirty->unit_y1;
+	blit->body.destRect.right = dirty->unit_x2;
+	blit->body.destRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
+ * @clips: Array of clip rects.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				struct drm_vmw_rect *vclips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence,
+				struct drm_crtc *crtc)
+{
+	struct vmw_buffer_object *buf =
+		container_of(framebuffer, struct vmw_framebuffer_bo,
+			     base)->buffer;
+	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	int ret;
+
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
+	ret = do_bo_define_gmrfb(dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.crtc = crtc;
+	dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+	dirty.clip = vmw_sou_bo_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   0, 0, num_clips, increment, &dirty);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
+
+	return ret;
+
+out_revert:
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+
+	return ret;
+}
+
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_readback_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+	blit->body.srcScreenId = dirty->unit->unit;
+	blit->body.destOrigin.x = dirty->fb_x;
+	blit->body.destOrigin.y = dirty->fb_y;
+	blit->body.srcRect.left = dirty->unit_x1;
+	blit->body.srcRect.top = dirty->unit_y1;
+	blit->body.srcRect.right = dirty->unit_x2;
+	blit->body.srcRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a buffer-object backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ * @crtc: If crtc is passed, readback on that crtc only.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips,
+			 struct drm_crtc *crtc)
+{
+	struct vmw_buffer_object *buf =
+		container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
+	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	int ret;
+
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, NULL, true);
+	if (ret)
+		goto out_unref;
+
+	ret = do_bo_define_gmrfb(dev_priv, vfb);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.crtc = crtc;
+	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+	dirty.clip = vmw_sou_readback_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+				   0, 0, num_clips, 1, &dirty);
+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);
+
+	return ret;
+
+out_revert:
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 0000000..e139fdf
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,1054 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+struct vmw_shader {
+	struct vmw_resource res;
+	SVGA3dShaderType type;
+	uint32_t size;
+	uint8_t num_input_sig;
+	uint8_t num_output_sig;
+};
+
+struct vmw_user_shader {
+	struct ttm_base_object base;
+	struct vmw_shader shader;
+};
+
+struct vmw_dx_shader {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	u32 id;
+	bool committed;
+	struct list_head cotable_head;
+};
+
+static uint64_t vmw_user_shader_size;
+static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+
+static const struct vmw_user_resource_conv user_shader_conv = {
+	.object_type = VMW_RES_SHADER,
+	.base_obj_to_res = vmw_user_shader_base_to_res,
+	.res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+	&user_shader_conv;
+
+
+static const struct vmw_res_func vmw_gb_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
+	.type_name = "guest backed shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_shader_create,
+	.destroy = vmw_gb_shader_destroy,
+	.bind = vmw_gb_shader_bind,
+	.unbind = vmw_gb_shader_unbind
+};
+
+static const struct vmw_res_func vmw_dx_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
+	.type_name = "dx shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_shader_create,
+	/*
+	 * The destroy callback is only called with a committed resource on
+	 * context destroy, in which case we destroy the cotable anyway,
+	 * so there's no need to destroy DX shaders separately.
+	 */
+	.destroy = NULL,
+	.bind = vmw_dx_shader_bind,
+	.unbind = vmw_dx_shader_unbind,
+	.commit_notify = vmw_dx_shader_commit_notify,
+};
+
+/**
+ * Shader management:
+ */
+
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_shader, res);
+}
+
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_shader, res);
+}
+
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+	if (likely(res->func->destroy))
+		(void) res->func->destroy(res);
+	else
+		res->id = -1;
+}
+
+
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+			      struct vmw_resource *res,
+			      uint32_t size,
+			      uint64_t offset,
+			      SVGA3dShaderType type,
+			      uint8_t num_input_sig,
+			      uint8_t num_output_sig,
+			      struct vmw_buffer_object *byte_code,
+			      void (*res_free) (struct vmw_resource *res))
+{
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	int ret;
+
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_gb_shader_func);
+
+	if (unlikely(ret != 0)) {
+		if (res_free)
+			res_free(res);
+		else
+			kfree(res);
+		return ret;
+	}
+
+	res->backup_size = size;
+	if (byte_code) {
+		res->backup = vmw_bo_reference(byte_code);
+		res->backup_offset = offset;
+	}
+	shader->size = size;
+	shader->type = type;
+	shader->num_input_sig = num_input_sig;
+	shader->num_output_sig = num_output_sig;
+
+	res->hw_destroy = vmw_hw_shader_destroy;
+	return 0;
+}
+
+/*
+ * GB shader code:
+ */
+
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBShader body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a shader id.\n");
+		goto out_no_id;
+	}
+
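+	/* Guest-backed shader ids are capped at VMWGFX_NUM_GB_SHADER. */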
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.type = shader->type;
+	cmd->body.sizeInBytes = shader->size;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_bo_fence_single(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBShader body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(shader->cotable,
+					 &shader->cotable_head);
+		shader->committed = true;
+		res->id = shader->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&shader->cotable_head);
+		shader->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
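+	/* Already bound or never committed: nothing to do. */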
+	if (!list_empty(&shader->cotable_head) || !shader->committed)
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = shader->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!shader->committed);
+
+	if (vmw_resource_mob_attached(res)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_shader_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = shader->id;
+	return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_shader_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
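+	/* Not on the cotable list: the MOB is already unbound. */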
+	if (list_empty(&shader->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!shader->committed);
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_shader_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+	vmw_bo_fence_single(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+				      struct list_head *list,
+				      bool readback)
+{
+	struct vmw_dx_shader *entry, *next;
+
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_shader_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+	vmw_resource_unreference(&shader->cotable);
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+		      struct vmw_resource *ctx,
+		      u32 user_key,
+		      SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_dx_shader *shader;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct ttm_operation_ctx ttm_opt_ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (!vmw_shader_dx_size)
+		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+				   &ttm_opt_ctx);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		return ret;
+	}
+
+	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+	if (!shader) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+		return -ENOMEM;
+	}
+
+	res = &shader->res;
+	shader->ctx = ctx;
+	shader->cotable = vmw_resource_reference
+		(vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
+	shader->id = user_key;
+	shader->committed = false;
+	INIT_LIST_HEAD(&shader->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_shader_res_free, &vmw_dx_shader_func);
+	if (ret)
+		goto out_resource_init;
+
+	/*
+	 * The user_key name-space is not per shader type for DX shaders,
+	 * so when hashing, use a single zero shader type.
+	 */
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, 0),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = shader->id;
+	res->hw_destroy = vmw_hw_shader_destroy;
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_shader, base)->
+		 shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+	struct vmw_user_shader *ushader =
+		container_of(res, struct vmw_user_shader, shader.res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(ushader, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_shader_size);
+}
+
+static void vmw_shader_free(struct vmw_resource *res)
+{
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_shader_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->handle,
+					 TTM_REF_USAGE);
+}
+
+static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
+				 struct vmw_buffer_object *buffer,
+				 size_t shader_size,
+				 size_t offset,
+				 SVGA3dShaderType shader_type,
+				 uint8_t num_input_sig,
+				 uint8_t num_output_sig,
+				 struct ttm_object_file *tfile,
+				 u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+
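+	/* Compute the accounting size of a user shader object once. */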
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) +
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   &ctx);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(!ushader)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, num_input_sig,
+				 num_output_sig, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.handle;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
+
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+					     struct vmw_buffer_object *buffer,
+					     size_t shader_size,
+					     size_t offset,
+					     SVGA3dShaderType shader_type)
+{
+	struct vmw_shader *shader;
+	struct vmw_resource *res;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (unlikely(vmw_shader_size == 0))
+		vmw_shader_size =
+			ttm_round_pot(sizeof(struct vmw_shader)) +
+			VMW_IDA_ACC_SIZE;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_shader_size,
+				   &ctx);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out_err;
+	}
+
+	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
+	if (unlikely(!shader)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_shader_size);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	res = &shader->res;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, 0, 0, buffer,
+				 vmw_shader_free);
+
+out_err:
+	return ret ? ERR_PTR(ret) : res;
+}
+
+
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+			     enum drm_vmw_shader_type shader_type_drm,
+			     u32 buffer_handle, size_t size, size_t offset,
+			     uint8_t num_input_sig, uint8_t num_output_sig,
+			     uint32_t *shader_handle)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_buffer_object *buffer = NULL;
+	SVGA3dShaderType shader_type;
+	int ret;
+
+	if (buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_bo_lookup(tfile, buffer_handle,
+					     &buffer, NULL);
+		if (unlikely(ret != 0)) {
+			VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
+			return ret;
+		}
+
+		if ((u64)buffer->base.num_pages * PAGE_SIZE <
+		    (u64)size + (u64)offset) {
+			VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
+			ret = -EINVAL;
+			goto out_bad_arg;
+		}
+	}
+
+	switch (shader_type_drm) {
+	case drm_vmw_shader_type_vs:
+		shader_type = SVGA3D_SHADERTYPE_VS;
+		break;
+	case drm_vmw_shader_type_ps:
+		shader_type = SVGA3D_SHADERTYPE_PS;
+		break;
+	default:
+		VMW_DEBUG_USER("Illegal shader type.\n");
+		ret = -EINVAL;
+		goto out_bad_arg;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		goto out_bad_arg;
+
+	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+				    shader_type, num_input_sig,
+				    num_output_sig, tfile, shader_handle);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+out_bad_arg:
+	vmw_bo_unreference(&buffer);
+	return ret;
+}
+
+/**
+ * vmw_shader_id_ok - Check whether a compat shader user key and
+ * shader type are within valid bounds.
+ *
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
+ *
+ * Returns true if valid, false if not.
+ */
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+{
+	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
+}
+
+/**
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
+ *
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
+ *
+ * Returns a hash key suitable for a command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+{
+	return user_key | (shader_type << 20);
+}
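+
+/*
+ * Resulting key layout, given the bounds checked in vmw_shader_id_ok():
+ *
+ *	bits  0..19: user_key    (user_key <= (1 << 20) - 1)
+ *	bits 20..23: shader_type (shader_type < 16)
+ */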
+
+/**
+ * vmw_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+		      u32 user_key, SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_resource *dummy;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type),
+				     list, &dummy);
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @size: Size of the shader bytecode in bytes.
+ * @list: Caller's list of staged command buffer resource actions.
+ *
+ */
+int vmw_compat_shader_add(struct vmw_private *dev_priv,
+			  struct vmw_cmdbuf_res_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct list_head *list)
+{
+	struct ttm_operation_ctx ctx = { false, true };
+	struct vmw_buffer_object *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	int ret;
+	struct vmw_resource *res;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(!buf))
+		return -ENOMEM;
+
+	ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_bo_bo_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	ret = ttm_bo_reserve(&buf->base, false, true, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
+	}
+
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
+	if (IS_ERR(res)) {
+		ret = PTR_ERR(res);
+		goto no_reserve;
+	}
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, shader_type),
+				 res, list);
+	vmw_resource_unreference(&res);
+no_reserve:
+	vmw_bo_unreference(&buf);
+out:
+	return ret;
+}
+
+/**
+ * vmw_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the command buffer managed resource manager identifying
+ * the shader namespace.
+ * @user_key: The user space id of the shader.
+ * @shader_type: The shader type.
+ *
+ * Returns a refcounted pointer to a struct vmw_resource if the shader was
+ * found. An error pointer otherwise.
+ */
+struct vmw_resource *
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key,
+		  SVGA3dShaderType shader_type)
+{
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return ERR_PTR(-EINVAL);
+
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+
+	return vmw_shader_define(dev, file_priv, arg->shader_type,
+				 arg->buffer_handle,
+				 arg->size, arg->offset,
+				 0, 0,
+				 &arg->shader_handle);
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
new file mode 100644
index 0000000..73e9a48
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+
+/**
+ * struct vmw_user_simple_resource - User-space simple resource struct
+ *
+ * @base: The TTM base object implementing user-space visibility.
+ * @account_size: How much memory was accounted for this object.
+ * @simple: The embedded struct vmw_simple_resource.
+ */
+struct vmw_user_simple_resource {
+	struct ttm_base_object base;
+	size_t account_size;
+	struct vmw_simple_resource simple;
+/*
+ * Nothing to be placed after @simple, since size of @simple is
+ * unknown.
+ */
+};
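+
+/*
+ * Layout note: @simple embeds a type-specific payload of func->size bytes,
+ * which is why nothing may follow it. The whole object is therefore sized in
+ * vmw_simple_resource_create_ioctl() below as
+ *
+ *	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
+ *		     func->size;
+ */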
+
+
+/**
+ * vmw_simple_resource_init - Initialize a simple resource object.
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @simple: The struct vmw_simple_resource to initialize.
+ * @data: Data passed to the information initialization function.
+ * @res_free: Function pointer to destroy the simple resource.
+ *
+ * Returns:
+ *   0 if successful.
+ *   Negative error value if error, in which case the resource will have been
+ * freed.
+ */
+static int vmw_simple_resource_init(struct vmw_private *dev_priv,
+				    struct vmw_simple_resource *simple,
+				    void *data,
+				    void (*res_free)(struct vmw_resource *res))
+{
+	struct vmw_resource *res = &simple->res;
+	int ret;
+
+	ret = vmw_resource_init(dev_priv, res, false, res_free,
+				&simple->func->res_func);
+
+	if (ret) {
+		res_free(res);
+		return ret;
+	}
+
+	ret = simple->func->init(res, data);
+	if (ret) {
+		vmw_resource_unreference(&res);
+		return ret;
+	}
+
+	simple->res.hw_destroy = simple->func->hw_destroy;
+
+	return 0;
+}
+
+/**
+ * vmw_simple_resource_free - Free a simple resource object.
+ *
+ * @res: The struct vmw_resource member of the simple resource object.
+ *
+ * Frees memory and memory accounting for the object.
+ */
+static void vmw_simple_resource_free(struct vmw_resource *res)
+{
+	struct vmw_user_simple_resource *usimple =
+		container_of(res, struct vmw_user_simple_resource,
+			     simple.res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t size = usimple->account_size;
+
+	ttm_base_object_kfree(usimple, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_simple_resource_base_release - TTM object release callback
+ *
+ * @p_base: The struct ttm_base_object member of the simple resource object.
+ *
+ * Called when the last reference to the embedded struct ttm_base_object is
+ * gone. Typically results in an object free, unless there are other
+ * references to the embedded struct vmw_resource.
+ */
+static void vmw_simple_resource_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_simple_resource *usimple =
+		container_of(base, struct vmw_user_simple_resource, base);
+	struct vmw_resource *res = &usimple->simple.res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to
+ * create a struct vmw_simple_resource.
+ *
+ * @dev: Pointer to a struct drm device.
+ * @data: Ioctl argument.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @func: Pointer to a struct vmw_simple_resource_func identifying the
+ * simple resource type.
+ *
+ * Returns:
+ *   0 on success,
+ *   Negative error value on error.
+ */
+int
+vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv,
+				 const struct vmw_simple_resource_func *func)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_simple_resource *usimple;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	size_t alloc_size;
+	size_t account_size;
+	int ret;
+
+	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
+	  func->size;
+	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+		TTM_OBJ_EXTRA_SIZE;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (ret)
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
+				   &ctx);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for %s"
+				  " creation.\n", func->res_func.type_name);
+
+		goto out_ret;
+	}
+
+	usimple = kzalloc(alloc_size, GFP_KERNEL);
+	if (!usimple) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    account_size);
+		ret = -ENOMEM;
+		goto out_ret;
+	}
+
+	usimple->simple.func = func;
+	usimple->account_size = account_size;
+	res = &usimple->simple.res;
+	usimple->base.shareable = false;
+	usimple->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+	ret = vmw_simple_resource_init(dev_priv, &usimple->simple,
+				       data, vmw_simple_resource_free);
+	if (ret)
+		goto out_ret;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &usimple->base, false,
+				   func->ttm_res_type,
+				   &vmw_simple_resource_base_release, NULL);
+
+	if (ret) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	func->set_arg_handle(data, usimple->base.handle);
+out_err:
+	vmw_resource_unreference(&res);
+out_ret:
+	return ret;
+}
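+
+/*
+ * Usage sketch (hypothetical resource type "foo"; the real users live in
+ * other files, e.g. the stream objects in vmwgfx_va.c):
+ *
+ *	int vmw_foo_define_ioctl(struct drm_device *dev, void *data,
+ *				 struct drm_file *file_priv)
+ *	{
+ *		return vmw_simple_resource_create_ioctl(dev, data, file_priv,
+ *							&vmw_foo_func);
+ *	}
+ */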
+
+/**
+ * vmw_simple_resource_lookup - Look up a simple resource from its user-space
+ * handle.
+ *
+ * @tfile: struct ttm_object_file identifying the caller.
+ * @handle: The user-space handle.
+ * @func: The struct vmw_simple_resource_func identifying the simple resource
+ * type.
+ *
+ * Returns: Refcounted pointer to the embedded struct vmw_resource if
+ * successful. Error pointer otherwise.
+ */
+struct vmw_resource *
+vmw_simple_resource_lookup(struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   const struct vmw_simple_resource_func *func)
+{
+	struct vmw_user_simple_resource *usimple;
+	struct ttm_base_object *base;
+	struct vmw_resource *res;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (!base) {
+		VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n",
+			       func->res_func.type_name,
+			       (unsigned long) handle);
+		return ERR_PTR(-ESRCH);
+	}
+
+	if (ttm_base_object_type(base) != func->ttm_res_type) {
+		ttm_base_object_unref(&base);
+		VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n",
+			       func->res_func.type_name,
+			       (unsigned long) handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	usimple = container_of(base, typeof(*usimple), base);
+	res = vmw_resource_reference(&usimple->simple.res);
+	ttm_base_object_unref(&base);
+
+	return res;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 0000000..6380736
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be destroyed,
+ * otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views pointing
+ * to it, and all context bindings of that view. Similarly we must restore
+ * the view bindings, views and surfaces pointed to by the views when a
+ * context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
+ * @rcu: RCU head used to defer freeing of the view structure.
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to_view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+	struct rcu_head rcu;
+	struct vmw_resource res;
+	struct vmw_resource *ctx;      /* Immutable */
+	struct vmw_resource *srf;      /* Immutable */
+	struct vmw_resource *cotable;  /* Immutable */
+	struct list_head srf_head;     /* Protected by binding_mutex */
+	struct list_head cotable_head; /* Protected by binding_mutex */
+	unsigned view_type;            /* Immutable */
+	unsigned view_id;              /* Immutable */
+	u32 cmd_size;                  /* Immutable */
+	bool committed;                /* Protected by binding_mutex */
+	u32 cmd[1];                    /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+	.res_type = vmw_res_view,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "DX view",
+	.backup_placement = NULL,
+	.create = vmw_view_create,
+	.commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+	uint32 view_id;
+	uint32 sid;
+};
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (state == VMW_CMDBUF_RES_ADD) {
+		struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+		list_add_tail(&view->srf_head, &srf->view_list);
+		vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+		view->committed = true;
+		res->id = view->view_id;
+
+	} else {
+		list_del_init(&view->cotable_head);
+		list_del_init(&view->srf_head);
+		view->committed = false;
+		res->id = -1;
+	}
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct vmw_view_define body;
+	} *cmd;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!view->committed) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return 0;
+	}
+
+	cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
+	if (!cmd) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	memcpy(cmd, &view->cmd, view->cmd_size);
+	WARN_ON(cmd->body.view_id != view->view_id);
+	/* Sid may have changed due to surface eviction. */
+	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+	cmd->body.sid = view->srf->id;
+	vmw_fifo_commit(res->dev_priv, view->cmd_size);
+	res->id = view->view_id;
+	list_add_tail(&view->srf_head, &srf->view_list);
+	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_view *view = vmw_view(res);
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd;
+
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	if (!view->committed || res->id == -1)
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.view_id = view->view_id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&view->cotable_head);
+	list_del_init(&view->srf_head);
+
+	return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	WARN_ON(vmw_view_destroy(res));
+	res->id = -1;
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a view key suitable for a command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+	return user_key | (view_type << 20);
+}
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) is
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+	return (user_key < SVGA_COTABLE_MAX_IDS &&
+		view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	vmw_resource_unreference(&view->cotable);
+	vmw_resource_unreference(&view->srf);
+	kfree_rcu(view, rcu);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the cmdbuf resource manager identifying the view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+		 struct vmw_resource *ctx,
+		 struct vmw_resource *srf,
+		 enum vmw_view_type view_type,
+		 u32 user_key,
+		 const void *cmd,
+		 size_t cmd_size,
+		 struct list_head *list)
+{
+	static const size_t vmw_view_define_sizes[] = {
+		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+	};
+
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct vmw_resource *res;
+	struct vmw_view *view;
+	struct ttm_operation_ctx ttm_opt_ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	size_t size;
+	int ret;
+
+	if (cmd_size != vmw_view_define_sizes[view_type] +
+	    sizeof(SVGA3dCmdHeader)) {
+		VMW_DEBUG_USER("Illegal view create command size.\n");
+		return -EINVAL;
+	}
+
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		VMW_DEBUG_USER("Illegal view add view id.\n");
+		return -EINVAL;
+	}
+
+	size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for view creation\n");
+		return ret;
+	}
+
+	view = kmalloc(size, GFP_KERNEL);
+	if (!view) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+		return -ENOMEM;
+	}
+
+	res = &view->res;
+	view->ctx = ctx;
+	view->srf = vmw_resource_reference(srf);
+	view->cotable = vmw_resource_reference
+		(vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
+	view->view_type = view_type;
+	view->view_id = user_key;
+	view->cmd_size = cmd_size;
+	view->committed = false;
+	INIT_LIST_HEAD(&view->srf_head);
+	INIT_LIST_HEAD(&view->cotable_head);
+	memcpy(&view->cmd, cmd, cmd_size);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_view_res_free, &vmw_view_func);
+	if (ret)
+		goto out_resource_init;
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+				 vmw_view_key(user_key, view_type),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = view->view_id;
+	res->hw_destroy = vmw_hw_view_destroy;
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the view namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+		    u32 user_key, enum vmw_view_type view_type,
+		    struct list_head *list,
+		    struct vmw_resource **res_p)
+{
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		VMW_DEBUG_USER("Illegal view remove view id.\n");
+		return -EINVAL;
+	}
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type),
+				     list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list,
+				   bool readback)
+{
+	struct vmw_view *entry, *next;
+
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+	list_for_each_entry_safe(entry, next, list, cotable_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list)
+{
+	struct vmw_view *entry, *next;
+
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+	list_for_each_entry_safe(entry, next, list, srf_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself is holding a reference, so as long as
+ * the view resource is alive, the surface resource will be.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+	return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+				     enum vmw_view_type view_type,
+				     u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type));
+}
+
+/**
+ * vmw_view_dirtying - Return whether a view type is dirtying its resource
+ * @res: Pointer to the view
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * view pointing to it, we need to determine whether that resource will
+ * be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently only rendertarget- and depth-stencil views are
+ * capable of dirtying their resource.
+ *
+ * Return: Whether the view type of @res dirties the resource it points to.
+ */
+u32 vmw_view_dirtying(struct vmw_resource *res)
+{
+	static u32 view_is_dirtying[vmw_view_max] = {
+		[vmw_view_rt] = VMW_RES_DIRTY_SET,
+		[vmw_view_ds] = VMW_RES_DIRTY_SET,
+	};
+
+	/* Update this function as we add more view types */
+	BUILD_BUG_ON(vmw_view_max != 3);
+	return view_is_dirtying[vmw_view(res)->view_type];
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to dump various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+	/* Assert that our vmw_view_cmd_to_type() function is correct. */
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+	/* Assert that our "one body fits all" assumption is valid */
+	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+	/* Assert that the view key space can hold all view ids. */
+	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+	/*
+	 * Assert that the offset of sid in all view define commands
+	 * is what we assume it to be.
+	 */
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 0000000..1256504
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+	vmw_view_sr,
+	vmw_view_rt,
+	vmw_view_ds,
+	vmw_view_max,
+};
+
+enum vmw_so_type {
+	vmw_so_el,
+	vmw_so_bs,
+	vmw_so_ds,
+	vmw_so_rs,
+	vmw_so_ss,
+	vmw_so_so,
+	vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case,
+ * the size of this union will not equal the size of an u32, and the
+ * assumption is invalid, and we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+	struct SVGA3dCmdDXDestroyShaderResourceView srv;
+	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	u32 view_id;
+};
+
+/* Map enum vmw_view_type to view destroy command ids*/
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+	if (tmp > (u32)vmw_view_max)
+		return vmw_view_max;
+
+	return (enum vmw_view_type) tmp;
+}
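+
+/*
+ * Worked example, based on the command-id layout asserted in
+ * vmw_so_build_asserts(): SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW is
+ * SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3, so tmp = 3 / 2 = 1, which
+ * is vmw_view_rt.
+ */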
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a similar strategy as vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+	switch (id) {
+	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+		return vmw_so_el;
+	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+		return vmw_so_bs;
+	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+		return vmw_so_ds;
+	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+		return vmw_so_rs;
+	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+		return vmw_so_ss;
+	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+		return vmw_so_so;
+	default:
+		break;
+	}
+	return vmw_so_max;
+}
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+			struct vmw_resource *ctx,
+			struct vmw_resource *srf,
+			enum vmw_view_type view_type,
+			u32 user_key,
+			const void *cmd,
+			size_t cmd_size,
+			struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key, enum vmw_view_type view_type,
+			   struct list_head *list,
+			   struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *list,
+					  bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+					    enum vmw_view_type view_type,
+					    u32 user_key);
+extern u32 vmw_view_dirtying(struct vmw_resource *res);
+#endif
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 0000000..41a96fb
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1912 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/******************************************************************************
+ *
+ * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+
+#define vmw_crtc_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+	SAME_AS_DISPLAY = 0,
+	SEPARATE_SURFACE,
+	SEPARATE_BO
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
+ * @pitch: Pitch of the buffer or surface involved in the transfer.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+	struct vmw_kms_dirty base;
+	SVGA3dTransferType  transfer;
+	s32 left, right, top, bottom;
+	s32 fb_left, fb_top;
+	u32 pitch;
+	union {
+		struct vmw_buffer_object *buf;
+		u32 sid;
+	};
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+	SVGA3dCmdHeader     header;
+	SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+	SVGA3dCmdHeader      header;
+	SVGA3dCmdSurfaceCopy body;
+};
+
+struct vmw_stdu_update_gb_image {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBImage body;
+};
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed.  The dimensions of this will always
+ *               match the display mode.  If the display mode matches
+ *               content_vfbs dimensions, then this is a pointer into the
+ *               corresponding field in content_vfbs.  If not, then this
+ *               is a separate buffer to which content_vfbs will blit.
+ * @content_fb_type: content_fb type
+ * @display_width: Current screen target width.
+ * @display_height: Current screen target height.
+ * @defined:  true if the current display unit has been initialized
+ * @cpp: Bytes per pixel, used for CPU blits.
+ */
+struct vmw_screen_target_display_unit {
+	struct vmw_display_unit base;
+	struct vmw_surface *display_srf;
+	enum stdu_content_type content_fb_type;
+	s32 display_width, display_height;
+
+	bool defined;
+
+	/* For CPU Blit */
+	unsigned int cpp;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ * @mode: The mode to set.
+ * @crtc_x: X coordinate of screen target relative to framebuffer origin.
+ * @crtc_y: Y coordinate of screen target relative to framebuffer origin.
+ *
+ * Creates an STDU that we can use later.  This function is called whenever the
+ * framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu,
+			      struct drm_display_mode *mode,
+			      int crtc_x, int crtc_y)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBScreenTarget body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id   = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.width  = mode->hdisplay;
+	cmd->body.height = mode->vdisplay;
+	cmd->body.flags  = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+	cmd->body.dpi    = 0;
+	cmd->body.xRoot  = crtc_x;
+	cmd->body.yRoot  = crtc_y;
+
+	stdu->base.set_gui_x = cmd->body.xRoot;
+	stdu->base.set_gui_y = cmd->body.yRoot;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	stdu->defined = true;
+	stdu->display_width  = mode->hdisplay;
+	stdu->display_height = mode->vdisplay;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Resource to bind to the screen target.  Set to NULL to blank the screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping.
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+			    struct vmw_screen_target_display_unit *stdu,
+			    const struct vmw_resource *res)
+{
+	SVGA3dSurfaceImageId image;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBScreenTarget body;
+	} *cmd;
+
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined\n");
+		return -EINVAL;
+	}
+
+	/* Set up image using information in vfb */
+	memset(&image, 0, sizeof(image));
+	image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id   = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.image  = image;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+				     s32 left, s32 right, s32 top, s32 bottom)
+{
+	struct vmw_stdu_update *update = cmd;
+
+	update->header.id   = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+	update->header.size = sizeof(update->body);
+
+	update->body.stid   = unit;
+	update->body.rect.x = left;
+	update->body.rect.y = top;
+	update->body.rect.w = right - left;
+	update->body.rect.h = bottom - top;
+}
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely. Typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct vmw_stdu_update *cmd;
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined");
+		return -EINVAL;
+	}
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	vmw_stdu_populate_update(cmd, stdu->base.unit,
+				 0, stdu->display_width,
+				 0, stdu->display_height);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+			       struct vmw_screen_target_display_unit *stdu)
+{
+	int    ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBScreenTarget body;
+	} *cmd;
+
+
+	/* Nothing to do if not successfully defined */
+	if (unlikely(!stdu->defined))
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	cmd->header.id   = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW");
+
+	stdu->defined = false;
+	stdu->display_width  = 0;
+	stdu->display_height = 0;
+
+	return ret;
+}
+
+
+/**
+ * vmw_stdu_crtc_mode_set_nofb - Updates screen target size
+ *
+ * @crtc: CRTC associated with the screen target
+ *
+ * This function defines/destroys a screen target
+ *
+ */
+static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_connector_state *conn_state;
+	struct vmw_connector_state *vmw_conn_state;
+	int x, y, ret;
+
+	stdu = vmw_crtc_to_stdu(crtc);
+	dev_priv = vmw_priv(crtc->dev);
+	conn_state = stdu->base.connector.state;
+	vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+	if (stdu->defined) {
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (ret)
+			DRM_ERROR("Failed to blank CRTC\n");
+
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		if (ret)
+			DRM_ERROR("Failed to destroy Screen Target\n");
+
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+	}
+
+	if (!crtc->state->enable)
+		return;
+
+	x = vmw_conn_state->gui_x;
+	y = vmw_conn_state->gui_y;
+
+	vmw_svga_enable(dev_priv);
+	ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
+
+	if (ret)
+		DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
+			  crtc->x, crtc->y);
+}
+
+
+static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
+					struct drm_crtc_state *old_state)
+{
+}
+
+static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_state)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	int ret;
+
+
+	if (!crtc) {
+		DRM_ERROR("CRTC is NULL\n");
+		return;
+	}
+
+	stdu     = vmw_crtc_to_stdu(crtc);
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (stdu->defined) {
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (ret)
+			DRM_ERROR("Failed to blank CRTC\n");
+
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		if (ret)
+			DRM_ERROR("Failed to destroy Screen Target\n");
+
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+	}
+}
+
+/**
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+	blit += dirty->num_hits;
+	blit->srcx = dirty->fb_x;
+	blit->srcy = dirty->fb_y;
+	blit->x = dirty->unit_x1;
+	blit->y = dirty->unit_y1;
+	blit->d = 1;
+	blit->w = dirty->unit_x2 - dirty->unit_x1;
+	blit->h = dirty->unit_y2 - dirty->unit_y1;
+	dirty->num_hits++;
+
+	if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+		return;
+
+	/* Destination bounding box */
+	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+	SVGA3dCmdSurfaceDMASuffix *suffix =
+		(SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+	size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd->header.size = sizeof(cmd->body) + blit_size;
+	vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+	cmd->body.guest.pitch = ddirty->pitch;
+	cmd->body.host.sid = stdu->display_srf->res.id;
+	cmd->body.host.face = 0;
+	cmd->body.host.mipmap = 0;
+	cmd->body.transfer = ddirty->transfer;
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+		blit_size += sizeof(struct vmw_stdu_update);
+
+		vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+					 ddirty->left, ddirty->right,
+					 ddirty->top, ddirty->bottom);
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+	stdu->display_srf->res.res_dirty = true;
+	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->right = ddirty->bottom = S32_MIN;
+}
+
+
+/**
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
+ *
+ * @dirty: The closure structure.
+ *
+ * This function calculates the bounding box for all the incoming clips.
+ */
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+
+	dirty->num_hits = 1;
+
+	/* Calculate destination bounding box */
+	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+	/*
+	 * Calculate content bounding box.  We only need the top-left
+	 * coordinate because width and height will be the same as the
+	 * destination bounding box above
+	 */
+	ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+	ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
+}
+
+
+/**
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object
+ *
+ * @dirty: The closure structure.
+ *
+ * For the special case when we cannot create a proxy surface in a
+ * 2D VM, we have to do a CPU blit ourselves.
+ */
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	s32 width, height;
+	s32 src_pitch, dst_pitch;
+	struct ttm_buffer_object *src_bo, *dst_bo;
+	u32 src_offset, dst_offset;
+	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
+
+	if (!dirty->num_hits)
+		return;
+
+	width = ddirty->right - ddirty->left;
+	height = ddirty->bottom - ddirty->top;
+
+	if (width == 0 || height == 0)
+		return;
+
+	/* Assume we are blitting from Guest (bo) to Host (display_srf) */
+	dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+	dst_bo = &stdu->display_srf->res.backup->base;
+	dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
+
+	src_pitch = ddirty->pitch;
+	src_bo = &ddirty->buf->base;
+	src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
+
+	/* Swap src and dst if the assumption was wrong. */
+	if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
+		swap(dst_pitch, src_pitch);
+		swap(dst_bo, src_bo);
+		swap(src_offset, dst_offset);
+	}
+
+	(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+			       src_bo, src_offset, src_pitch,
+			       width * stdu->cpp, height, &diff);
+
+	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
+	    drm_rect_visible(&diff.rect)) {
+		struct vmw_private *dev_priv;
+		struct vmw_stdu_update *cmd;
+		struct drm_clip_rect region;
+		int ret;
+
+		/* We are updating the actual surface, not a proxy */
+		region.x1 = diff.rect.x1;
+		region.x2 = diff.rect.x2;
+		region.y1 = diff.rect.y1;
+		region.y2 = diff.rect.y2;
+		ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
+					   1, 1);
+		if (ret)
+			goto out_cleanup;
+
+
+		dev_priv = vmw_priv(stdu->base.crtc.dev);
+		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+		if (!cmd)
+			goto out_cleanup;
+
+		vmw_stdu_populate_update(cmd, stdu->base.unit,
+					 region.x1, region.x2,
+					 region.y1, region.y2);
+
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+out_cleanup:
+	ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
+	ddirty->right = ddirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @user_fence_rep: User-space provided structure in which fence information
+ * is returned. May be set to NULL, see @file_priv.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @crtc: If crtc is passed, perform stdu dma on that crtc only.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible,
+		     struct drm_crtc *crtc)
+{
+	struct vmw_buffer_object *buf =
+		container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
+	struct vmw_stdu_dirty ddirty;
+	int ret;
+	bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+
+	/*
+	 * VMs without 3D support don't have the surface DMA command and
+	 * we'll be using a CPU blit, and the framebuffer should be moved out
+	 * of VRAM.
+	 */
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
+	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+		SVGA3D_READ_HOST_VRAM;
+	ddirty.left = ddirty.top = S32_MAX;
+	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.fb_left = ddirty.fb_top = S32_MAX;
+	ddirty.pitch = vfb->base.pitches[0];
+	ddirty.buf = buf;
+	ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+	ddirty.base.clip = vmw_stdu_bo_clip;
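+	/*
+	 * Reserve space for the DMA command, one copy box per clip rect and
+	 * the DMA suffix; writes to the host additionally append a screen
+	 * target update command.
+	 */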
+	ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+		num_clips * sizeof(SVGA3dCopyBox) +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	if (to_surface)
+		ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+
+	if (cpu_blit) {
+		ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+		ddirty.base.clip = vmw_stdu_bo_cpu_clip;
+		ddirty.base.fifo_reserve_size = 0;
+	}
+
+	ddirty.base.crtc = crtc;
+
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+				   0, 0, num_clips, increment, &ddirty.base);
+
+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);
+	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
+}
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+
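+	/*
+	 * A copy box is only needed when blitting from a surface other than
+	 * the display surface; otherwise the screen target update encoded at
+	 * commit time is sufficient.
+	 */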
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		blit += dirty->num_hits;
+		blit->srcx = dirty->fb_x;
+		blit->srcy = dirty->fb_y;
+		blit->x = dirty->unit_x1;
+		blit->y = dirty->unit_y1;
+		blit->d = 1;
+		blit->w = dirty->unit_x2 - dirty->unit_x1;
+		blit->h = dirty->unit_y2 - dirty->unit_y1;
+	}
+
+	dirty->num_hits++;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_stdu_update *update;
+	size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+	size_t commit_size;
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+		cmd->header.size = sizeof(cmd->body) + blit_size;
+		cmd->body.src.sid = sdirty->sid;
+		cmd->body.dest.sid = stdu->display_srf->res.id;
+		update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+		commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+		stdu->display_srf->res.res_dirty = true;
+	} else {
+		update = dirty->cmd;
+		commit_size = sizeof(*update);
+	}
+
+	vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+				 sdirty->right, sdirty->top, sdirty->bottom);
+
+	vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence,
+			       struct drm_crtc *crtc)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_stdu_dirty sdirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+					  NULL, NULL);
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
+	if (vfbs->is_bo_proxy) {
+		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+		if (ret)
+			goto out_finish;
+	}
+
+	sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+	sdirty.base.clip = vmw_kms_stdu_surface_clip;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+		sizeof(SVGA3dCopyBox) * num_clips +
+		sizeof(struct vmw_stdu_update);
+	sdirty.base.crtc = crtc;
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+out_finish:
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
+
+	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
+}
+
+
+/*
+ *  Screen Target CRTC dispatch table
+ */
+static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_stdu_crtc_destroy,
+	.reset = vmw_du_crtc_reset,
+	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
+	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static const struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+	.destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.destroy = vmw_stdu_connector_destroy,
+	.reset = vmw_du_connector_reset,
+	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+	.atomic_destroy_state = vmw_du_connector_destroy_state,
+};
+
+
+static const struct
+drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Plane Functions
+ *****************************************************************************/
+
+
+
+/**
+ * vmw_stdu_primary_plane_cleanup_fb - Unpins the display surface
+ *
+ * @plane:  display plane
+ * @old_state: Contains the FB to clean up
+ *
+ * Unpins the display surface pinned in .prepare_fb().
+ */
+static void
+vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
+				  struct drm_plane_state *old_state)
+{
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+	if (vps->surf)
+		WARN_ON(!vps->pinned);
+
+	vmw_du_plane_cleanup_fb(plane, old_state);
+
+	vps->content_fb_type = SAME_AS_DISPLAY;
+	vps->cpp = 0;
+}
+
+
+
+/**
+ * vmw_stdu_primary_plane_prepare_fb - Readies the display surface
+ *
+ * @plane:  display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * This function allocates a new display surface if the content is
+ * backed by a buffer object.  The display surface is pinned here, and it'll
+ * be unpinned in .cleanup_fb()
+ *
+ * Returns 0 on success
+ */
+static int
+vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
+				  struct drm_plane_state *new_state)
+{
+	struct vmw_private *dev_priv = vmw_priv(plane->dev);
+	struct drm_framebuffer *new_fb = new_state->fb;
+	struct vmw_framebuffer *vfb;
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+	enum stdu_content_type new_content_type;
+	struct vmw_framebuffer_surface *new_vfbs;
+	struct drm_crtc *crtc = new_state->crtc;
+	uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
+	int ret;
+
+	/* No FB to prepare */
+	if (!new_fb) {
+		if (vps->surf) {
+			WARN_ON(vps->pinned != 0);
+			vmw_surface_unreference(&vps->surf);
+		}
+
+		return 0;
+	}
+
+	vfb = vmw_framebuffer_to_vfb(new_fb);
+	new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
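+	/*
+	 * The content fb can be scanned out directly only if it is a surface
+	 * of exactly the display size; otherwise a separate display surface
+	 * is needed and the content (a buffer object or a differently sized
+	 * surface) is blitted to it.
+	 */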
+	if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
+	    new_vfbs->surface->base_size.height == vdisplay)
+		new_content_type = SAME_AS_DISPLAY;
+	else if (vfb->bo)
+		new_content_type = SEPARATE_BO;
+	else
+		new_content_type = SEPARATE_SURFACE;
+
+	if (new_content_type != SAME_AS_DISPLAY) {
+		struct vmw_surface content_srf;
+		struct drm_vmw_size display_base_size = {0};
+
+		display_base_size.width  = hdisplay;
+		display_base_size.height = vdisplay;
+		display_base_size.depth  = 1;
+
+		/*
+		 * If content buffer is a buffer object, then we have to
+		 * construct surface info
+		 */
+		if (new_content_type == SEPARATE_BO) {
+
+			switch (new_fb->format->cpp[0]*8) {
+			case 32:
+				content_srf.format = SVGA3D_X8R8G8B8;
+				break;
+
+			case 16:
+				content_srf.format = SVGA3D_R5G6B5;
+				break;
+
+			case 8:
+				content_srf.format = SVGA3D_P8;
+				break;
+
+			default:
+				DRM_ERROR("Invalid format\n");
+				return -EINVAL;
+			}
+
+			content_srf.flags             = 0;
+			content_srf.mip_levels[0]     = 1;
+			content_srf.multisample_count = 0;
+			content_srf.multisample_pattern =
+				SVGA3D_MS_PATTERN_NONE;
+			content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
+		} else {
+			content_srf = *new_vfbs->surface;
+		}
+
+		if (vps->surf) {
+			struct drm_vmw_size cur_base_size = vps->surf->base_size;
+
+			if (cur_base_size.width != display_base_size.width ||
+			    cur_base_size.height != display_base_size.height ||
+			    vps->surf->format != content_srf.format) {
+				WARN_ON(vps->pinned != 0);
+				vmw_surface_unreference(&vps->surf);
+			}
+
+		}
+
+		if (!vps->surf) {
+			ret = vmw_surface_gb_priv_define
+				(crtc->dev,
+				 /* Kernel visible only */
+				 0,
+				 content_srf.flags,
+				 content_srf.format,
+				 true,  /* a scanout buffer */
+				 content_srf.mip_levels[0],
+				 content_srf.multisample_count,
+				 0,
+				 display_base_size,
+				 content_srf.multisample_pattern,
+				 content_srf.quality_level,
+				 &vps->surf);
+			if (ret != 0) {
+				DRM_ERROR("Couldn't allocate STDU surface.\n");
+				return ret;
+			}
+		}
+	} else {
+		/*
+	 * prepare_fb and cleanup_fb should only take care of pinning
+		 * and unpinning.  References are tracked by state objects.
+		 * The only time we add a reference in prepare_fb is if the
+		 * state object doesn't have a reference to begin with
+		 */
+		if (vps->surf) {
+			WARN_ON(vps->pinned != 0);
+			vmw_surface_unreference(&vps->surf);
+		}
+
+		vps->surf = vmw_surface_reference(new_vfbs->surface);
+	}
+
+	if (vps->surf) {
+
+		/* Pin new surface before flipping */
+		ret = vmw_resource_pin(&vps->surf->res, false);
+		if (ret)
+			goto out_srf_unref;
+
+		vps->pinned++;
+	}
+
+	vps->content_fb_type = new_content_type;
+
+	/*
+	 * This should only happen if the buffer object is too large to create a
+	 * proxy surface for.
+	 * If we are a 2D VM with a buffer object then we have to use a CPU
+	 * blit, so cache the bytes-per-pixel value needed for it.
+	 */
+	if (vps->content_fb_type == SEPARATE_BO &&
+	    !(dev_priv->capabilities & SVGA_CAP_3D))
+		vps->cpp = new_fb->pitches[0] / new_fb->width;
+
+	return 0;
+
+out_srf_unref:
+	vmw_surface_unreference(&vps->surf);
+	return ret;
+}
+
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+				      uint32_t num_hits)
+{
+	return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+		sizeof(SVGA3dCmdSurfaceDMASuffix) +
+		sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+					  uint32_t num_hits)
+{
+	return sizeof(struct vmw_stdu_update_gb_image) +
+		sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane  *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	struct vmw_stdu_dma *cmd_dma = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd_dma->header.size = sizeof(cmd_dma->body) +
+		sizeof(struct SVGA3dCopyBox) * num_hits +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+	cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+	cmd_dma->body.host.sid = stdu->display_srf->res.id;
+	cmd_dma->body.host.face = 0;
+	cmd_dma->body.host.mipmap = 0;
+	cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+	return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane  *update,
+					  void *cmd, struct drm_rect *clip,
+					  uint32_t fb_x, uint32_t fb_y)
+{
+	struct SVGA3dCopyBox *box = cmd;
+
+	box->srcx = fb_x;
+	box->srcy = fb_y;
+	box->srcz = 0;
+	box->x = clip->x1;
+	box->y = clip->y1;
+	box->z = 0;
+	box->w = drm_rect_width(clip);
+	box->h = drm_rect_height(clip);
+	box->d = 1;
+
+	return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
+					    void *cmd, struct drm_rect *bb)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+	vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+				 bb->y1, bb->y2);
+
+	return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane  *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_du_update_plane_buffer *bo_update =
+		container_of(update, typeof(*bo_update), base);
+
+	bo_update->fb_left = INT_MAX;
+	bo_update->fb_top = INT_MAX;
+
+	return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane  *update,
+				     void *cmd, struct drm_rect *clip,
+				     uint32_t fb_x, uint32_t fb_y)
+{
+	struct vmw_du_update_plane_buffer *bo_update =
+		container_of(update, typeof(*bo_update), base);
+
+	bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+	bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+	return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane  *update, void *cmd,
+				struct drm_rect *bb)
+{
+	struct vmw_du_update_plane_buffer *bo_update;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+	struct vmw_stdu_update_gb_image *cmd_img = cmd;
+	struct vmw_stdu_update *cmd_update;
+	struct ttm_buffer_object *src_bo, *dst_bo;
+	u32 src_offset, dst_offset;
+	s32 src_pitch, dst_pitch;
+	s32 width, height;
+
+	bo_update = container_of(update, typeof(*bo_update), base);
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	width = bb->x2 - bb->x1;
+	height = bb->y2 - bb->y1;
+
+	diff.cpp = stdu->cpp;
+
+	dst_bo = &stdu->display_srf->res.backup->base;
+	dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+	dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+	src_bo = &vfbbo->buffer->base;
+	src_pitch = update->vfb->base.pitches[0];
+	src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+		stdu->cpp;
+
+	(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+			       src_offset, src_pitch, width * stdu->cpp, height,
+			       &diff);
+
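+	/* vmw_bo_cpu_blit() records the actually changed region in diff.rect. */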
+	if (drm_rect_visible(&diff.rect)) {
+		SVGA3dBox *box = &cmd_img->body.box;
+
+		cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd_img->header.size = sizeof(cmd_img->body);
+		cmd_img->body.image.sid = stdu->display_srf->res.id;
+		cmd_img->body.image.face = 0;
+		cmd_img->body.image.mipmap = 0;
+
+		box->x = diff.rect.x1;
+		box->y = diff.rect.y1;
+		box->z = 0;
+		box->w = drm_rect_width(&diff.rect);
+		box->h = drm_rect_height(&diff.rect);
+		box->d = 1;
+
+		cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+		vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+					 diff.rect.x1, diff.rect.x2,
+					 diff.rect.y1, diff.rect.y2);
+
+		return sizeof(*cmd_img) + sizeof(*cmd_update);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: device private.
+ * @plane: display plane.
+ * @old_state: old plane state.
+ * @vfb: framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+				    struct drm_plane *plane,
+				    struct drm_plane_state *old_state,
+				    struct vmw_framebuffer *vfb,
+				    struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_buffer bo_update;
+
+	memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+	bo_update.base.plane = plane;
+	bo_update.base.old_state = old_state;
+	bo_update.base.dev_priv = dev_priv;
+	bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	bo_update.base.vfb = vfb;
+	bo_update.base.out_fence = out_fence;
+	bo_update.base.mutex = NULL;
+	bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+	bo_update.base.intr = false;
+
+	/*
+	 * VMs without 3D support don't have the surface DMA command, so a
+	 * CPU blit is used and the framebuffer should be moved out of VRAM.
+	 */
+	if (bo_update.base.cpu_blit) {
+		bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+		bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+		bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+		bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+	} else {
+		bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+		bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+		bo_update.base.clip = vmw_stdu_bo_populate_clip;
+		bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+	}
+
+	return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+					uint32_t num_hits)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	uint32_t size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	if (vfbs->is_bo_proxy)
+		size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+	size += sizeof(struct vmw_stdu_update);
+
+	return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+					   uint32_t num_hits)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	uint32_t size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	if (vfbs->is_bo_proxy)
+		size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+	size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+		num_hits + sizeof(struct vmw_stdu_update);
+
+	return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_plane_state *old_state = update->old_state;
+	struct vmw_stdu_update_gb_image *cmd_update = cmd;
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_rect clip;
+	uint32_t copy_size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	/*
+	 * A proxy surface is a special case where a buffer-object backed fb is
+	 * wrapped in a surface and needs an update GB image command to sync
+	 * with the device.
+	 */
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		SVGA3dBox *box = &cmd_update->body.box;
+
+		cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd_update->header.size = sizeof(cmd_update->body);
+		cmd_update->body.image.sid = vfbs->surface->res.id;
+		cmd_update->body.image.face = 0;
+		cmd_update->body.image.mipmap = 0;
+
+		box->x = clip.x1;
+		box->y = clip.y1;
+		box->z = 0;
+		box->w = drm_rect_width(&clip);
+		box->h = drm_rect_height(&clip);
+		box->d = 1;
+
+		copy_size += sizeof(*cmd_update);
+		cmd_update++;
+	}
+
+	return copy_size;
+}
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane  *update, void *cmd,
+			       uint32_t num_hits)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_surface *vfbs;
+	struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+	cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+		num_hits;
+	cmd_copy->body.src.sid = vfbs->surface->res.id;
+	cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+	return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane  *update, void *cmd,
+			       struct drm_rect *clip, uint32_t fb_x,
+			       uint32_t fb_y)
+{
+	struct SVGA3dCopyBox *box = cmd;
+
+	box->srcx = fb_x;
+	box->srcy = fb_y;
+	box->srcz = 0;
+	box->x = clip->x1;
+	box->y = clip->y1;
+	box->z = 0;
+	box->w = drm_rect_width(clip);
+	box->h = drm_rect_height(clip);
+	box->d = 1;
+
+	return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane  *update, void *cmd,
+				 struct drm_rect *bb)
+{
+	vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+				 bb->y2);
+
+	return sizeof(struct vmw_stdu_update);
+}
+
+/**
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Display plane
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+					 struct drm_plane *plane,
+					 struct drm_plane_state *old_state,
+					 struct vmw_framebuffer *vfb,
+					 struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane srf_update;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_surface *vfbs;
+
+	stdu = vmw_crtc_to_stdu(plane->state->crtc);
+	vfbs = container_of(vfb, typeof(*vfbs), base);
+
+	memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+	srf_update.plane = plane;
+	srf_update.old_state = old_state;
+	srf_update.dev_priv = dev_priv;
+	srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+	srf_update.vfb = vfb;
+	srf_update.out_fence = out_fence;
+	srf_update.mutex = &dev_priv->cmdbuf_mutex;
+	srf_update.cpu_blit = false;
+	srf_update.intr = true;
+
+	if (vfbs->is_bo_proxy)
+		srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
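+	/*
+	 * Blitting from a surface other than the current display surface
+	 * needs a surface copy in addition to the screen target update;
+	 * otherwise only the update command is emitted.
+	 */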
+	if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+		srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+		srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+		srf_update.clip = vmw_stdu_surface_populate_clip;
+	} else {
+		srf_update.calc_fifo_size =
+			vmw_stdu_surface_fifo_size_same_display;
+	}
+
+	srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+	return vmw_du_helper_plane_update(&srf_update);
+}
+
+/**
+ * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
+ * @plane: display plane
+ * @old_state: Only used to get crtc info
+ *
+ * Formally update stdu->display_srf to the new plane, and bind the new
+ * plane to the STDU.  This function is called during the commit phase when
+ * all the preparations have been done and all the configurations have
+ * been checked.
+ */
+static void
+vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
+				     struct drm_plane_state *old_state)
+{
+	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
+	struct drm_crtc *crtc = plane->state->crtc;
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_pending_vblank_event *event;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_private *dev_priv;
+	int ret;
+
+	/* In case of device error, maintain consistent atomic state */
+	if (crtc && plane->state->fb) {
+		struct vmw_framebuffer *vfb =
+			vmw_framebuffer_to_vfb(plane->state->fb);
+		stdu = vmw_crtc_to_stdu(crtc);
+		dev_priv = vmw_priv(crtc->dev);
+
+		stdu->display_srf = vps->surf;
+		stdu->content_fb_type = vps->content_fb_type;
+		stdu->cpp = vps->cpp;
+
+		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+		if (ret)
+			DRM_ERROR("Failed to bind surface to STDU.\n");
+
+		if (vfb->bo)
+			ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+						       old_state, vfb, &fence);
+		else
+			ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+							    old_state, vfb,
+							    &fence);
+		if (ret)
+			DRM_ERROR("Failed to update STDU.\n");
+	} else {
+		crtc = old_state->crtc;
+		stdu = vmw_crtc_to_stdu(crtc);
+		dev_priv = vmw_priv(crtc->dev);
+
+		/* Blank STDU when fb and crtc are NULL */
+		if (!stdu->defined)
+			return;
+
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (ret)
+			DRM_ERROR("Failed to blank STDU\n");
+
+		ret = vmw_stdu_update_st(dev_priv, stdu);
+		if (ret)
+			DRM_ERROR("Failed to update STDU.\n");
+
+		return;
+	}
+
+	/* In case of error, the vblank event is sent in vmw_du_crtc_atomic_flush */
+	event = crtc->state->event;
+	if (event && fence) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv,
+						   fence,
+						   &event->base,
+						   &event->event.vbl.tv_sec,
+						   &event->event.vbl.tv_usec,
+						   true);
+		if (ret)
+			DRM_ERROR("Failed to queue event on fence.\n");
+		else
+			crtc->state->event = NULL;
+	}
+
+	if (fence)
+		vmw_fence_obj_unreference(&fence);
+}
+
+
+static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_primary_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = vmw_du_cursor_plane_destroy,
+	.reset = vmw_du_plane_reset,
+	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
+	.atomic_destroy_state = vmw_du_plane_destroy_state,
+};
+
+
+/*
+ * Atomic Helpers
+ */
+static const struct
+drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
+	.atomic_check = vmw_du_cursor_plane_atomic_check,
+	.atomic_update = vmw_du_cursor_plane_atomic_update,
+	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
+	.cleanup_fb = vmw_du_plane_cleanup_fb,
+};
+
+static const struct
+drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = {
+	.atomic_check = vmw_du_primary_plane_atomic_check,
+	.atomic_update = vmw_stdu_primary_plane_atomic_update,
+	.prepare_fb = vmw_stdu_primary_plane_prepare_fb,
+	.cleanup_fb = vmw_stdu_primary_plane_cleanup_fb,
+};
+
+static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
+	.prepare = vmw_stdu_crtc_helper_prepare,
+	.mode_set_nofb = vmw_stdu_crtc_mode_set_nofb,
+	.atomic_check = vmw_du_crtc_atomic_check,
+	.atomic_begin = vmw_du_crtc_atomic_begin,
+	.atomic_flush = vmw_du_crtc_atomic_flush,
+	.atomic_enable = vmw_stdu_crtc_atomic_enable,
+	.atomic_disable = vmw_stdu_crtc_atomic_disable,
+};
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC.  Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_plane *primary, *cursor;
+	struct drm_crtc *crtc;
+	int    ret;
+
+
+	stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+	if (!stdu)
+		return -ENOMEM;
+
+	stdu->base.unit = unit;
+	crtc = &stdu->base.crtc;
+	encoder = &stdu->base.encoder;
+	connector = &stdu->base.connector;
+	primary = &stdu->base.primary;
+	cursor = &stdu->base.cursor;
+
+	stdu->base.pref_active = (unit == 0);
+	stdu->base.pref_width  = dev_priv->initial_width;
+	stdu->base.pref_height = dev_priv->initial_height;
+	stdu->base.is_implicit = false;
+
+	/* Initialize primary plane */
+	vmw_du_plane_reset(primary);
+
+	ret = drm_universal_plane_init(dev, primary,
+				       0, &vmw_stdu_plane_funcs,
+				       vmw_primary_plane_formats,
+				       ARRAY_SIZE(vmw_primary_plane_formats),
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize primary plane");
+		goto err_free;
+	}
+
+	drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+	drm_plane_enable_fb_damage_clips(primary);
+
+	/* Initialize cursor plane */
+	vmw_du_plane_reset(cursor);
+
+	ret = drm_universal_plane_init(dev, cursor,
+			0, &vmw_stdu_cursor_funcs,
+			vmw_cursor_plane_formats,
+			ARRAY_SIZE(vmw_cursor_plane_formats),
+			NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize cursor plane");
+		drm_plane_cleanup(&stdu->base.primary);
+		goto err_free;
+	}
+
+	drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs);
+
+	vmw_du_connector_reset(connector);
+
+	ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+				 DRM_MODE_CONNECTOR_VIRTUAL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		goto err_free;
+	}
+
+	drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
+	connector->status = vmw_du_connector_detect(connector, false);
+
+	ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+			       DRM_MODE_ENCODER_VIRTUAL, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize encoder\n");
+		goto err_free_connector;
+	}
+
+	(void) drm_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	ret = drm_connector_register(connector);
+	if (ret) {
+		DRM_ERROR("Failed to register connector\n");
+		goto err_free_encoder;
+	}
+
+	vmw_du_crtc_reset(crtc);
+	ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary,
+					&stdu->base.cursor,
+					&vmw_stdu_crtc_funcs, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize CRTC\n");
+		goto err_free_unregister;
+	}
+
+	drm_crtc_helper_add(crtc, &vmw_stdu_crtc_helper_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	return 0;
+
+err_free_unregister:
+	drm_connector_unregister(connector);
+err_free_encoder:
+	drm_encoder_cleanup(encoder);
+err_free_connector:
+	drm_connector_cleanup(connector);
+err_free:
+	kfree(stdu);
+	return ret;
+}
+
+
+
+/**
+ *  vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ *  @stdu:  Screen Target Display Unit to be destroyed
+ *
+ *  Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+	vmw_du_cleanup(&stdu->base);
+	kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device.  It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
+ * Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+
+	/* Do nothing if Screen Target support is turned off */
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+		return -ENOSYS;
+
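+	/* Screen targets need guest-backed object (GB) support. */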
+	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+		return -ENOSYS;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		return ret;
+
+	dev_priv->active_display_unit = vmw_du_screen_target;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+		ret = vmw_stdu_init(dev_priv, i);
+
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize STDU %d", i);
+			return ret;
+		}
+	}
+
+	DRM_INFO("Screen Target Display device initialized\n");
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 0000000..29d8794
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,1760 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_surfacedefs.h"
+
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+	(svga3d_flags & ((uint64_t)U32_MAX))
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @prime:          The TTM prime object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ * @master: master of the creating client. Used for security check.
+ */
+struct vmw_user_surface {
+	struct ttm_prime_object prime;
+	struct vmw_surface srf;
+	uint32_t size;
+	struct drm_master *master;
+	struct ttm_base_object *backup_base;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+	uint32_t face;
+	uint32_t mip;
+	uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+			       struct drm_vmw_gb_surface_create_ext_req *req,
+			       struct drm_vmw_gb_surface_create_rep *rep,
+			       struct drm_file *file_priv);
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+				  struct drm_vmw_surface_arg *req,
+				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
+				  struct drm_file *file_priv);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+	.object_type = VMW_RES_SURFACE,
+	.base_obj_to_res = vmw_user_surface_base_to_res,
+	.res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+	&user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
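+/*
+ * Legacy surfaces transfer their contents to and from a guest buffer with
+ * surface DMA on demand, while guest-backed surfaces are permanently backed
+ * by a MOB-placed buffer, hence the differing .needs_backup and
+ * .backup_placement settings below.
+ */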
+static const struct vmw_res_func vmw_legacy_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = false,
+	.may_evict = true,
+	.prio = 1,
+	.dirty_prio = 1,
+	.type_name = "legacy surfaces",
+	.backup_placement = &vmw_srf_placement,
+	.create = &vmw_legacy_srf_create,
+	.destroy = &vmw_legacy_srf_destroy,
+	.bind = &vmw_legacy_srf_bind,
+	.unbind = &vmw_legacy_srf_unbind
+};
+
+static const struct vmw_res_func vmw_gb_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = true,
+	.may_evict = true,
+	.prio = 1,
+	.dirty_prio = 2,
+	.type_name = "guest backed surfaces",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_surface_create,
+	.destroy = vmw_gb_surface_destroy,
+	.bind = vmw_gb_surface_bind,
+	.unbind = vmw_gb_surface_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdSurfaceDMA body;
+	SVGA3dCopyBox cb;
+	SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+	return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+	return sizeof(struct vmw_surface_define) + srf->num_sizes *
+		sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+	return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+				       void *cmd_space)
+{
+	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+		cmd_space;
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+				      void *cmd_space)
+{
+	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+		cmd_space;
+	struct drm_vmw_size *src_size;
+	SVGA3dSize *cmd_size;
+	uint32_t cmd_len;
+	int i;
+
+	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+	cmd->header.size = cmd_len;
+	cmd->body.sid = srf->res.id;
+	/*
+	 * Downcast of surfaceFlags, which was upcast when received from
+	 * user-space, since the driver internally stores it as 64 bit.
+	 * The legacy surface define command only supports 32-bit flags.
+	 */
+	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
+	cmd->body.format = srf->format;
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+	cmd += 1;
+	cmd_size = (SVGA3dSize *) cmd;
+	src_size = srf->sizes;
+
+	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+		cmd_size->width = src_size->width;
+		cmd_size->height = src_size->height;
+		cmd_size->depth = src_size->depth;
+	}
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+				   void *cmd_space,
+				   const SVGAGuestPtr *ptr,
+				   bool to_surface)
+{
+	uint32_t i;
+	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+	const struct svga3d_surface_desc *desc =
+		svga3dsurface_get_desc(srf->format);
+
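+	/* Encode one DMA command (copy box plus suffix) per mip-level image. */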
+	for (i = 0; i < srf->num_sizes; ++i) {
+		SVGA3dCmdHeader *header = &cmd->header;
+		SVGA3dCmdSurfaceDMA *body = &cmd->body;
+		SVGA3dCopyBox *cb = &cmd->cb;
+		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+		const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+		header->id = SVGA_3D_CMD_SURFACE_DMA;
+		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+		body->guest.ptr = *ptr;
+		body->guest.ptr.offset += cur_offset->bo_offset;
+		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+								  cur_size);
+		body->host.sid = srf->res.id;
+		body->host.face = cur_offset->face;
+		body->host.mipmap = cur_offset->mip;
+		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
+				  SVGA3D_READ_HOST_VRAM);
+		cb->x = 0;
+		cb->y = 0;
+		cb->z = 0;
+		cb->srcx = 0;
+		cb->srcy = 0;
+		cb->srcz = 0;
+		cb->w = cur_size->width;
+		cb->h = cur_size->height;
+		cb->d = cur_size->depth;
+
+		suffix->suffixSize = sizeof(*suffix);
+		suffix->maximumOffset =
+			svga3dsurface_get_image_buffer_size(desc, cur_size,
+							    body->guest.pitch);
+		suffix->flags.discard = 0;
+		suffix->flags.unsynchronized = 0;
+		suffix->flags.reserved = 0;
+		++cmd;
+	}
+};
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	void *cmd;
+
+	if (res->func->destroy == vmw_gb_surface_destroy) {
+		(void) vmw_gb_surface_destroy(res);
+		return;
+	}
+
+	if (res->id != -1) {
+
+		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+		if (unlikely(!cmd))
+			return;
+
+		vmw_surface_destroy_encode(res->id, cmd);
+		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+		/*
+		 * TODO: Make used_memory_size atomic, or use a separate
+		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
+		 * the destroy path.
+		 */
+
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		srf = vmw_res_to_srf(res);
+		dev_priv->used_memory_size -= res->backup_size;
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+	}
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * Creates the device surface if it doesn't already have a hw id.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	uint32_t submit_size;
+	uint8_t *cmd;
+	int ret;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	srf = vmw_res_to_srf(res);
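+	/*
+	 * Refuse creation if it would exceed the device memory limit for
+	 * legacy surfaces; the caller may retry after eviction frees memory.
+	 */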
+	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+		     dev_priv->memory_size))
+		return -EBUSY;
+
+	/*
+	 * Alloc id for the resource.
+	 */
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	/*
+	 * Encode surface define- commands.
+	 */
+
+	submit_size = vmw_surface_define_size(srf);
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(!cmd)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	vmw_surface_define_encode(srf, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+	vmw_fifo_resource_inc(dev_priv);
+
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size += res->backup_size;
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf,
+			      bool bind)
+{
+	SVGAGuestPtr ptr;
+	struct vmw_fence_obj *fence;
+	uint32_t submit_size;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint8_t *cmd;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	BUG_ON(!val_buf->bo);
+	submit_size = vmw_surface_dma_size(srf);
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(!cmd))
+		return -ENOMEM;
+
+	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_bo_fence_single(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	if (!res->backup_dirty)
+		return 0;
+
+	return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @readback:       Boolean whether to copy backup data from the surface
+ *                  before unbinding.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface if @readback is
+ * true.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	if (unlikely(readback))
+		return vmw_legacy_srf_dma(res, val_buf, false);
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(res->id == -1);
+
+	/*
+	 * Encode the surface destroy command.
+	 */
+
+	submit_size = vmw_surface_destroy_size();
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(!cmd))
+		return -ENOMEM;
+
+	vmw_surface_destroy_encode(res->id, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size -= res->backup_size;
+
+	/*
+	 * Release the surface ID.
+	 */
+
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+			    struct vmw_surface *srf,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+	struct vmw_resource *res = &srf->res;
+
+	BUG_ON(!res_free);
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				(dev_priv->has_mob) ? &vmw_gb_surface_func :
+				&vmw_legacy_surface_func);
+
+	if (unlikely(ret != 0)) {
+		res_free(res);
+		return ret;
+	}
+
+	/*
+	 * The surface won't be visible to hardware until a
+	 * surface validate.
+	 */
+
+	INIT_LIST_HEAD(&srf->view_list);
+	res->hw_destroy = vmw_hw_surface_destroy;
+	return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user visible surfaces
+ *
+ * @base:           Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_surface,
+			      prime.base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct vmw_user_surface *user_srf =
+	    container_of(srf, struct vmw_user_surface, srf);
+	struct vmw_private *dev_priv = srf->res.dev_priv;
+	uint32_t size = user_srf->size;
+
+	if (user_srf->master)
+		drm_master_put(&user_srf->master);
+	kfree(srf->offsets);
+	kfree(srf->sizes);
+	kfree(srf->snooper.image);
+	ttm_prime_object_kfree(user_srf, prime);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_surface *user_srf =
+	    container_of(base, struct vmw_user_surface, prime.base);
+	struct vmw_resource *res = &user_srf->srf.res;
+
+	*p_base = NULL;
+	if (user_srf->backup_base)
+		ttm_base_object_unref(&user_srf->backup_base);
+	vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                             the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                            the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	union drm_vmw_surface_create_arg *arg =
+	    (union drm_vmw_surface_create_arg *)data;
+	struct drm_vmw_surface_create_req *req = &arg->req;
+	struct drm_vmw_surface_arg *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+	int i, j;
+	uint32_t cur_bo_offset;
+	struct drm_vmw_size *cur_size;
+	struct vmw_surface_offset *cur_offset;
+	uint32_t num_sizes;
+	uint32_t size;
+	const struct svga3d_surface_desc *desc;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
+
+	num_sizes = 0;
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+			return -EINVAL;
+		num_sizes += req->mip_levels[i];
+	}
+
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+	    num_sizes == 0)
+		return -EINVAL;
+
+	size = vmw_user_surface_size +
+		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+	desc = svga3dsurface_get_desc(req->format);
+	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
+			       req->format);
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, &ctx);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(!user_srf)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	/* Driver internally stores as 64-bit flags */
+	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
+	srf->format = req->format;
+	srf->scanout = req->scanout;
+
+	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+	srf->num_sizes = num_sizes;
+	user_srf->size = size;
+	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+				 req->size_addr,
+				 sizeof(*srf->sizes) * srf->num_sizes);
+	if (IS_ERR(srf->sizes)) {
+		ret = PTR_ERR(srf->sizes);
+		goto out_no_sizes;
+	}
+	srf->offsets = kmalloc_array(srf->num_sizes,
+				     sizeof(*srf->offsets),
+				     GFP_KERNEL);
+	if (unlikely(!srf->offsets)) {
+		ret = -ENOMEM;
+		goto out_no_offsets;
+	}
+
+	srf->base_size = *srf->sizes;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = 0;
+	srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+	srf->quality_level = SVGA3D_MS_QUALITY_NONE;
+
+	cur_bo_offset = 0;
+	cur_offset = srf->offsets;
+	cur_size = srf->sizes;
+
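+	/*
+	 * Walk all faces and mip levels, computing each image's offset into
+	 * the backup buffer and accumulating the total backup size.
+	 */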
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		for (j = 0; j < srf->mip_levels[i]; ++j) {
+			uint32_t stride = svga3dsurface_calculate_pitch
+				(desc, cur_size);
+
+			cur_offset->face = i;
+			cur_offset->mip = j;
+			cur_offset->bo_offset = cur_bo_offset;
+			cur_bo_offset += svga3dsurface_get_image_buffer_size
+				(desc, cur_size, stride);
+			++cur_offset;
+			++cur_size;
+		}
+	}
+	res->backup_size = cur_bo_offset;
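+	/*
+	 * A 64x64 A8R8G8B8 scanout surface is treated as a potential cursor
+	 * surface; allocate a snooper image so cursor contents can be
+	 * captured.
+	 */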
+	if (srf->scanout &&
+	    srf->num_sizes == 1 &&
+	    srf->sizes[0].width == 64 &&
+	    srf->sizes[0].height == 64 &&
+	    srf->format == SVGA3D_A8R8G8B8) {
+
+		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+		if (!srf->snooper.image) {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_no_copy;
+		}
+	} else {
+		srf->snooper.image = NULL;
+	}
+
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	/*
+	 * A gb-aware client referencing a shared surface will
+	 * expect a backup buffer to be present.
+	 */
+	if (dev_priv->has_mob && req->shareable) {
+		uint32_t backup_handle;
+
+		ret = vmw_user_bo_alloc(dev_priv, tfile,
+					res->backup_size,
+					true,
+					&backup_handle,
+					&res->backup,
+					&user_srf->backup_base);
+		if (unlikely(ret != 0)) {
+			vmw_resource_unreference(&res);
+			goto out_unlock;
+		}
+	}
+
+	tmp = vmw_resource_reference(&srf->res);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->shareable, VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->sid = user_srf->prime.base.handle;
+	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return 0;
+out_no_copy:
+	kfree(srf->offsets);
+out_no_offsets:
+	kfree(srf->sizes);
+out_no_sizes:
+	ttm_prime_object_kfree(user_srf, prime);
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+static int
+vmw_surface_handle_reference(struct vmw_private *dev_priv,
+			     struct drm_file *file_priv,
+			     uint32_t u_handle,
+			     enum drm_vmw_handle_type handle_type,
+			     struct ttm_base_object **base_p)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_surface *user_srf;
+	uint32_t handle;
+	struct ttm_base_object *base;
+	int ret;
+	bool require_exist = false;
+
+	if (handle_type == DRM_VMW_HANDLE_PRIME) {
+		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+		if (unlikely(ret != 0))
+			return ret;
+	} else {
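+		/*
+		 * Render clients may only add references to surfaces that
+		 * already have a reference from this file, presumably to
+		 * keep them from guessing handles belonging to other clients.
+		 */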
+		if (unlikely(drm_is_render_client(file_priv)))
+			require_exist = true;
+
+		handle = u_handle;
+	}
+
+	ret = -EINVAL;
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
+	if (unlikely(!base)) {
+		VMW_DEBUG_USER("Could not find surface to reference.\n");
+		goto out_no_lookup;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
+		VMW_DEBUG_USER("Referenced object is not a surface.\n");
+		goto out_bad_resource;
+	}
+
+	if (handle_type != DRM_VMW_HANDLE_PRIME) {
+		user_srf = container_of(base, struct vmw_user_surface,
+					prime.base);
+
+		/*
+		 * Make sure the surface creator has the same
+		 * authenticating master, or is already registered with us.
+		 */
+		if (drm_is_primary_client(file_priv) &&
+		    user_srf->master != file_priv->master)
+			require_exist = true;
+
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+					 require_exist);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Could not add a reference to a surface.\n");
+			goto out_bad_resource;
+		}
+	}
+
+	*base_p = base;
+	return 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+out_no_lookup:
+	if (handle_type == DRM_VMW_HANDLE_PRIME)
+		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+
+	return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                               the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_surface_reference_arg *arg =
+	    (union drm_vmw_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_surface_create_req *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct drm_vmw_size __user *user_sizes;
+	struct ttm_base_object *base;
+	int ret;
+
+	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+					   req->handle_type, &base);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
+	srf = &user_srf->srf;
+
+	/* Downcast of flags when sending back to user space */
+	rep->flags = (uint32_t)srf->flags;
+	rep->format = srf->format;
+	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+	    rep->size_addr;
+
+	if (user_sizes)
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
+	if (unlikely(ret != 0)) {
+		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
+			       srf->num_sizes);
+		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
+		ret = -EFAULT;
+	}
+
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
+
+/**
+ * vmw_gb_surface_create - Create a guest-backed device surface.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * Called as part of the resource validation process if the device surface
+ * doesn't exist yet.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint32_t cmd_len, cmd_id, submit_len;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface body;
+	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v3 body;
+	} *cmd3;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	vmw_fifo_resource_inc(dev_priv);
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
+		cmd_len = sizeof(cmd3->body);
+		submit_len = sizeof(*cmd3);
+	} else if (srf->array_size > 0) {
+		/* has_dx checked at creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
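+	/*
+	 * cmd, cmd2 and cmd3 alias the same reserved command space; only the
+	 * layout matching the cmd_id selected above is actually filled in.
+	 */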
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
+	cmd3 = (typeof(cmd3))cmd;
+	if (unlikely(!cmd)) {
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+		cmd3->header.id = cmd_id;
+		cmd3->header.size = cmd_len;
+		cmd3->body.sid = srf->res.id;
+		cmd3->body.surfaceFlags = srf->flags;
+		cmd3->body.format = srf->format;
+		cmd3->body.numMipLevels = srf->mip_levels[0];
+		cmd3->body.multisampleCount = srf->multisample_count;
+		cmd3->body.multisamplePattern = srf->multisample_pattern;
+		cmd3->body.qualityLevel = srf->quality_level;
+		cmd3->body.autogenFilter = srf->autogen_filter;
+		cmd3->body.size.width = srf->base_size.width;
+		cmd3->body.size.height = srf->base_size.height;
+		cmd3->body.size.depth = srf->base_size.depth;
+		cmd3->body.arraySize = srf->array_size;
+	} else if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = srf->format;
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = srf->format;
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
+	vmw_fifo_commit(dev_priv, submit_len);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	vmw_fifo_resource_dec(dev_priv);
+	return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd2;
+	uint32_t submit_size;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
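+	/*
+	 * An update command is appended after the bind only when the backup
+	 * buffer holds data that needs to be transferred to the surface.
+	 */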
+	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(!cmd1))
+		return -ENOMEM;
+
+	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.sid = res->id;
+	cmd1->body.mobid = bo->mem.start;
+	if (res->backup_dirty) {
+		cmd2 = (void *) &cmd1[1];
+		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		res->backup_dirty = false;
+	}
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBSurface body;
+	} *cmd2;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd3;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+	if (unlikely(!cmd))
+		return -ENOMEM;
+
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.sid = res->id;
+		cmd3 = (void *) &cmd1[1];
+	} else {
+		cmd2 = (void *) cmd;
+		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		cmd3 = (void *) &cmd2[1];
+	}
+
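+	/*
+	 * In both cases, finish by binding the surface to an invalid MOB id,
+	 * which detaches the backup buffer from the surface.
+	 */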
+	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd3->header.size = sizeof(cmd3->body);
+	cmd3->body.sid = res->id;
+	cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_bo_fence_single(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBSurface body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+	if (unlikely(!cmd)) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	union drm_vmw_gb_surface_create_arg *arg =
+	    (union drm_vmw_gb_surface_create_arg *)data;
+	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+	struct drm_vmw_gb_surface_create_ext_req req_ext;
+
+	req_ext.base = arg->req;
+	req_ext.version = drm_vmw_gb_surface_v1;
+	req_ext.svga3d_flags_upper_32_bits = 0;
+	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+	req_ext.must_be_zero = 0;
+
+	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	union drm_vmw_gb_surface_reference_arg *arg =
+	    (union drm_vmw_gb_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
+	int ret;
+
+	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	rep->creq = rep_ext.creq.base;
+	rep->crep = rep_ext.crep;
+
+	return ret;
+}
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev:  Pointer to a struct drm_device
+ * @user_accounting_size:  Used to track user-space memory usage, set
+ *                         to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for a scanout buffer
+ * @num_mip_levels:  number of MIP levels
+ * @multisample_count: number of samples per pixel for multisample surfaces
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @multisample_pattern: Multisampling pattern when msaa is supported
+ * @quality_level: Precision settings
+ * @srf_out: allocated surface.  Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       SVGA3dSurfaceAllFlags svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       SVGA3dMSPattern multisample_pattern,
+			       SVGA3dMSQualityLevel quality_level,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers = 1;
+	u32 sample_count = 1;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			VMW_DEBUG_USER("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+
+		if (size.width > dev_priv->texture_max_width ||
+		    size.height > dev_priv->texture_max_height) {
+			VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
+				       size.width, size.height,
+				       dev_priv->texture_max_width,
+				       dev_priv->texture_max_height);
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			VMW_DEBUG_USER("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for non-GL3 hosts. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, &ctx);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(!user_srf)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out  = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile     = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags             = svga3d_flags;
+	srf->format            = format;
+	srf->scanout           = for_scanout;
+	srf->mip_levels[0]     = num_mip_levels;
+	srf->num_sizes         = 1;
+	srf->sizes             = NULL;
+	srf->offsets           = NULL;
+	srf->base_size         = size;
+	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size        = array_size;
+	srf->multisample_count = multisample_count;
+	srf->multisample_pattern = multisample_pattern;
+	srf->quality_level = quality_level;
+
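+	/*
+	 * Cube maps are laid out as six array layers unless an explicit
+	 * array size was requested.
+	 */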
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+
+	if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
+		sample_count = srf->multisample_count;
+
+	srf->res.backup_size   =
+		svga3dsurface_get_serialized_size_extended(srf->format,
+							   srf->base_size,
+							   srf->mip_levels[0],
+							   num_layers,
+							   sample_count);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	/*
+	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+	 * size greater than STDU max width/height. This is really a workaround
+	 * to support creation of big framebuffer requested by some user-space
+	 * for whole topology. That big framebuffer won't really be used for
+	 * binding with screen target as during prepare_fb a separate surface is
+	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+	 */
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout && size.width <= dev_priv->stdu_max_width &&
+	    size.height <= dev_priv->stdu_max_height)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+/**
+ * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	union drm_vmw_gb_surface_create_ext_arg *arg =
+	    (union drm_vmw_gb_surface_create_ext_arg *)data;
+	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
+	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+
+	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	union drm_vmw_gb_surface_reference_ext_arg *arg =
+	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
+
+	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_define_internal - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Request argument from user-space.
+ * @rep: Response argument to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+			       struct drm_vmw_gb_surface_create_ext_req *req,
+			       struct drm_vmw_gb_surface_create_rep *rep,
+			       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint32_t size;
+	uint32_t backup_handle = 0;
+	SVGA3dSurfaceAllFlags svga3d_flags_64 =
+		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
+				req->base.svga3d_flags);
+
+	if (!dev_priv->has_sm4_1) {
+		/*
+		 * If SM4_1 is not supported then we cannot send 64-bit flags
+		 * to the device.
+		 */
+		if (req->svga3d_flags_upper_32_bits != 0)
+			return -EINVAL;
+
+		if (req->base.multisample_count != 0)
+			return -EINVAL;
+
+		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
+			return -EINVAL;
+
+		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
+			return -EINVAL;
+	}
+
+	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
+	    req->base.multisample_count == 0)
+		return -EINVAL;
+
+	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
+
+	size = vmw_user_surface_size;
+
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+					 size,
+					 svga3d_flags_64,
+					 req->base.format,
+					 req->base.drm_surface_flags &
+					 drm_vmw_surface_flag_scanout,
+					 req->base.mip_levels,
+					 req->base.multisample_count,
+					 req->base.array_size,
+					 req->base.base_size,
+					 req->multisample_pattern,
+					 req->quality_level,
+					 &srf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	res = &user_srf->srf.res;
+
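+	/*
+	 * Use the supplied buffer object as backup storage if a handle was
+	 * given; otherwise optionally allocate a new one below.
+	 */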
+	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
+					 &res->backup,
+					 &user_srf->backup_base);
+		if (ret == 0) {
+			if (res->backup->base.num_pages * PAGE_SIZE <
+			    res->backup_size) {
+				VMW_DEBUG_USER("Surface backup buffer too small.\n");
+				vmw_bo_unreference(&res->backup);
+				ret = -EINVAL;
+				goto out_unlock;
+			} else {
+				backup_handle = req->base.buffer_handle;
+			}
+		}
+	} else if (req->base.drm_surface_flags &
+		   drm_vmw_surface_flag_create_buffer)
+		ret = vmw_user_bo_alloc(dev_priv, tfile,
+					res->backup_size,
+					req->base.drm_surface_flags &
+					drm_vmw_surface_flag_shareable,
+					&backup_handle,
+					&res->backup,
+					&user_srf->backup_base);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->base.drm_surface_flags &
+				    drm_vmw_surface_flag_shareable,
+				    VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->handle      = user_srf->prime.base.handle;
+	rep->backup_size = res->backup_size;
+	if (res->backup) {
+		rep->buffer_map_handle =
+			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
+		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+		rep->buffer_handle = backup_handle;
+	} else {
+		rep->buffer_map_handle = 0;
+		rep->buffer_size = 0;
+		rep->buffer_handle = SVGA3D_INVALID_ID;
+	}
+
+	vmw_resource_unreference(&res);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_internal - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Pointer to user-space request surface arg.
+ * @rep: Pointer to response to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+				  struct drm_vmw_surface_arg *req,
+				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
+				  struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct ttm_base_object *base;
+	uint32_t backup_handle;
+	int ret = -EINVAL;
+
+	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+					   req->handle_type, &base);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
+	srf = &user_srf->srf;
+	if (!srf->res.backup) {
+		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+		goto out_bad_resource;
+	}
+
+	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a GB surface "
+			  "backup buffer.\n");
+		(void) ttm_ref_object_base_unref(tfile, base->handle,
+						 TTM_REF_USAGE);
+		goto out_bad_resource;
+	}
+
+	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
+	rep->creq.base.format = srf->format;
+	rep->creq.base.mip_levels = srf->mip_levels[0];
+	rep->creq.base.drm_surface_flags = 0;
+	rep->creq.base.multisample_count = srf->multisample_count;
+	rep->creq.base.autogen_filter = srf->autogen_filter;
+	rep->creq.base.array_size = srf->array_size;
+	rep->creq.base.buffer_handle = backup_handle;
+	rep->creq.base.base_size = srf->base_size;
+	rep->crep.handle = user_srf->prime.base.handle;
+	rep->crep.backup_size = srf->res.backup_size;
+	rep->crep.buffer_handle = backup_handle;
+	rep->crep.buffer_map_handle =
+		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
+	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+	rep->creq.version = drm_vmw_gb_surface_v1;
+	rep->creq.svga3d_flags_upper_32_bits =
+		SVGA3D_FLAGS_UPPER_32(srf->flags);
+	rep->creq.multisample_pattern = srf->multisample_pattern;
+	rep->creq.quality_level = srf->quality_level;
+	rep->creq.must_be_zero = 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
new file mode 100644
index 0000000..d8ea3dd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+static const struct ttm_place vram_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place vram_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place sys_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place sys_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place gmr_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place gmr_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place mob_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place mob_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_placement = {
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vram_placement_flags
+};
+
+static const struct ttm_place vram_gmr_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}
+};
+
+static const struct ttm_place gmr_vram_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	}
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+	.num_placement = 2,
+	.placement = vram_gmr_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_placement_flags
+};
+
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+			 TTM_PL_FLAG_NO_EVICT
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+			 TTM_PL_FLAG_NO_EVICT
+	}
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+	.num_placement = 2,
+	.placement = vram_gmr_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_ne_placement_flags
+};
+
+struct ttm_placement vmw_vram_sys_placement = {
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+	.num_placement = 1,
+	.placement = &vram_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+	.num_placement = 1,
+	.placement = &sys_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_sys_ne_placement = {
+	.num_placement = 1,
+	.placement = &sys_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_ne_placement_flags
+};
+
+static const struct ttm_place evictable_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+	}
+};
+
+static const struct ttm_place nonfixed_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+	}
+};
+
+struct ttm_placement vmw_evictable_placement = {
+	.num_placement = 4,
+	.placement = evictable_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 2,
+	.placement = &gmr_placement_flags,
+	.busy_placement = gmr_vram_placement_flags
+};
+
+struct ttm_placement vmw_mob_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_placement_flags,
+	.busy_placement = &mob_placement_flags
+};
+
+struct ttm_placement vmw_mob_ne_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_ne_placement_flags,
+	.busy_placement = &mob_ne_placement_flags
+};
+
+struct ttm_placement vmw_nonfixed_placement = {
+	.num_placement = 3,
+	.placement = nonfixed_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct vmw_ttm_tt {
+	struct ttm_dma_tt dma_ttm;
+	struct vmw_private *dev_priv;
+	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	uint64_t sg_alloc_size;
+	bool mapped;
+};
+
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+	return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+	bool ret = __vmw_piter_non_sg_next(viter);
+
+	return __sg_page_iter_dma_next(&viter->iter) && ret;
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+	return viter->pages[viter->i];
+}
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+	return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+	return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ * @p_offset: Offset in pages at which to start iterating
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+		     unsigned long p_offset)
+{
+	viter->i = p_offset - 1;
+	viter->num_pages = vsgt->num_pages;
+	viter->page = &__vmw_piter_non_sg_page;
+	viter->pages = vsgt->pages;
+	switch (vsgt->mode) {
+	case vmw_dma_phys:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_phys_addr;
+		break;
+	case vmw_dma_alloc_coherent:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_dma_addr;
+		viter->addrs = vsgt->addrs;
+		break;
+	case vmw_dma_map_populate:
+	case vmw_dma_map_bind:
+		viter->next = &__vmw_piter_sg_next;
+		viter->dma_address = &__vmw_piter_sg_addr;
+		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
+				     vsgt->sgt->orig_nents, p_offset);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+		DMA_BIDIRECTIONAL);
+	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	int ret;
+
+	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+			 DMA_BIDIRECTIONAL);
+	if (unlikely(ret == 0))
+		return -ENOMEM;
+
+	vmw_tt->sgt.nents = ret;
+
+	return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping mode and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	struct vmw_piter iter;
+	dma_addr_t old;
+	int ret = 0;
+	static size_t sgl_size;
+	static size_t sgt_size;
+
+	if (vmw_tt->mapped)
+		return 0;
+
+	vsgt->mode = dev_priv->map_mode;
+	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+	vsgt->sgt = &vmw_tt->sgt;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		if (unlikely(!sgl_size)) {
+			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+			sgt_size = ttm_round_pot(sizeof(struct sg_table));
+		}
+		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = __sg_alloc_table_from_pages
+			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+			 dma_get_max_seg_size(dev_priv->dev->dev),
+			 GFP_KERNEL);
+		if (unlikely(ret != 0))
+			goto out_sg_alloc_fail;
+
+		if (vsgt->num_pages > vmw_tt->sgt.nents) {
+			uint64_t over_alloc =
+				sgl_size * (vsgt->num_pages -
+					    vmw_tt->sgt.nents);
+
+			ttm_mem_global_free(glob, over_alloc);
+			vmw_tt->sg_alloc_size -= over_alloc;
+		}
+
+		ret = vmw_ttm_map_for_dma(vmw_tt);
+		if (unlikely(ret != 0))
+			goto out_map_fail;
+
+		break;
+	default:
+		break;
+	}
+
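+	/*
+	 * Count the number of contiguous DMA address regions; a new region
+	 * starts whenever the next page's DMA address is not PAGE_SIZE past
+	 * the previous one.
+	 */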
+	old = ~((dma_addr_t) 0);
+	vmw_tt->vsgt.num_regions = 0;
+	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+		dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+		if (cur != old + PAGE_SIZE)
+			vmw_tt->vsgt.num_regions++;
+		old = cur;
+	}
+
+	vmw_tt->mapped = true;
+	return 0;
+
+out_map_fail:
+	sg_free_table(vmw_tt->vsgt.sgt);
+	vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+	return ret;
+}
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+	if (!vmw_tt->vsgt.sgt)
+		return;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		vmw_ttm_unmap_from_dma(vmw_tt);
+		sg_free_table(vmw_tt->vsgt.sgt);
+		vmw_tt->vsgt.sgt = NULL;
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_tt->sg_alloc_size);
+		break;
+	default:
+		break;
+	}
+	vmw_tt->mapped = false;
+}
+
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
+
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	int ret;
+
+	ret = vmw_ttm_map_dma(vmw_be);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_be->gmr_id = bo_mem->start;
+	vmw_be->mem_type = bo_mem->mem_type;
+
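+	/*
+	 * Bind to either a GMR or a MOB depending on the placement TTM
+	 * selected for this buffer.
+	 */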
+	switch (bo_mem->mem_type) {
+	case VMW_PL_GMR:
+		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+				    ttm->num_pages, vmw_be->gmr_id);
+	case VMW_PL_MOB:
+		if (unlikely(vmw_be->mob == NULL)) {
+			vmw_be->mob =
+				vmw_mob_create(ttm->num_pages);
+			if (unlikely(vmw_be->mob == NULL))
+				return -ENOMEM;
+		}
+
+		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+				    &vmw_be->vsgt, ttm->num_pages,
+				    vmw_be->gmr_id);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	switch (vmw_be->mem_type) {
+	case VMW_PL_GMR:
+		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+		break;
+	case VMW_PL_MOB:
+		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+		break;
+	default:
+		BUG();
+	}
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+		vmw_ttm_unmap_dma(vmw_be);
+
+	return 0;
+}
+
+
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_be);
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+	else
+		ttm_tt_fini(ttm);
+
+	if (vmw_be->mob)
+		vmw_mob_destroy(vmw_be->mob);
+
+	kfree(vmw_be);
+}
+
+
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
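+	/*
+	 * For coherent DMA, account for the per-page dma_addr_t bookkeeping
+	 * before populating through the DMA page pool.
+	 */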
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+		ret = ttm_mem_global_alloc(glob, size, ctx);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+					ctx);
+		if (unlikely(ret != 0))
+			ttm_mem_global_free(glob, size);
+	} else
+		ret = ttm_pool_populate(ttm, ctx);
+
+	return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+						 dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+	if (vmw_tt->mob) {
+		vmw_mob_destroy(vmw_tt->mob);
+		vmw_tt->mob = NULL;
+	}
+
+	vmw_ttm_unmap_dma(vmw_tt);
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ttm_mem_global_free(glob, size);
+	} else
+		ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+	.bind = vmw_ttm_bind,
+	.unbind = vmw_ttm_unbind,
+	.destroy = vmw_ttm_destroy,
+};
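+
+/*
+ * TTM drives these hooks: .bind is called when a buffer object is placed in
+ * a GMR or MOB slot, .unbind when it is moved out or evicted, and .destroy
+ * when the ttm_tt itself is torn down.
+ */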
+
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+					uint32_t page_flags)
+{
+	struct vmw_ttm_tt *vmw_be;
+	int ret;
+
+	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
+	if (!vmw_be)
+		return NULL;
+
+	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
+	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
+	vmw_be->mob = NULL;
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+	else
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	return &vmw_be->dma_ttm.ttm;
+out_no_init:
+	kfree(vmw_be);
+	return NULL;
+}
+
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		      struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+	case VMW_PL_MOB:
+		/*
+		 * "Guest Memory Regions" is an aperture-like feature with
+		 *  one slot per bo. There is an upper limit on the number
+		 *  of slots as well as on the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
+		     struct ttm_placement *placement)
+{
+	*placement = vmw_sys_placement;
+}
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	struct ttm_object_file *tfile =
+		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+	return vmw_user_bo_verify_access(bo, tfile);
+}
+
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+	case VMW_PL_GMR:
+	case VMW_PL_MOB:
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
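+
+/*
+ * Only VRAM is exposed as I/O memory above: TTM maps it at the physical
+ * address mem->bus.base + mem->bus.offset. System pages, GMRs and MOBs are
+ * instead reached through the page lists set up by vmw_ttm_map_dma().
+ */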
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
+/**
+ * vmw_move_notify - TTM move_notify callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @evict: Whether the buffer object is being evicted.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    bool evict,
+			    struct ttm_mem_reg *mem)
+{
+	vmw_bo_move_notify(bo, mem);
+	vmw_query_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	vmw_bo_swap_notify(bo);
+	(void) ttm_bo_wait(bo, false, false);
+}
+
+
+struct ttm_bo_driver vmw_bo_driver = {
+	.ttm_tt_create = &vmw_ttm_tt_create,
+	.ttm_tt_populate = &vmw_ttm_populate,
+	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
+	.invalidate_caches = vmw_invalidate_caches,
+	.init_mem_type = vmw_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = vmw_evict_flags,
+	.move = NULL,
+	.verify_access = vmw_verify_access,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
+};
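+
+/*
+ * The vmw_bo_driver table above is handed to TTM when the driver's
+ * ttm_bo_device is initialized; from then on TTM calls back into the
+ * functions in this file for tt creation and population, memory-type
+ * setup, placement decisions and move/swap notifications.
+ */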
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 0000000..5a7b8bb
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+	static struct ttm_operation_ctx ctx = {.interruptible = false,
+					       .no_wait_gpu = false};
+	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+	return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+	return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. A vmw private is used here rather
+ * than a struct vmw_validation_mem to make sure the assumption in the
+ * callbacks, that struct vmw_private embeds a struct vmw_validation_mem,
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+	struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+	vvm->reserve_mem = vmw_vmt_reserve;
+	vvm->unreserve_mem = vmw_vmt_unreserve;
+	vvm->gran = gran;
+}
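+
+/*
+ * Usage sketch (illustrative only; gran, ht and the submission path are
+ * placeholders, the real call sites live elsewhere in the driver):
+ *
+ *	// once at device init, with a caller-chosen granularity
+ *	vmw_validation_mem_init_ttm(dev_priv, gran);
+ *
+ *	// per command submission
+ *	DECLARE_VAL_CONTEXT(val_ctx, ht, true);
+ *	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+ *
+ * From then on, memory allocated through vmw_validation_mem_alloc() for
+ * that context is accounted against the TTM memory global via the two
+ * callbacks above.
+ */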
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
new file mode 100644
index 0000000..ebc1d83
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+
+/**
+ * struct vmw_stream - Overlay stream simple resource.
+ * @sres: The simple resource we derive from.
+ * @stream_id: The overlay stream id.
+ */
+struct vmw_stream {
+	struct vmw_simple_resource sres;
+	u32 stream_id;
+};
+
+/**
+ * vmw_stream - Typecast a struct vmw_resource to a struct vmw_stream.
+ * @res: Pointer to the struct vmw_resource.
+ *
+ * Returns: Returns a pointer to the struct vmw_stream.
+ */
+static struct vmw_stream *
+vmw_stream(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_stream, sres.res);
+}
+
+/***************************************************************************
+ * Simple resource callbacks for struct vmw_stream
+ **************************************************************************/
+static void vmw_stream_hw_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_stream *stream = vmw_stream(res);
+	int ret;
+
+	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
+	WARN_ON_ONCE(ret != 0);
+}
+
+static int vmw_stream_init(struct vmw_resource *res, void *data)
+{
+	struct vmw_stream *stream = vmw_stream(res);
+
+	return vmw_overlay_claim(res->dev_priv, &stream->stream_id);
+}
+
+static void vmw_stream_set_arg_handle(void *data, u32 handle)
+{
+	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+
+	arg->stream_id = handle;
+}
+
+static const struct vmw_simple_resource_func va_stream_func = {
+	.res_func = {
+		.res_type = vmw_res_stream,
+		.needs_backup = false,
+		.may_evict = false,
+		.type_name = "overlay stream",
+		.backup_placement = NULL,
+		.create = NULL,
+		.destroy = NULL,
+		.bind = NULL,
+		.unbind = NULL
+	},
+	.ttm_res_type = VMW_RES_STREAM,
+	.size = sizeof(struct vmw_stream),
+	.init = vmw_stream_init,
+	.hw_destroy = vmw_stream_hw_destroy,
+	.set_arg_handle = vmw_stream_set_arg_handle,
+};
+
+/***************************************************************************
+ * End simple resource callbacks for struct vmw_stream
+ **************************************************************************/
+
+/**
+ * vmw_stream_unref_ioctl - Ioctl to unreference a user-space handle to
+ * a struct vmw_stream.
+ * @dev: Pointer to the drm device.
+ * @data: The ioctl argument
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ *
+ * Return:
+ *   0 if successful.
+ *   Negative error value on failure.
+ */
+int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->stream_id, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_stream_claim_ioctl - Ioctl to claim a struct vmw_stream overlay.
+ * @dev: Pointer to the drm device.
+ * @data: The ioctl argument
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ *
+ * Return:
+ *   0 if successful.
+ *   Negative error value on failure.
+ */
+int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	return vmw_simple_resource_create_ioctl(dev, data, file_priv,
+						&va_stream_func);
+}
+
+/**
+ * vmw_user_stream_lookup - Look up a struct vmw_user_stream from a handle.
+ * @dev_priv: Pointer to a struct vmw_private.
+ * @tfile: struct ttm_object_file identifying the caller.
+ * @inout_id: In: The user-space handle. Out: The stream id.
+ * @out: On output contains a refcounted pointer to the embedded
+ * struct vmw_resource.
+ *
+ * Return:
+ *   0 if successful.
+ *   Negative error value on failure.
+ */
+int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t *inout_id, struct vmw_resource **out)
+{
+	struct vmw_stream *stream;
+	struct vmw_resource *res =
+		vmw_simple_resource_lookup(tfile, *inout_id, &va_stream_func);
+
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	stream = vmw_stream(res);
+	*inout_id = stream->stream_id;
+	*out = res;
+
+	return 0;
+}
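+
+/*
+ * Taken together, this file is a minimal user of the simple-resource
+ * framework: vmw_stream_claim_ioctl() creates a stream and returns its
+ * user-space handle via vmw_stream_set_arg_handle(),
+ * vmw_stream_unref_ioctl() drops that handle, and vmw_user_stream_lookup()
+ * turns a handle into a referenced struct vmw_resource together with the
+ * underlying overlay stream id.
+ */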
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
new file mode 100644
index 0000000..f611b22
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/slab.h>
+#include "vmwgfx_validation.h"
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_validation_bo_node - Buffer object validation metadata.
+ * @base: Metadata used for TTM reservation and validation.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @as_mob: Validate as mob.
+ * @cpu_blit: Validate for cpu blit access.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_bo_node {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+	u32 as_mob : 1;
+	u32 cpu_blit : 1;
+};
+
+/**
+ * struct vmw_validation_res_node - Resource validation metadata.
+ * @head: List head for the resource validation list.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @res: Reference counted resource pointer.
+ * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
+ * to a resource.
+ * @new_backup_offset: Offset into the new backup mob for resources that can
+ * share MOBs.
+ * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
+ * the command stream provides a mob bind operation.
+ * @switching_backup: The validation process is switching backup MOB.
+ * @first_usage: True iff the resource has been seen only once in the current
+ * validation batch.
+ * @reserved: Whether the resource is currently reserved by this process.
+ * @dirty: Whether the resource is to be made dirty after validation
+ * (VMW_RES_DIRTY_SET) or clean.
+ * @dirty_set: Whether @dirty carries valid information.
+ * @private: Optionally additional memory for caller-private data.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_res_node {
+	struct list_head head;
+	struct drm_hash_item hash;
+	struct vmw_resource *res;
+	struct vmw_buffer_object *new_backup;
+	unsigned long new_backup_offset;
+	u32 no_buffer_needed : 1;
+	u32 switching_backup : 1;
+	u32 first_usage : 1;
+	u32 reserved : 1;
+	u32 dirty : 1;
+	u32 dirty_set : 1;
+	unsigned long private[0];
+};
+
+/**
+ * vmw_validation_mem_alloc - Allocate kernel memory from the validation
+ * context based allocator
+ * @ctx: The validation context
+ * @size: The number of bytes to allocate.
+ *
+ * The memory allocated may not exceed PAGE_SIZE, and the returned
+ * address is aligned to sizeof(long). All memory allocated this way is
+ * reclaimed after validation when calling any of the exported functions:
+ * vmw_validation_unref_lists()
+ * vmw_validation_revert()
+ * vmw_validation_done()
+ *
+ * Return: Pointer to the allocated memory on success. NULL on failure.
+ */
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+			       unsigned int size)
+{
+	void *addr;
+
+	size = vmw_validation_align(size);
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	if (ctx->mem_size_left < size) {
+		struct page *page;
+
+		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+			if (ret)
+				return NULL;
+
+			ctx->vm_size_left += ctx->vm->gran;
+			ctx->total_mem += ctx->vm->gran;
+		}
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			return NULL;
+
+		if (ctx->vm)
+			ctx->vm_size_left -= PAGE_SIZE;
+
+		list_add_tail(&page->lru, &ctx->page_list);
+		ctx->page_address = page_address(page);
+		ctx->mem_size_left = PAGE_SIZE;
+	}
+
+	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
+	ctx->mem_size_left -= size;
+
+	return addr;
+}
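+
+/*
+ * Example of the allocator arithmetic above: with a fresh context, a request
+ * for 20 bytes is rounded up to vmw_validation_align(20) == 24 on a 64-bit
+ * kernel, a zeroed page is added to @page_list, the caller gets the start of
+ * that page, and mem_size_left drops from PAGE_SIZE to PAGE_SIZE - 24.
+ * Subsequent small allocations keep carving from the same page until it is
+ * exhausted.
+ */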
+
+/**
+ * vmw_validation_mem_free - Free all memory allocated using
+ * vmw_validation_mem_alloc()
+ * @ctx: The validation context
+ *
+ * All memory previously allocated for this context using
+ * vmw_validation_mem_alloc() is freed.
+ */
+static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
+{
+	struct page *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
+		list_del_init(&entry->lru);
+		__free_page(entry);
+	}
+
+	ctx->mem_size_left = 0;
+	if (ctx->vm && ctx->total_mem) {
+		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+		ctx->total_mem = 0;
+		ctx->vm_size_left = 0;
+	}
+}
+
+/**
+ * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @vbo: The buffer object to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_bo_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_bo_node *
+vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+			   struct vmw_buffer_object *vbo)
+{
+	struct  vmw_validation_bo_node *bo_node = NULL;
+
+	if (!ctx->merge_dups)
+		return NULL;
+
+	if (ctx->ht) {
+		struct drm_hash_item *hash;
+
+		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+			bo_node = container_of(hash, typeof(*bo_node), hash);
+	} else {
+		struct  vmw_validation_bo_node *entry;
+
+		list_for_each_entry(entry, &ctx->bo_list, base.head) {
+			if (entry->base.bo == &vbo->base) {
+				bo_node = entry;
+				break;
+			}
+		}
+	}
+
+	return bo_node;
+}
+
+/**
+ * vmw_validation_find_res_dup - Find a duplicate resource entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @res: The resource to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_res_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_res_node *
+vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+			    struct vmw_resource *res)
+{
+	struct  vmw_validation_res_node *res_node = NULL;
+
+	if (!ctx->merge_dups)
+		return NULL;
+
+	if (ctx->ht) {
+		struct drm_hash_item *hash;
+
+		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+			res_node = container_of(hash, typeof(*res_node), hash);
+	} else {
+		struct  vmw_validation_res_node *entry;
+
+		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
+			if (entry->res == res) {
+				res_node = entry;
+				goto out;
+			}
+		}
+
+		list_for_each_entry(entry, &ctx->resource_list, head) {
+			if (entry->res == res) {
+				res_node = entry;
+				break;
+			}
+		}
+
+	}
+out:
+	return res_node;
+}
+
+/**
+ * vmw_validation_add_bo - Add a buffer object to the validation context.
+ * @ctx: The validation context.
+ * @vbo: The buffer object.
+ * @as_mob: Validate as mob, otherwise suitable for GMR operations.
+ * @cpu_blit: Validate in a page-mappable location.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+			  struct vmw_buffer_object *vbo,
+			  bool as_mob,
+			  bool cpu_blit)
+{
+	struct vmw_validation_bo_node *bo_node;
+
+	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
+	if (bo_node) {
+		if (bo_node->as_mob != as_mob ||
+		    bo_node->cpu_blit != cpu_blit) {
+			DRM_ERROR("Inconsistent buffer usage.\n");
+			return -EINVAL;
+		}
+	} else {
+		struct ttm_validate_buffer *val_buf;
+		int ret;
+
+		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+		if (!bo_node)
+			return -ENOMEM;
+
+		if (ctx->ht) {
+			bo_node->hash.key = (unsigned long) vbo;
+			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+			if (ret) {
+				DRM_ERROR("Failed to initialize a buffer "
+					  "validation entry.\n");
+				return ret;
+			}
+		}
+		val_buf = &bo_node->base;
+		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+		if (!val_buf->bo)
+			return -ESRCH;
+		val_buf->num_shared = 0;
+		list_add_tail(&val_buf->head, &ctx->bo_list);
+		bo_node->as_mob = as_mob;
+		bo_node->cpu_blit = cpu_blit;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_validation_add_resource - Add a resource to the validation context.
+ * @ctx: The validation context.
+ * @res: The resource.
+ * @priv_size: Size of private, additional metadata.
+ * @dirty: Whether to change dirty status.
+ * @p_node: Output pointer of additional metadata address.
+ * @first_usage: Whether this was the first time this resource was seen.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+				struct vmw_resource *res,
+				size_t priv_size,
+				u32 dirty,
+				void **p_node,
+				bool *first_usage)
+{
+	struct vmw_validation_res_node *node;
+	int ret;
+
+	node = vmw_validation_find_res_dup(ctx, res);
+	if (node) {
+		node->first_usage = 0;
+		goto out_fill;
+	}
+
+	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
+	if (!node) {
+		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
+		return -ENOMEM;
+	}
+
+	if (ctx->ht) {
+		node->hash.key = (unsigned long) res;
+		ret = drm_ht_insert_item(ctx->ht, &node->hash);
+		if (ret) {
+			DRM_ERROR("Failed to initialize a resource validation "
+				  "entry.\n");
+			return ret;
+		}
+	}
+	node->res = vmw_resource_reference_unless_doomed(res);
+	if (!node->res)
+		return -ESRCH;
+
+	node->first_usage = 1;
+	if (!res->dev_priv->has_mob) {
+		list_add_tail(&node->head, &ctx->resource_list);
+	} else {
+		switch (vmw_res_type(res)) {
+		case vmw_res_context:
+		case vmw_res_dx_context:
+			list_add(&node->head, &ctx->resource_ctx_list);
+			break;
+		case vmw_res_cotable:
+			list_add_tail(&node->head, &ctx->resource_ctx_list);
+			break;
+		default:
+			list_add_tail(&node->head, &ctx->resource_list);
+			break;
+		}
+	}
+
+out_fill:
+	if (dirty) {
+		node->dirty_set = 1;
+		/* Overwriting previous information here is intentional! */
+		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+	}
+	if (first_usage)
+		*first_usage = node->first_usage;
+	if (p_node)
+		*p_node = &node->private;
+
+	return 0;
+}
+
+/**
+ * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @dirty: Dirty information VMW_RES_DIRTY_XX
+ */
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+				  void *val_private, u32 dirty)
+{
+	struct vmw_validation_res_node *val;
+
+	if (!dirty)
+		return;
+
+	val = container_of(val_private, typeof(*val), private);
+	val->dirty_set = 1;
+	/* Overwriting previous information here is intentional! */
+	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+}
+
+/**
+ * vmw_validation_res_switch_backup - Register a backup MOB switch during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @vbo: The new backup buffer object MOB. This buffer object needs to have
+ * already been registered with the validation context.
+ * @backup_offset: Offset into the new backup MOB.
+ */
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+				      void *val_private,
+				      struct vmw_buffer_object *vbo,
+				      unsigned long backup_offset)
+{
+	struct vmw_validation_res_node *val;
+
+	val = container_of(val_private, typeof(*val), private);
+
+	val->switching_backup = 1;
+	if (val->first_usage)
+		val->no_buffer_needed = 1;
+
+	val->new_backup = vbo;
+	val->new_backup_offset = backup_offset;
+}
+
+/**
+ * vmw_validation_res_reserve - Reserve all resources registered with this
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Use interruptible waits when possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+			       bool intr)
+{
+	struct vmw_validation_res_node *val;
+	int ret = 0;
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+	list_for_each_entry(val, &ctx->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
+		if (ret)
+			goto out_unreserve;
+
+		val->reserved = 1;
+		if (res->backup) {
+			struct vmw_buffer_object *vbo = res->backup;
+
+			ret = vmw_validation_add_bo
+				(ctx, vbo, vmw_resource_needs_backup(res),
+				 false);
+			if (ret)
+				goto out_unreserve;
+		}
+	}
+
+	return 0;
+
+out_unreserve:
+	vmw_validation_res_unreserve(ctx, true);
+	return ret;
+}
+
+/**
+ * vmw_validation_res_unreserve - Unreserve all reserved resources
+ * registered with this validation context.
+ * @ctx: The validation context.
+ * @backoff: Whether this is a backoff- or a commit-type operation. This
+ * is used to determine whether to switch backup MOBs or not.
+ */
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+				 bool backoff)
+{
+	struct vmw_validation_res_node *val;
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+	if (backoff)
+		list_for_each_entry(val, &ctx->resource_list, head) {
+			if (val->reserved)
+				vmw_resource_unreserve(val->res,
+						       false, false, false,
+						       NULL, 0);
+		}
+	else
+		list_for_each_entry(val, &ctx->resource_list, head) {
+			if (val->reserved)
+				vmw_resource_unreserve(val->res,
+						       val->dirty_set,
+						       val->dirty,
+						       val->switching_backup,
+						       val->new_backup,
+						       val->new_backup_offset);
+		}
+}
+
+/**
+ * vmw_validation_bo_validate_single - Validate a single buffer object.
+ * @bo: The TTM buffer object base.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether to validate in MOB memory.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob)
+{
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
+	struct ttm_operation_ctx ctx = {
+		.interruptible = interruptible,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (vbo->pin_count > 0)
+		return 0;
+
+	if (validate_as_mob)
+		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+
+	/*
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
+	 */
+
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+	if (ret == 0 || ret == -ERESTARTSYS)
+		return ret;
+
+	/*
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+	return ret;
+}
+
+/**
+ * vmw_validation_bo_validate - Validate all buffer objects registered with
+ * the validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
+{
+	struct vmw_validation_bo_node *entry;
+	int ret;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head) {
+		if (entry->cpu_blit) {
+			struct ttm_operation_ctx ctx = {
+				.interruptible = intr,
+				.no_wait_gpu = false
+			};
+
+			ret = ttm_bo_validate(entry->base.bo,
+					      &vmw_nonfixed_placement, &ctx);
+		} else {
+			ret = vmw_validation_bo_validate_single
+			(entry->base.bo, intr, entry->as_mob);
+		}
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * vmw_validation_res_validate - Validate all resources registered with the
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
+{
+	struct vmw_validation_res_node *val;
+	int ret;
+
+	list_for_each_entry(val, &ctx->resource_list, head) {
+		struct vmw_resource *res = val->res;
+		struct vmw_buffer_object *backup = res->backup;
+
+		ret = vmw_resource_validate(res, intr);
+		if (ret) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to validate resource.\n");
+			return ret;
+		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_buffer_object *vbo = res->backup;
+
+			ret = vmw_validation_add_bo
+				(ctx, vbo, vmw_resource_needs_backup(res),
+				 false);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
+ * and unregister it from this validation context.
+ * @ctx: The validation context.
+ *
+ * The hash table used for duplicate finding is an expensive resource and
+ * may be protected by mutexes that may cause deadlocks during resource
+ * unreferencing if held. After resource- and buffer-object registration,
+ * there is no longer any use for this hash table, so it may be freed
+ * either to shorten any mutex locking time, or before resources and
+ * buffer objects are freed during validation context cleanup.
+ */
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+{
+	struct vmw_validation_bo_node *entry;
+	struct vmw_validation_res_node *val;
+
+	if (!ctx->ht)
+		return;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head)
+		(void) drm_ht_remove_item(ctx->ht, &entry->hash);
+
+	list_for_each_entry(val, &ctx->resource_list, head)
+		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+	list_for_each_entry(val, &ctx->resource_ctx_list, head)
+		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+	ctx->ht = NULL;
+}
+
+/**
+ * vmw_validation_unref_lists - Unregister previously registered buffer
+ * object and resources.
+ * @ctx: The validation context.
+ *
+ * Note that this function may cause buffer object- and resource destructors
+ * to be invoked.
+ */
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
+{
+	struct vmw_validation_bo_node *entry;
+	struct vmw_validation_res_node *val;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head) {
+		ttm_bo_put(entry->base.bo);
+		entry->base.bo = NULL;
+	}
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+	list_for_each_entry(val, &ctx->resource_list, head)
+		vmw_resource_unreference(&val->res);
+
+	/*
+	 * No need to detach each list entry since they are all freed with
+	 * vmw_validation_mem_free(). Just make them inaccessible.
+	 */
+	INIT_LIST_HEAD(&ctx->bo_list);
+	INIT_LIST_HEAD(&ctx->resource_list);
+
+	vmw_validation_mem_free(ctx);
+}
+
+/**
+ * vmw_validation_prepare - Prepare a validation context for command
+ * submission.
+ * @ctx: The validation context.
+ * @mutex: The mutex used to protect resource reservation.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Note that the single reservation mutex @mutex is an unfortunate
+ * construct. Ideally resource reservation should be moved to per-resource
+ * ww_mutexes.
+ * If this function doesn't return zero to indicate success, all resources
+ * are left unreserved but still referenced.
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on error.
+ */
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+			   struct mutex *mutex,
+			   bool intr)
+{
+	int ret = 0;
+
+	if (mutex) {
+		if (intr)
+			ret = mutex_lock_interruptible(mutex);
+		else
+			mutex_lock(mutex);
+		if (ret)
+			return -ERESTARTSYS;
+	}
+
+	ctx->res_mutex = mutex;
+	ret = vmw_validation_res_reserve(ctx, intr);
+	if (ret)
+		goto out_no_res_reserve;
+
+	ret = vmw_validation_bo_reserve(ctx, intr);
+	if (ret)
+		goto out_no_bo_reserve;
+
+	ret = vmw_validation_bo_validate(ctx, intr);
+	if (ret)
+		goto out_no_validate;
+
+	ret = vmw_validation_res_validate(ctx, intr);
+	if (ret)
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	vmw_validation_bo_backoff(ctx);
+out_no_bo_reserve:
+	vmw_validation_res_unreserve(ctx, true);
+out_no_res_reserve:
+	if (mutex)
+		mutex_unlock(mutex);
+
+	return ret;
+}
+
+/**
+ * vmw_validation_revert - Revert validation actions if command submission
+ * failed.
+ *
+ * @ctx: The validation context.
+ *
+ * The caller still needs to unref resources after a call to this function.
+ */
+void vmw_validation_revert(struct vmw_validation_context *ctx)
+{
+	vmw_validation_bo_backoff(ctx);
+	vmw_validation_res_unreserve(ctx, true);
+	if (ctx->res_mutex)
+		mutex_unlock(ctx->res_mutex);
+	vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_done - Commit validation actions after command submission
+ * success.
+ * @ctx: The validation context.
+ * @fence: Fence with which to fence all buffer objects taking part in the
+ * command submission.
+ *
+ * The caller does NOT need to unref resources after a call to this function.
+ */
+void vmw_validation_done(struct vmw_validation_context *ctx,
+			 struct vmw_fence_obj *fence)
+{
+	vmw_validation_bo_fence(ctx, fence);
+	vmw_validation_res_unreserve(ctx, false);
+	if (ctx->res_mutex)
+		mutex_unlock(ctx->res_mutex);
+	vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_preload_bo - Preload the validation memory allocator for a
+ * call to vmw_validation_add_bo().
+ * @ctx: Pointer to the validation context.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
+ * but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
+{
+	unsigned int size = sizeof(struct vmw_validation_bo_node);
+
+	if (!vmw_validation_mem_alloc(ctx, size))
+		return -ENOMEM;
+
+	ctx->mem_size_left += size;
+	return 0;
+}
+
+/**
+ * vmw_validation_preload_res - Preload the validation memory allocator for a
+ * call to vmw_validation_add_resource().
+ * @ctx: Pointer to the validation context.
+ * @size: Size of the validation node extra data. See below.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_resource() with the same or smaller @size is guaranteed
+ * not to sleep. An error is not fatal but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+			       unsigned int size)
+{
+	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
+				    size) +
+		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
+	if (!vmw_validation_mem_alloc(ctx, size))
+		return -ENOMEM;
+
+	ctx->mem_size_left += size;
+	return 0;
+}
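+
+/*
+ * Putting the above together, a command-submission path is expected to look
+ * roughly like the sketch below (illustrative only; ht, vbo, res, fence and
+ * the mutex are placeholders and error handling is trimmed):
+ *
+ *	DECLARE_VAL_CONTEXT(val_ctx, ht, true);
+ *	int ret;
+ *
+ *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
+ *	ret = ret ? ret : vmw_validation_add_resource(&val_ctx, res, 0,
+ *						      VMW_RES_DIRTY_NONE,
+ *						      NULL, NULL);
+ *	ret = ret ? ret : vmw_validation_prepare(&val_ctx, mutex, true);
+ *	if (ret)
+ *		goto out;	// vmw_validation_unref_lists(&val_ctx);
+ *
+ *	// ... emit commands, create fence ...
+ *
+ *	if (submission_failed)
+ *		vmw_validation_revert(&val_ctx);
+ *	else
+ *		vmw_validation_done(&val_ctx, fence);
+ */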
diff --git a/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
new file mode 100644
index 0000000..0e06374
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_VALIDATION_H_
+#define _VMWGFX_VALIDATION_H_
+
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+
+#include <drm/drm_hashtab.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+#define VMW_RES_DIRTY_NONE 0
+#define VMW_RES_DIRTY_SET BIT(0)
+#define VMW_RES_DIRTY_CLEAR BIT(1)
+
+/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint of how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+	size_t gran;
+};
+
+/**
+ * struct vmw_validation_context - Per command submission validation context
+ * @ht: Hash table used to find resource- or buffer object duplicates
+ * @resource_list: List head for resource validation metadata
+ * @resource_ctx_list: List head for resource validation metadata for
+ * resources that need to be validated before those in @resource_list
+ * @bo_list: List head for buffer objects
+ * @page_list: List of pages used by the memory allocator
+ * @ticket: Ticket used for ww mutex locking
+ * @res_mutex: Pointer to mutex used for resource reserving
+ * @merge_dups: Whether to merge metadata for duplicate resources or
+ * buffer objects
+ * @mem_size_left: Free memory left in the last page in @page_list
+ * @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
+ */
+struct vmw_validation_context {
+	struct drm_open_hash *ht;
+	struct list_head resource_list;
+	struct list_head resource_ctx_list;
+	struct list_head bo_list;
+	struct list_head page_list;
+	struct ww_acquire_ctx ticket;
+	struct mutex *res_mutex;
+	unsigned int merge_dups;
+	unsigned int mem_size_left;
+	u8 *page_address;
+	struct vmw_validation_mem *vm;
+	size_t vm_size_left;
+	size_t total_mem;
+};
+
+struct vmw_buffer_object;
+struct vmw_resource;
+struct vmw_fence_obj;
+
+#if 0
+/**
+ * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+ * @_name: The name of the variable
+ * @_ht: The hash table used to find dups or NULL if none
+ * @_merge_dups: Whether to merge duplicate buffer object- or resource
+ * entries. If set to true, ideally a hash table pointer should be supplied
+ * as well unless the number of resources and buffer objects per validation
+ * is known to be very small
+ */
+#endif
+#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)			\
+	struct vmw_validation_context _name =				\
+	{ .ht = _ht,							\
+	  .resource_list = LIST_HEAD_INIT((_name).resource_list),	\
+	  .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+	  .bo_list = LIST_HEAD_INIT((_name).bo_list),			\
+	  .page_list = LIST_HEAD_INIT((_name).page_list),		\
+	  .res_mutex = NULL,						\
+	  .merge_dups = _merge_dups,					\
+	  .mem_size_left = 0,						\
+	}
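+
+/*
+ * Example (illustrative): a caller that needs neither duplicate merging nor
+ * a hash table simply declares
+ *
+ *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ *
+ * on the stack and passes &val_ctx to the functions declared below.
+ */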
+
+/**
+ * vmw_validation_has_bos - return whether the validation context has
+ * any buffer objects registered.
+ *
+ * @ctx: The validation context
+ * Returns: Whether any buffer objects are registered
+ */
+static inline bool
+vmw_validation_has_bos(struct vmw_validation_context *ctx)
+{
+	return !list_empty(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+			   struct vmw_validation_mem *vm)
+{
+	ctx->vm = vm;
+}
+
+/**
+ * vmw_validation_set_ht - Register a hash table for duplicate finding
+ * @ctx: The validation context
+ * @ht: Pointer to a hash table to use for duplicate finding
+ * This function is intended to be used if the hash table wasn't
+ * available at validation context declaration time.
+ */
+static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+					 struct drm_open_hash *ht)
+{
+	ctx->ht = ht;
+}
+
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Perform waits interruptible
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+static inline int
+vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
+			  bool intr)
+{
+	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
+				      NULL, true);
+}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+static inline void
+vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
+ * with a validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve, and fences them with a fence object.
+ */
+static inline void
+vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+			struct vmw_fence_obj *fence)
+{
+	ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
+				    (void *) fence);
+}
+
+/**
+ * vmw_validation_context_init - Initialize a validation context
+ * @ctx: Pointer to the validation context to initialize
+ *
+ * This function initializes a validation context with @merge_dups set
+ * to false
+ */
+static inline void
+vmw_validation_context_init(struct vmw_validation_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+	INIT_LIST_HEAD(&ctx->resource_list);
+	INIT_LIST_HEAD(&ctx->resource_ctx_list);
+	INIT_LIST_HEAD(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_align - Align a validation memory allocation
+ * @val: The size to be aligned
+ *
+ * Returns: @val aligned to the granularity used by the validation memory
+ * allocator.
+ */
+static inline unsigned int vmw_validation_align(unsigned int val)
+{
+	return ALIGN(val, sizeof(long));
+}
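+
+/*
+ * For example, on a 64-bit kernel (sizeof(long) == 8),
+ * vmw_validation_align(13) returns 16, while values that are already
+ * multiples of 8 are returned unchanged.
+ */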
+
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+			  struct vmw_buffer_object *vbo,
+			  bool as_mob, bool cpu_blit);
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+				struct vmw_resource *res,
+				size_t priv_size,
+				u32 dirty,
+				void **p_node,
+				bool *first_usage);
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+			       bool intr);
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+				  bool backoff);
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+				      void *val_private,
+				      struct vmw_buffer_object *vbo,
+				      unsigned long backup_offset);
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
+
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+			   struct mutex *mutex, bool intr);
+void vmw_validation_revert(struct vmw_validation_context *ctx);
+void vmw_validation_done(struct vmw_validation_context *ctx,
+			 struct vmw_fence_obj *fence);
+
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+			       unsigned int size);
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+			       unsigned int size);
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+				  void *val_private, u32 dirty);
+#endif