[Feature] add GA346 baseline version

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/linux/v4.19/drivers/tee/Kconfig b/src/kernel/linux/v4.19/drivers/tee/Kconfig
new file mode 100644
index 0000000..3b54f6b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/Kconfig
@@ -0,0 +1,21 @@
+# Generic Trusted Execution Environment Configuration
+config TEE
+	tristate "Trusted Execution Environment support"
+	depends on HAVE_ARM_SMCCC || COMPILE_TEST
+	select DMA_SHARED_BUFFER
+	select GENERIC_ALLOCATOR
+	help
+	  This implements a generic interface towards a Trusted Execution
+	  Environment (TEE).
+
+if TEE
+
+menu "TEE drivers"
+
+source "drivers/tee/optee/Kconfig"
+source "drivers/tee/gud/Kconfig"
+source "drivers/tee/tkcore/Kconfig"
+
+endmenu
+
+endif
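+
+# Example (illustrative): a defconfig enabling the Trustonic back end would
+# typically set CONFIG_TEE=y together with CONFIG_TRUSTONIC_TEE_SUPPORT=y.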
diff --git a/src/kernel/linux/v4.19/drivers/tee/Makefile b/src/kernel/linux/v4.19/drivers/tee/Makefile
new file mode 100644
index 0000000..d732359
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TEE) += tee.o
+tee-objs += tee_core.o
+tee-objs += tee_shm.o
+tee-objs += tee_shm_pool.o
+obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += gud/
+obj-$(CONFIG_TRUSTKERNEL_TEE_SUPPORT) += tkcore/
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/Kconfig b/src/kernel/linux/v4.19/drivers/tee/gud/410/Kconfig
new file mode 100644
index 0000000..d247f05
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/Kconfig
@@ -0,0 +1,49 @@
+#
+# Trustonic drivers configuration
+#
+config TRUSTONIC_TEE_SUPPORT
+	tristate "Trustonic TEE Driver"
+	depends on ARM || ARM64
+	default y
+	help
+	  Enable Trustonic TEE support.
+	  This option enables the MobiCore driver, which is used to communicate
+	  with the TEE OS for tasks such as setting up log buffers, interrupt
+	  pins, and IPC channels.
+
+config TRUSTONIC_TEE_LPAE
+	bool "Trustonic TEE uses LPAE"
+	depends on TRUSTONIC_TEE_SUPPORT
+	default y if ARM64
+	default n if ARM
+	help
+	  Enable Trustonic TEE 64-bit physical address support.
+	  This allows a 32-bit trusted driver to map and unmap 64-bit physical
+	  addresses, which is useful on devices equipped with more than 4 GB
+	  of RAM.
+
+config TRUSTONIC_TEE_DEBUG
+	bool "Trustonic TEE driver debug mode"
+	depends on TRUSTONIC_TEE_SUPPORT
+	default n
+	help
+	  Enable debug mode in the Trustonic TEE (MobiCore) driver.
+	  The MobiCore driver controls its log level through the DEBUG and
+	  VERBOSE flags; this option enables the MCDRV_DBG() macros, which
+	  print useful debugging information about MobiCore operations.
+
+config TRUSTONIC_TRUSTED_UI
+	tristate "Trustonic Trusted UI"
+	depends on TRUSTONIC_TEE_SUPPORT
+	help
+	  Enable the Trustonic Trusted User Interface.
+	  Trustonic TUI builds on the Trustonic TEE mechanism to protect the
+	  touch and display paths inside the TEE, preventing malicious access
+	  to sensitive data used by applications such as banking apps.
+
+config TRUSTONIC_TRUSTED_UI_FB_BLANK
+	bool "Trustonic Trusted UI with fb_blank"
+	depends on TRUSTONIC_TRUSTED_UI
+	help
+	  Blank the framebuffer before starting a TUI session.
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/Makefile b/src/kernel/linux/v4.19/drivers/tee/gud/410/Makefile
new file mode 100644
index 0000000..63aec37
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/Makefile
@@ -0,0 +1,18 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi core and trusted UI drivers
+#
+export GUD_ROOT_FOLDER := $(dir $(lastword $(MAKEFILE_LIST)))
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += MobiCoreDriver/
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui/
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/Makefile b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/Makefile
new file mode 100644
index 0000000..23bc4c6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/Makefile
@@ -0,0 +1,49 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi core driver
+#
+
+GUD_ROOT_FOLDER := drivers/tee/gud/410/
+
+# add our modules to kernel.
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += mcDrvModule.o
+
+mcDrvModule-y := \
+	admin.o \
+	client.o \
+	clientlib.o \
+	clock.o \
+	fastcall.o \
+	iwp.o \
+	logging.o \
+	main.o \
+	mcp.o \
+	mmu.o \
+	nq.o \
+	session.o \
+	teeclientapi.o \
+	user.o \
+	xen_be.o \
+	xen_common.o \
+	xen_fe.o
+
+# Release mode by default
+ccflags-y += -DNDEBUG
+ccflags-y += -Wno-declaration-after-statement
+
+ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
+
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
+ccflags-y += -I$(srctree)/drivers/staging/android/ion
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.c
new file mode 100644
index 0000000..2e6adff
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.c
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/sched/signal.h>
+#include <linux/freezer.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "main.h"
+#include "mmu.h"	/* For load_check and load_token */
+#include "mcp.h"
+#include "nq.h"
+#include "client.h"
+#include "admin.h"
+
+static struct {
+	struct mutex admin_tgid_mutex;  /* Lock for admin_tgid below */
+	pid_t admin_tgid;
+	int (*tee_start_cb)(void);
+	void (*tee_stop_cb)(void);
+	int last_tee_ret;
+	struct notifier_block tee_stop_notifier;
+} l_ctx;
+
+static struct mc_admin_driver_request {
+	/* Global */
+	struct mutex mutex;		/* Protects access to this struct */
+	struct mutex states_mutex;	/* Protect access to the states */
+	enum client_state {
+		IDLE,
+		REQUEST_SENT,
+		BUFFERS_READY,
+	} client_state;
+	enum server_state {
+		NOT_CONNECTED,		/* Device not open */
+		READY,			/* Waiting for requests */
+		REQUEST_RECEIVED,	/* Got a request, is working */
+		RESPONSE_SENT,		/* Has sent a response header */
+		DATA_SENT,		/* Blocked until data is consumed */
+	} server_state;
+	/* Request */
+	u32 request_id;
+	struct mc_admin_request request;
+	struct completion client_complete;
+	/* Response */
+	struct mc_admin_response response;
+	struct completion server_complete;
+	void *buffer;			/* Reception buffer (pre-allocated) */
+	size_t size;			/* Size of the reception buffer */
+	bool lock_channel_during_freeze;/* Is freezing ongoing? */
+} g_request;
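+
+/* Illustrative flow of the request/response protocol between this driver
+ * (client side) and the admin daemon (server side), as tracked by the state
+ * enums above:
+ *   client: IDLE -> REQUEST_SENT -> BUFFERS_READY -> IDLE
+ *   server: READY -> REQUEST_RECEIVED -> RESPONSE_SENT -> DATA_SENT -> READY
+ * The daemon goes back to READY directly after REQUEST_RECEIVED when the
+ * response carries no payload, and drops to NOT_CONNECTED on close.
+ */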
+
+/* The mutex around the channel communication has to be wrapped in order
+ * to handle this use case:
+ * client 1 calls request_send()
+ *	    and waits on wait_for_completion_interruptible (channel mutex held)
+ * client 2 calls request_send()
+ *	    and waits on mutex_lock(channel mutex)
+ * The kernel starts freezing tasks (suspend or reboot ongoing).
+ * If we do nothing, the freeze will be aborted, because clients 1 and 2
+ * would have to enter the refrigerator by themselves.
+ * Note: a mutex cannot be held during freezing, so client 1 has to release it.
+ * => step 1: client 1 sets a flag that says the channel is still in use
+ * => step 2: client 1 releases the lock and enters the refrigerator
+ * => now any client trying to use the channel sees the flag, is prevented
+ *    from using the channel, and also has to enter the refrigerator.
+ *
+ * The three functions below handle this.
+ */
+static void check_freezing_ongoing(void)
+{
+	/* We don't want to let the channel be used. Let everyone know
+	 * that we're using it
+	 */
+	g_request.lock_channel_during_freeze = 1;
+	/* Now we can safely release the lock */
+	mutex_unlock(&g_request.mutex);
+	/* Let's try to freeze */
+	try_to_freeze();
+	/* Either freezing happened or was canceled.
+	 * In both cases, reclaim the lock
+	 */
+	mutex_lock(&g_request.mutex);
+	g_request.lock_channel_during_freeze = 0;
+}
+
+static void channel_lock(void)
+{
+	while (1) {
+		mutex_lock(&g_request.mutex);
+		/* We took the lock, but is there any freezing ongoing? */
+		if (g_request.lock_channel_during_freeze == 0)
+			break;
+
+		/* yes, so let's freeze */
+		mutex_unlock(&g_request.mutex);
+		try_to_freeze();
+		/* Either freezing succeeded or was canceled.
+		 * In both cases, try again to get the lock.
+		 * Give some CPU time to let the contender
+		 * finish its channel operation
+		 */
+		msleep(500);
+	}
+}
+
+static void channel_unlock(void)
+{
+	mutex_unlock(&g_request.mutex);
+}
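+
+/* Typical use of the channel by the admin_get_*() helpers below (sketch):
+ *
+ *	channel_lock();
+ *	ret = request_send(command, uuid, is_gp, spid);
+ *	if (!ret)
+ *		ret = request_receive(buffer, g_request.response.length);
+ *	channel_unlock();
+ *
+ * request_cancel() is called instead of request_receive() when a request
+ * has to be aborted.
+ */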
+
+static inline void reinit_completion_local(struct completion *x)
+{
+	reinit_completion(x);
+}
+
+static struct tee_object *tee_object_alloc(bool is_sp_trustlet, size_t length)
+{
+	struct tee_object *obj;
+	size_t size = sizeof(*obj) + length;
+	size_t header_length = 0;
+
+	/* Determine required size */
+	if (is_sp_trustlet) {
+		/* Need space for lengths info and containers */
+		header_length = sizeof(struct mc_blob_len_info);
+		size += header_length + 3 * MAX_SO_CONT_SIZE;
+	}
+
+	/* Check size for overflow */
+	if (size < length || size > OBJECT_LENGTH_MAX) {
+		mc_dev_err(-ENOMEM, "cannot allocate object of size %zu",
+			   length);
+		return NULL;
+	}
+
+	/* Allocate memory */
+	obj = vzalloc(size);
+	if (!obj)
+		return NULL;
+
+	/* A non-zero header_length indicates that we have a SP trustlet */
+	obj->header_length = (u32)header_length;
+	obj->length = (u32)length;
+	return obj;
+}
+
+void tee_object_free(struct tee_object *obj)
+{
+	vfree(obj);
+}
+
+static inline void client_state_change(enum client_state state)
+{
+	mutex_lock(&g_request.states_mutex);
+	mc_dev_devel("client state changes from %d to %d",
+		     g_request.client_state, state);
+	g_request.client_state = state;
+	mutex_unlock(&g_request.states_mutex);
+}
+
+static inline bool client_state_is(enum client_state state)
+{
+	bool is;
+
+	mutex_lock(&g_request.states_mutex);
+	is = g_request.client_state == state;
+	mutex_unlock(&g_request.states_mutex);
+	return is;
+}
+
+static inline void server_state_change(enum server_state state)
+{
+	mutex_lock(&g_request.states_mutex);
+	mc_dev_devel("server state changes from %d to %d",
+		     g_request.server_state, state);
+	g_request.server_state = state;
+	mutex_unlock(&g_request.states_mutex);
+}
+
+static inline bool server_state_is(enum server_state state)
+{
+	bool is;
+
+	mutex_lock(&g_request.states_mutex);
+	is = g_request.server_state == state;
+	mutex_unlock(&g_request.states_mutex);
+	return is;
+}
+
+static void request_cancel(void);
+
+static int request_send(u32 command, const struct mc_uuid_t *uuid, bool is_gp,
+			u32 spid)
+{
+	int counter = 0;
+	int wait_tens = 0;
+	int ret = 0;
+
+	/* Prepare request */
+	mutex_lock(&g_request.states_mutex);
+	/* Wait a little for daemon to connect */
+	while (g_request.server_state == NOT_CONNECTED) {
+		mutex_unlock(&g_request.states_mutex);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+
+		if (counter++ == 10) {
+			wait_tens++;
+			mc_dev_info("daemon not connected after %d0s, waiting",
+				    wait_tens);
+			counter = 0;
+		}
+
+		ssleep(1);
+		mutex_lock(&g_request.states_mutex);
+	}
+
+	WARN_ON(g_request.client_state != IDLE);
+	if (g_request.server_state != READY) {
+		mutex_unlock(&g_request.states_mutex);
+		if (g_request.server_state != NOT_CONNECTED) {
+			ret = -EPROTO;
+			mc_dev_err(ret, "invalid daemon state %d",
+				   g_request.server_state);
+			goto end;
+		} else {
+			ret = -EHOSTUNREACH;
+			mc_dev_err(ret, "daemon not connected");
+			goto end;
+		}
+	}
+
+	memset(&g_request.request, 0, sizeof(g_request.request));
+	memset(&g_request.response, 0, sizeof(g_request.response));
+	/*
+	 * Do not update the request ID until it is dealt with, in case the
+	 * daemon arrives later.
+	 */
+	g_request.request.request_id = g_request.request_id;
+	g_request.request.command = command;
+	if (uuid)
+		memcpy(&g_request.request.uuid, uuid, sizeof(*uuid));
+	else
+		memset(&g_request.request.uuid, 0, sizeof(*uuid));
+
+	g_request.request.is_gp = is_gp;
+	g_request.request.spid = spid;
+	g_request.client_state = REQUEST_SENT;
+	mutex_unlock(&g_request.states_mutex);
+
+	/* Send request */
+	complete(&g_request.client_complete);
+	mc_dev_devel("request sent");
+
+	/* Wait for header */
+	do {
+		ret = wait_for_completion_interruptible(
+						&g_request.server_complete);
+		if (!ret)
+			break;
+		/* we may have to freeze now */
+		check_freezing_ongoing();
+		/* freezing happened or was canceled,
+		 * let's sleep and try again
+		 */
+		msleep(500);
+	} while (1);
+	mc_dev_devel("response received");
+
+	/* Server should be waiting with some data for us */
+	mutex_lock(&g_request.states_mutex);
+	switch (g_request.server_state) {
+	case NOT_CONNECTED:
+		/* Daemon gone */
+		ret = -EPIPE;
+		mc_dev_devel("daemon disconnected");
+		break;
+	case READY:
+		/* No data to come, likely an error */
+		ret = -g_request.response.error_no;
+		mc_dev_devel("daemon ret=%d", ret);
+		break;
+	case RESPONSE_SENT:
+	case DATA_SENT:
+		/* Normal case, data to come */
+		ret = 0;
+		break;
+	case REQUEST_RECEIVED:
+		/* Should not happen as complete means the state changed */
+		ret = -EPIPE;
+		mc_dev_err(ret, "daemon is in a bad state: %d",
+			   g_request.server_state);
+		break;
+	}
+
+	mutex_unlock(&g_request.states_mutex);
+
+end:
+	if (ret)
+		request_cancel();
+
+	mc_dev_devel("ret=%d", ret);
+	return ret;
+}
+
+static int request_receive(void *address, u32 size)
+{
+	/*
+	 * At this point we have received the header and prepared some buffers
+	 * to receive data that we know are coming from the server.
+	 */
+
+	/* Check server state */
+	bool server_ok;
+
+	mutex_lock(&g_request.states_mutex);
+	server_ok = (g_request.server_state == RESPONSE_SENT) ||
+		    (g_request.server_state == DATA_SENT);
+	mutex_unlock(&g_request.states_mutex);
+	if (!server_ok) {
+		int ret = -EPIPE;
+
+		mc_dev_err(ret, "expected server state %d or %d, not %d",
+			   RESPONSE_SENT, DATA_SENT, g_request.server_state);
+		request_cancel();
+		return ret;
+	}
+
+	/* Setup reception buffer */
+	g_request.buffer = address;
+	g_request.size = size;
+	client_state_change(BUFFERS_READY);
+
+	/* Unlock write of data */
+	complete(&g_request.client_complete);
+
+	/* Wait for data */
+	do {
+		int ret = 0;
+
+		ret = wait_for_completion_interruptible(
+					     &g_request.server_complete);
+		if (!ret)
+			break;
+		/* We may have to freeze now */
+		check_freezing_ongoing();
+		/* freezing happened or was canceled,
+		 * let's sleep and try again
+		 */
+		msleep(500);
+	} while (1);
+
+	/* Reset reception buffer */
+	g_request.buffer = NULL;
+	g_request.size = 0;
+
+	/* Return to idle state */
+	client_state_change(IDLE);
+	return 0;
+}
+
+/* Must be called instead of request_receive() to cancel a pending request */
+static void request_cancel(void)
+{
+	/* Unlock write of data */
+	mutex_lock(&g_request.states_mutex);
+	if (g_request.server_state == DATA_SENT)
+		complete(&g_request.client_complete);
+
+	/* Return to idle state */
+	g_request.client_state = IDLE;
+	mutex_unlock(&g_request.states_mutex);
+}
+
+static int admin_get_root_container(void *address)
+{
+	int ret = 0;
+
+	/* Lock communication channel */
+	channel_lock();
+
+	/* Send request and wait for header */
+	ret = request_send(MC_DRV_GET_ROOT_CONTAINER, NULL, 0, 0);
+	if (ret)
+		goto end;
+
+	/* Check length against max */
+	if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+		request_cancel();
+		ret = -EREMOTEIO;
+		mc_dev_err(ret, "response length exceeds maximum");
+		goto end;
+	}
+
+	/* Get data */
+	ret = request_receive(address, g_request.response.length);
+	if (!ret)
+		ret = g_request.response.length;
+
+end:
+	channel_unlock();
+	return ret;
+}
+
+static int admin_get_sp_container(void *address, u32 spid)
+{
+	int ret = 0;
+
+	/* Lock communication channel */
+	channel_lock();
+
+	/* Send request and wait for header */
+	ret = request_send(MC_DRV_GET_SP_CONTAINER, NULL, 0, spid);
+	if (ret)
+		goto end;
+
+	/* Check length against max */
+	if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+		request_cancel();
+		ret = -EREMOTEIO;
+		mc_dev_err(ret, "response length exceeds maximum");
+		goto end;
+	}
+
+	/* Get data */
+	ret = request_receive(address, g_request.response.length);
+	if (!ret)
+		ret = g_request.response.length;
+
+end:
+	channel_unlock();
+	return ret;
+}
+
+static int admin_get_trustlet_container(void *address,
+					const struct mc_uuid_t *uuid, u32 spid)
+{
+	int ret = 0;
+
+	/* Lock communication channel */
+	channel_lock();
+
+	/* Send request and wait for header */
+	ret = request_send(MC_DRV_GET_TRUSTLET_CONTAINER, uuid, 0, spid);
+	if (ret)
+		goto end;
+
+	/* Check length against max */
+	if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+		request_cancel();
+		ret = -EREMOTEIO;
+		mc_dev_err(ret, "response length exceeds maximum");
+		goto end;
+	}
+
+	/* Get data */
+	ret = request_receive(address, g_request.response.length);
+	if (!ret)
+		ret = g_request.response.length;
+
+end:
+	channel_unlock();
+	return ret;
+}
+
+static struct tee_object *admin_get_trustlet(const struct mc_uuid_t *uuid,
+					     bool is_gp, u32 *spid)
+{
+	struct tee_object *obj = NULL;
+	bool is_sp_tl;
+	int ret = 0;
+
+	/* Lock communication channel */
+	channel_lock();
+
+	/* Send request and wait for header */
+	ret = request_send(MC_DRV_GET_TRUSTLET, uuid, is_gp, 0);
+	if (ret)
+		goto end;
+
+	/* Allocate memory */
+	is_sp_tl = g_request.response.service_type == SERVICE_TYPE_SP_TRUSTLET;
+	obj = tee_object_alloc(is_sp_tl, g_request.response.length);
+	if (!obj) {
+		request_cancel();
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* Get data */
+	ret = request_receive(&obj->data[obj->header_length], obj->length);
+	*spid = g_request.response.spid;
+
+end:
+	channel_unlock();
+	if (ret)
+		return ERR_PTR(ret);
+
+	return obj;
+}
+
+static void mc_admin_sendcrashdump(void)
+{
+	int ret = 0;
+
+	/* Lock communication channel */
+	channel_lock();
+
+	/* Send request and wait for header */
+	ret = request_send(MC_DRV_SIGNAL_CRASH, NULL, false, 0);
+	if (ret)
+		goto end;
+
+	/* Done */
+	request_cancel();
+
+end:
+	channel_unlock();
+}
+
+static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
+				void *data)
+{
+	mc_admin_sendcrashdump();
+	l_ctx.last_tee_ret = -EHOSTUNREACH;
+	return 0;
+}
+
+static int tee_object_make(u32 spid, struct tee_object *obj)
+{
+	struct mc_blob_len_info *l_info = (struct mc_blob_len_info *)obj->data;
+	u8 *address = &obj->data[obj->header_length + obj->length];
+	struct mclf_header_v2 *thdr;
+	int ret;
+
+	/* Get root container */
+	ret = admin_get_root_container(address);
+	if (ret < 0)
+		goto err;
+
+	l_info->root_size = ret;
+	address += ret;
+
+	/* Get SP container */
+	ret = admin_get_sp_container(address, spid);
+	if (ret < 0)
+		goto err;
+
+	l_info->sp_size = ret;
+	address += ret;
+
+	/* Get trustlet container */
+	thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
+	ret = admin_get_trustlet_container(address, &thdr->uuid, spid);
+	if (ret < 0)
+		goto err;
+
+	l_info->ta_size = ret;
+	address += ret;
+
+	/* Setup lengths information */
+	l_info->magic = MC_TLBLOBLEN_MAGIC;
+	obj->length += sizeof(*l_info);
+	obj->length += l_info->root_size + l_info->sp_size + l_info->ta_size;
+	ret = 0;
+
+err:
+	return ret;
+}
+
+struct tee_object *tee_object_copy(uintptr_t address, size_t length)
+{
+	struct tee_object *obj;
+
+	/* Allocate memory */
+	obj = tee_object_alloc(false, length);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	/* Copy trustlet */
+	memcpy(obj->data, (void *)address, length);
+	return obj;
+}
+
+struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length)
+{
+	char __user *addr = (char __user *)address;
+	struct tee_object *obj;
+	u8 *data;
+	struct mclf_header_v2 thdr;
+	int ret;
+
+	/* Check length */
+	if (length < sizeof(thdr)) {
+		ret = -EFAULT;
+		mc_dev_err(ret, "buffer shorter than header size");
+		return ERR_PTR(ret);
+	}
+
+	/* Read header */
+	if (copy_from_user(&thdr, addr, sizeof(thdr))) {
+		ret = -EFAULT;
+		mc_dev_err(ret, "header: copy_from_user failed");
+		return ERR_PTR(ret);
+	}
+
+	/* Check header */
+	if ((thdr.intro.magic != MC_SERVICE_HEADER_MAGIC_BE) &&
+	    (thdr.intro.magic != MC_SERVICE_HEADER_MAGIC_LE)) {
+		ret = -EINVAL;
+		mc_dev_err(ret, "header: invalid magic");
+		return ERR_PTR(ret);
+	}
+
+	/* Allocate memory */
+	obj = tee_object_alloc(thdr.service_type == SERVICE_TYPE_SP_TRUSTLET,
+			       length);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	/* Copy header */
+	data = &obj->data[obj->header_length];
+	memcpy(data, &thdr, sizeof(thdr));
+	/* Copy the rest of the data */
+	data += sizeof(thdr);
+	if (copy_from_user(data, &addr[sizeof(thdr)], length - sizeof(thdr))) {
+		ret = -EFAULT;
+		mc_dev_err(ret, "data: copy_from_user failed");
+		vfree(obj);
+		return ERR_PTR(ret);
+	}
+
+	if (obj->header_length) {
+		ret = tee_object_make(spid, obj);
+		if (ret) {
+			vfree(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
+struct tee_object *tee_object_select(const struct mc_uuid_t *uuid)
+{
+	struct tee_object *obj;
+	struct mclf_header_v2 *thdr;
+
+	obj = tee_object_alloc(false, sizeof(*thdr));
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
+	memcpy(&thdr->uuid, uuid, sizeof(thdr->uuid));
+	return obj;
+}
+
+struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp)
+{
+	struct tee_object *obj;
+	u32 spid = 0;
+
+	/* admin_get_trustlet creates the right object based on service type */
+	obj = admin_get_trustlet(uuid, is_gp, &spid);
+	if (IS_ERR(obj))
+		return obj;
+
+	/* SP trustlet: create full secure object with all containers */
+	if (obj->header_length) {
+		int ret;
+
+		/* The SPID was not found, so return ENOENT rather than EINVAL */
+		if (!spid) {
+			vfree(obj);
+			return ERR_PTR(-ENOENT);
+		}
+
+		ret = tee_object_make(spid, obj);
+		if (ret) {
+			vfree(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
+static inline int load_driver(struct tee_client *client,
+			      struct mc_admin_load_info *load_info)
+{
+	struct mcp_open_info info = {
+		.spid = load_info->spid,
+		.va = load_info->address,
+		.len = load_info->length,
+		.uuid = &load_info->uuid,
+		.tci_len = PAGE_SIZE,
+		.user = true,
+	};
+
+	u32 session_id = 0;
+	int ret;
+
+	if (info.va)
+		info.type = TEE_MC_DRIVER;
+	else
+		info.type = TEE_MC_DRIVER_UUID;
+
+	/* Create DCI in case it's needed */
+	ret = client_cbuf_create(client, info.tci_len, &info.tci_va, NULL);
+	if (ret)
+		return ret;
+
+	/* Open session */
+	ret = client_mc_open_common(client, &info, &session_id);
+	if (!ret)
+		mc_dev_devel("driver loaded with session id %x", session_id);
+
+	/*
+	 * Always 'free' the buffer here (it remains allocated for as long as
+	 * it is in use); it is never freed otherwise
+	 */
+	client_cbuf_free(client, info.tci_va);
+
+	return ret;
+}
+
+static inline int load_token(struct mc_admin_load_info *token)
+{
+	struct tee_mmu *mmu;
+	struct mcp_buffer_map map;
+	struct mc_ioctl_buffer buf;
+	int ret;
+
+	buf.va = (uintptr_t)token->address;
+	buf.len = token->length;
+	buf.flags = MC_IO_MAP_INPUT;
+	mmu = tee_mmu_create(current->mm, &buf);
+	if (IS_ERR(mmu))
+		return PTR_ERR(mmu);
+
+	tee_mmu_buffer(mmu, &map);
+	ret = mcp_load_token(token->address, &map);
+	tee_mmu_put(mmu);
+	return ret;
+}
+
+static inline int load_check(struct mc_admin_load_info *info)
+{
+	struct tee_object *obj;
+	struct tee_mmu *mmu;
+	struct mcp_buffer_map map;
+	struct mc_ioctl_buffer buf;
+	int ret;
+
+	obj = tee_object_read(info->spid, info->address, info->length);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	buf.va = (uintptr_t)obj->data;
+	buf.len = obj->length;
+	buf.flags = MC_IO_MAP_INPUT;
+	mmu = tee_mmu_create(NULL, &buf);
+	if (IS_ERR(mmu))
+		return PTR_ERR(mmu);
+
+	tee_mmu_buffer(mmu, &map);
+	ret = mcp_load_check(obj, &map);
+	tee_mmu_put(mmu);
+	return ret;
+}
+
+static inline int load_key_so(struct mc_admin_load_info *key_so)
+{
+	struct tee_mmu *mmu;
+	struct mcp_buffer_map map;
+	struct mc_ioctl_buffer buf;
+	int ret;
+
+	buf.va = (uintptr_t)key_so->address;
+	buf.len = key_so->length;
+	buf.flags = MC_IO_MAP_INPUT;
+	mmu = tee_mmu_create(current->mm, &buf);
+	if (IS_ERR(mmu))
+		return PTR_ERR(mmu);
+
+	tee_mmu_buffer(mmu, &map);
+	ret = mcp_load_key_so(key_so->address, &map);
+	tee_mmu_put(mmu);
+	return ret;
+}
+
+static ssize_t admin_write(struct file *file, const char __user *user,
+			   size_t len, loff_t *off)
+{
+	int ret;
+
+	/* No offset allowed */
+	if (*off) {
+		ret = -ECOMM;
+		mc_dev_err(ret, "offset not supported");
+		g_request.response.error_no = EPIPE;
+		goto err;
+	}
+
+	if (server_state_is(REQUEST_RECEIVED)) {
+		/* Check client state */
+		if (!client_state_is(REQUEST_SENT)) {
+			ret = -EPIPE;
+			mc_dev_err(ret, "expected client state %d, not %d",
+				   REQUEST_SENT, g_request.client_state);
+			g_request.response.error_no = EPIPE;
+			goto err;
+		}
+
+		/* Receive response header */
+		if (copy_from_user(&g_request.response, user,
+				   sizeof(g_request.response))) {
+			ret = -ECOMM;
+			mc_dev_err(ret, "failed to get response from daemon");
+			g_request.response.error_no = EPIPE;
+			goto err;
+		}
+
+		/* Check request ID */
+		if (g_request.request.request_id !=
+						g_request.response.request_id) {
+			ret = -EBADE;
+			mc_dev_err(ret, "expected id %d, not %d",
+				   g_request.request.request_id,
+				   g_request.response.request_id);
+			g_request.response.error_no = EPIPE;
+			goto err;
+		}
+
+		/* Response header is acceptable */
+		ret = sizeof(g_request.response);
+		if (g_request.response.length)
+			server_state_change(RESPONSE_SENT);
+		else
+			server_state_change(READY);
+
+		goto end;
+	} else if (server_state_is(RESPONSE_SENT)) {
+		/* Server is waiting */
+		server_state_change(DATA_SENT);
+
+		/* Get data */
+		ret = wait_for_completion_interruptible(
+						&g_request.client_complete);
+
+		/* The server received a signal, let's see if it tries again */
+		if (ret) {
+			server_state_change(RESPONSE_SENT);
+			return ret;
+		}
+
+		/* Check client state */
+		if (!client_state_is(BUFFERS_READY)) {
+			ret = -EPIPE;
+			mc_dev_err(ret, "expected client state %d, not %d",
+				   BUFFERS_READY, g_request.client_state);
+			g_request.response.error_no = EPIPE;
+			goto err;
+		}
+
+		/* We do not deal with several writes */
+		if (len != g_request.size)
+			len = g_request.size;
+
+		ret = copy_from_user(g_request.buffer, user, len);
+		if (ret) {
+			ret = -ECOMM;
+			mc_dev_err(ret, "failed to get data from daemon");
+			g_request.response.error_no = EPIPE;
+			goto err;
+		}
+
+		ret = len;
+		server_state_change(READY);
+		goto end;
+	} else {
+		ret = -ECOMM;
+		goto err;
+	}
+
+err:
+	server_state_change(READY);
+end:
+	complete(&g_request.server_complete);
+	return ret;
+}
+
+static ssize_t admin_read(struct file *file, char __user *user, size_t len,
+			  loff_t *off)
+{
+	/* No offset allowed */
+	if (*off) {
+		int ret = -ECOMM;
+
+		mc_dev_err(ret, "offset not supported");
+		return ret;
+	}
+
+	return nq_get_stop_message(user, len);
+}
+
+static long admin_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	void __user *uarg = (void __user *)arg;
+	int ret = -EINVAL;
+
+	mc_dev_devel("%u from %s", _IOC_NR(cmd), current->comm);
+
+	switch (cmd) {
+	case MC_ADMIN_IO_GET_DRIVER_REQUEST: {
+		/* Update TGID as it may change (when becoming a daemon) */
+		if (l_ctx.admin_tgid != current->tgid) {
+			l_ctx.admin_tgid = current->tgid;
+			mc_dev_info("daemon PID changed to %d",
+				    l_ctx.admin_tgid);
+		}
+
+		/* Block until a request is available */
+		server_state_change(READY);
+		ret = wait_for_completion_interruptible(
+						&g_request.client_complete);
+		if (ret)
+			/* Interrupted by signal */
+			break;
+
+		/* Check client state */
+		if (!client_state_is(REQUEST_SENT)) {
+			ret = -EPIPE;
+			mc_dev_err(ret, "expected client state %d, not %d",
+				   REQUEST_SENT, g_request.client_state);
+			g_request.response.error_no = EPIPE;
+			complete(&g_request.server_complete);
+			break;
+		}
+
+		/* Send request (the driver request mutex is held) */
+		ret = copy_to_user(uarg, &g_request.request,
+				   sizeof(g_request.request));
+		if (ret) {
+			server_state_change(READY);
+			complete(&g_request.server_complete);
+			ret = -EPROTO;
+			break;
+		}
+
+		/* Now that the daemon got it, update the request ID */
+		g_request.request_id++;
+
+		server_state_change(REQUEST_RECEIVED);
+		break;
+	}
+	case MC_ADMIN_IO_GET_INFO: {
+		struct mc_admin_driver_info info;
+
+		info.drv_version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+					      MCDRVMODULEAPI_VERSION_MINOR);
+		info.initial_cmd_id = g_request.request_id;
+		ret = copy_to_user(uarg, &info, sizeof(info));
+		break;
+	}
+	case MC_ADMIN_IO_LOAD_DRIVER: {
+		struct tee_client *client = file->private_data;
+		struct mc_admin_load_info info;
+
+		if (copy_from_user(&info, uarg, sizeof(info))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		/* Make sure we have a local client */
+		if (!client) {
+			client = client_create(true);
+			/* Store client for future use/close */
+			file->private_data = client;
+		}
+
+		if (!client) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = load_driver(client, &info);
+		break;
+	}
+	case MC_ADMIN_IO_LOAD_TOKEN: {
+		struct mc_admin_load_info info;
+
+		if (copy_from_user(&info, uarg, sizeof(info))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = load_token(&info);
+		break;
+	}
+	case MC_ADMIN_IO_LOAD_CHECK: {
+		struct mc_admin_load_info info;
+
+		if (copy_from_user(&info, uarg, sizeof(info))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = load_check(&info);
+		break;
+	}
+	case MC_ADMIN_IO_LOAD_KEY_SO: {
+		struct mc_admin_load_info info;
+
+		if (copy_from_user(&info, uarg, sizeof(info))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = load_key_so(&info);
+		break;
+	}
+	default:
+		ret = -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+/*
+ * admin_release() - This function is called from user space as close(...).
+ * The client data is freed and the associated memory pages are unreserved.
+ *
+ * @inode
+ * @file
+ *
+ * Returns 0
+ */
+static int admin_release(struct inode *inode, struct file *file)
+{
+	/* Close client if any */
+	if (file->private_data)
+		client_close((struct tee_client *)file->private_data);
+
+	/* Requests from driver to daemon */
+	mutex_lock(&g_request.states_mutex);
+	mc_dev_devel("server state changes from %d to %d",
+		     g_request.server_state, NOT_CONNECTED);
+	g_request.server_state = NOT_CONNECTED;
+	/* A client state other than IDLE indicates that a thread is waiting */
+	if (g_request.client_state != IDLE) {
+		g_request.response.error_no = ESHUTDOWN;
+		complete(&g_request.server_complete);
+	}
+	mutex_unlock(&g_request.states_mutex);
+	mc_dev_info("daemon connection closed, TGID %d", l_ctx.admin_tgid);
+	l_ctx.admin_tgid = 0;
+
+	/*
+	 * ret is quite irrelevant here as most apps don't care about the
+	 * return value from close() and it's quite difficult to recover
+	 */
+	return 0;
+}
+
+static int admin_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	/* Only one connection allowed to admin interface */
+	mutex_lock(&l_ctx.admin_tgid_mutex);
+	if (l_ctx.admin_tgid) {
+		ret = -EBUSY;
+		mc_dev_err(ret, "daemon connection already open, PID %d",
+			   l_ctx.admin_tgid);
+	} else {
+		l_ctx.admin_tgid = current->tgid;
+	}
+	mutex_unlock(&l_ctx.admin_tgid_mutex);
+	if (ret)
+		return ret;
+
+	/* Setup the usual variables */
+	mc_dev_devel("accept %s as daemon", current->comm);
+
+	/*
+	 * daemon is connected so now we can safely suppose
+	 * the secure world is loaded too
+	 */
+	if (l_ctx.last_tee_ret == TEE_START_NOT_TRIGGERED)
+		l_ctx.last_tee_ret = l_ctx.tee_start_cb();
+
+	/* Failed to start the TEE, either now or before */
+	if (l_ctx.last_tee_ret) {
+		mutex_lock(&l_ctx.admin_tgid_mutex);
+		l_ctx.admin_tgid = 0;
+		mutex_unlock(&l_ctx.admin_tgid_mutex);
+		return l_ctx.last_tee_ret;
+	}
+
+	reinit_completion_local(&g_request.client_complete);
+	reinit_completion_local(&g_request.server_complete);
+	/* Requests from driver to daemon */
+	mc_dev_info("daemon connection open, TGID %d", l_ctx.admin_tgid);
+	return 0;
+}
+
+/* function table structure of this device driver. */
+static const struct file_operations mc_admin_fops = {
+	.owner = THIS_MODULE,
+	.open = admin_open,
+	.release = admin_release,
+	.unlocked_ioctl = admin_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = admin_ioctl,
+#endif
+	.write = admin_write,
+	.read = admin_read,
+};
+
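+/* Illustrative usage (the actual call site lives in the main driver, outside
+ * this file): mc_admin_init() initializes the given cdev with the admin file
+ * operations and registers the TEE start/stop callbacks; the caller is then
+ * expected to add the cdev to create the admin device node and to call
+ * mc_admin_exit() on teardown.
+ */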
+int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
+		  void (*tee_stop_cb)(void))
+{
+	mutex_init(&l_ctx.admin_tgid_mutex);
+	/* Requests from driver to daemon */
+	mutex_init(&g_request.mutex);
+	mutex_init(&g_request.states_mutex);
+	g_request.request_id = 42;
+	init_completion(&g_request.client_complete);
+	init_completion(&g_request.server_complete);
+	l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+	nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+	/* Create char device */
+	cdev_init(cdev, &mc_admin_fops);
+	/* Register the call back for starting the secure world */
+	l_ctx.tee_start_cb = tee_start_cb;
+	l_ctx.tee_stop_cb = tee_stop_cb;
+	l_ctx.last_tee_ret = TEE_START_NOT_TRIGGERED;
+	return 0;
+}
+
+void mc_admin_exit(void)
+{
+	nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+	if (!l_ctx.last_tee_ret)
+		l_ctx.tee_stop_cb();
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.h
new file mode 100644
index 0000000..05eb2c3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/admin.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_ADMIN_H_
+#define _MC_ADMIN_H_
+
+struct cdev;
+struct mc_uuid_t;
+struct tee_object;
+
+int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
+		  void (*tee_stop_cb)(void));
+void mc_admin_exit(void);
+
+struct tee_object *tee_object_select(const struct mc_uuid_t *uuid);
+struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp);
+struct tee_object *tee_object_copy(uintptr_t address, size_t length);
+struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length);
+void tee_object_free(struct tee_object *object);
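+
+/* Illustrative lifecycle of a tee_object (sketch): obtain one with
+ * tee_object_get(), tee_object_read(), tee_object_copy() or
+ * tee_object_select(), check the result with IS_ERR(), use its data and
+ * length fields, then release it with tee_object_free().
+ */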
+
+#endif /* _MC_ADMIN_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/arm.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/arm.h
new file mode 100644
index 0000000..9325d8f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/arm.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef _MC_ARM_H_
+#define _MC_ARM_H_
+
+#include "main.h"
+
+#ifdef CONFIG_ARM64
+static inline bool has_security_extensions(void)
+{
+	return true;
+}
+
+static inline bool is_secure_mode(void)
+{
+	return false;
+}
+#else
+/*
+ * ARM TrustZone specific masks and modes
+ * Vanilla Linux is unaware of the TrustZone extension,
+ * i.e. arch/arm/include/asm/ptrace.h does not define monitor mode.
+ * The TZ bits in cpuid are not defined either; the ARM port uses magic
+ * numbers, see arch/arm/kernel/setup.c
+ */
+#define ARM_MONITOR_MODE		(0x16) /*(0b10110)*/
+#define ARM_SECURITY_EXTENSION_MASK	(0x30)
+
+/* check if CPU supports the ARM TrustZone Security Extensions */
+static inline bool has_security_extensions(void)
+{
+	u32 fea = 0;
+
+	asm volatile(
+		"mrc p15, 0, %[fea], cr0, cr1, 0" :
+		[fea]"=r" (fea));
+
+	mc_dev_devel("CPU Features: 0x%X", fea);
+
+	/*
+	 * If the CPU features ID has 0 for security features then the CPU
+	 * doesn't support TrustZone at all!
+	 */
+	if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
+		return false;
+
+	return true;
+}
+
+/* check if running in secure mode */
+static inline bool is_secure_mode(void)
+{
+	u32 cpsr = 0;
+	u32 nsacr = 0;
+
+	asm volatile(
+		"mrc	p15, 0, %[nsacr], cr1, cr1, 2\n"
+		"mrs %[cpsr], cpsr\n" :
+		[nsacr]"=r" (nsacr),
+		[cpsr]"=r"(cpsr));
+
+	mc_dev_devel("CPSR.M set to 0x%X", cpsr & MODE_MASK);
+	mc_dev_devel("NSACR set to 0x%X", nsacr);
+
+	/*
+	 * If the NSACR contains the reset value (0), then most likely we are
+	 * running in secure mode.
+	 * If the CPSR mode is set to monitor mode, then we cannot load!
+	 */
+	if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
+		return true;
+
+	return false;
+}
+#endif
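+
+/* Illustrative use at driver start-up (sketch): refuse to load when
+ * has_security_extensions() returns false or is_secure_mode() returns true,
+ * since the TEE cannot be reached in either case.
+ */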
+
+#endif /* _MC_ARM_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/build_tag.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/build_tag.h
new file mode 100644
index 0000000..db7db6f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/build_tag.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef MOBICORE_COMPONENT_BUILD_TAG
+#define MOBICORE_COMPONENT_BUILD_TAG \
+	"t-base-MTK-A64-Android-410a-V002-20190318_110437_65733_93363"
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.c
new file mode 100644
index 0000000..93fac7d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.c
@@ -0,0 +1,1474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>	/* struct task_struct */
+#include <linux/version.h>
+#include <linux/sched/mm.h>	/* get_task_mm */
+#include <linux/sched/task.h>	/* put_task_struct */
+#include <net/sock.h>		/* sockfd_lookup */
+#include <linux/file.h>		/* fput */
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "main.h"
+#include "mmu.h"
+#include "session.h"
+#include "client.h"
+
+/* Client/context */
+struct tee_client {
+	/* PID of task that opened the device, 0 if kernel */
+	pid_t			pid;
+	/* Command name of the task */
+	char			comm[TASK_COMM_LEN];
+	/* Number of references kept to this object */
+	struct kref		kref;
+	/* List of contiguous buffers allocated by mcMallocWsm for the client */
+	struct list_head	cbufs;
+	struct mutex		cbufs_lock;	/* lock for the cbufs list */
+	/* List of TA sessions opened by this client */
+	struct list_head	sessions;
+	struct list_head	closing_sessions;
+	struct mutex		sessions_lock;	/* sessions list + closing */
+	/* Client lock for quick WSMs and operations changes */
+	struct mutex		quick_lock;
+	/* Client lock for CWSMs release functions */
+	struct mutex		cwsm_release_lock;
+	/* List of WSMs for a client */
+	struct list_head	cwsms;
+	/* List of GP operation for a client */
+	struct list_head	operations;
+	/* The list entry to attach to "ctx.clients" list */
+	struct list_head	list;
+	/* task_struct for the client application, if going through a proxy */
+	struct task_struct	*task;
+};
+
+/* Context */
+static struct client_ctx {
+	/* Clients list */
+	struct mutex		clients_lock;
+	struct list_head	clients;
+	/* Clients waiting for their last cbuf to be released */
+	struct mutex		closing_clients_lock;
+	struct list_head	closing_clients;
+} client_ctx;
+
+/* Buffer shared with SWd at client level */
+struct cwsm {
+	/* Client this cbuf belongs to */
+	struct tee_client	*client;
+	/* Buffer info */
+	struct gp_shared_memory	memref;
+	/* MMU L2 table */
+	struct tee_mmu		*mmu;
+	/* Buffer SWd addr */
+	u32			sva;
+	/* Number of references kept to this object */
+	struct kref		kref;
+	/* The list entry for the client to list its WSMs */
+	struct list_head	list;
+};
+
+/*
+ * Contiguous buffer allocated to TLCs.
+ * These buffers are used as world shared memory (wsm) to share with
+ * secure world.
+ */
+struct cbuf {
+	/* Client this cbuf belongs to */
+	struct tee_client	*client;
+	/* List element for client's list of cbuf's */
+	struct list_head	list;
+	/* Number of references kept to this buffer */
+	struct kref		kref;
+	/* virtual Kernel start address */
+	uintptr_t		addr;
+	/* virtual Userspace start address */
+	uintptr_t		uaddr;
+	/* physical start address */
+	phys_addr_t		phys;
+	/* 2^order = number of pages allocated */
+	unsigned int		order;
+	/* Length of memory mapped to user */
+	u32			len;
+	/* Has been freed via the API */
+	bool			api_freed;
+};
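+
+/* Illustrative cbuf lifecycle: allocated through client_cbuf_create(),
+ * optionally mapped to user space via cbuf_map(), reference-counted with
+ * cbuf_get()/tee_cbuf_put(), and freed in cbuf_release() once the last
+ * reference is dropped.
+ */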
+
+static inline void cbuf_get(struct cbuf *cbuf)
+{
+	kref_get(&cbuf->kref);
+}
+
+/* Must only be called by tee_cbuf_put */
+static void cbuf_release(struct kref *kref)
+{
+	struct cbuf *cbuf = container_of(kref, struct cbuf, kref);
+	struct tee_client *client = cbuf->client;
+
+	/* Unlist from client */
+	list_del_init(&cbuf->list);
+	/* Release client token */
+	client_put(client);
+	/* Free */
+	free_pages(cbuf->addr, cbuf->order);
+	mc_dev_devel("freed cbuf %p: client %p addr %lx uaddr %lx len %u",
+		     cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
+	kfree(cbuf);
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_cbufs);
+}
+
+void tee_cbuf_put(struct cbuf *cbuf)
+{
+	struct tee_client *client = cbuf->client;
+
+	mutex_lock(&client->cbufs_lock);
+	kref_put(&cbuf->kref, cbuf_release);
+	mutex_unlock(&client->cbufs_lock);
+}
+
+/*
+ * Map a kernel contiguous buffer to user space
+ */
+static int cbuf_map(struct vm_area_struct *vmarea, uintptr_t addr, u32 len,
+		    uintptr_t *uaddr)
+{
+	int ret;
+
+	if (!uaddr)
+		return -EINVAL;
+
+	if (!vmarea)
+		return -EINVAL;
+
+	if (!addr)
+		return -EINVAL;
+
+	if (len != (u32)(vmarea->vm_end - vmarea->vm_start)) {
+		ret = -EINVAL;
+		mc_dev_err(ret, "cbuf incompatible with vma");
+		return ret;
+	}
+
+	vmarea->vm_flags |= VM_IO;
+	ret = remap_pfn_range(vmarea, vmarea->vm_start,
+			      page_to_pfn(virt_to_page(addr)),
+			      vmarea->vm_end - vmarea->vm_start,
+			      vmarea->vm_page_prot);
+	if (ret) {
+		*uaddr = 0;
+		mc_dev_err(ret, "User mapping failed");
+		return ret;
+	}
+
+	*uaddr = vmarea->vm_start;
+	return 0;
+}
+
+/*
+ * Returns true if client is a kernel object.
+ */
+static inline bool client_is_kernel(struct tee_client *client)
+{
+	return !client->pid;
+}
+
+static struct cwsm *cwsm_create(struct tee_client *client,
+				struct tee_mmu *mmu,
+				const struct gp_shared_memory *memref,
+				struct gp_return *gp_ret)
+{
+	struct cwsm *cwsm;
+	u32 sva;
+	int ret;
+
+	cwsm = kzalloc(sizeof(*cwsm), GFP_KERNEL);
+	if (!cwsm)
+		return ERR_PTR(iwp_set_ret(-ENOMEM, gp_ret));
+
+	if (mmu) {
+		cwsm->mmu = mmu;
+		tee_mmu_get(cwsm->mmu);
+	} else {
+		struct mc_ioctl_buffer buf = {
+			.va = (uintptr_t)memref->buffer,
+			.len = memref->size,
+			.flags = memref->flags,
+		};
+
+		if (client_is_kernel(client)) {
+			cwsm->mmu = tee_mmu_create(NULL, &buf);
+		} else {
+			struct mm_struct *mm = get_task_mm(current);
+
+			if (!mm) {
+				ret = -EPERM;
+				mc_dev_err(ret, "can't get mm");
+				goto err_cwsm;
+			}
+
+			/* Build MMU table for buffer */
+			cwsm->mmu = tee_mmu_create(mm, &buf);
+			mmput(mm);
+		}
+
+		if (IS_ERR(cwsm->mmu)) {
+			ret = iwp_set_ret(PTR_ERR(cwsm->mmu), gp_ret);
+			goto err_cwsm;
+		}
+	}
+
+	ret = iwp_register_shared_mem(cwsm->mmu, &sva, gp_ret);
+	if (ret)
+		goto err_mmu;
+
+	/* Get a token on the client */
+	client_get(client);
+	cwsm->client = client;
+	memcpy(&cwsm->memref, memref, sizeof(cwsm->memref));
+	cwsm->sva = sva;
+	kref_init(&cwsm->kref);
+	INIT_LIST_HEAD(&cwsm->list);
+	/* Add buffer to list */
+	mutex_lock(&client->quick_lock);
+	list_add_tail(&cwsm->list, &client->cwsms);
+	mutex_unlock(&client->quick_lock);
+	mc_dev_devel("created cwsm %p: client %p sva %x", cwsm, client, sva);
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_cwsms);
+	return cwsm;
+
+err_mmu:
+	tee_mmu_put(cwsm->mmu);
+err_cwsm:
+	kfree(cwsm);
+	return ERR_PTR(ret);
+}
+
+static inline void cwsm_get(struct cwsm *cwsm)
+{
+	kref_get(&cwsm->kref);
+}
+
+/* Must only be called by cwsm_put */
+static void cwsm_release(struct kref *kref)
+{
+	struct cwsm *cwsm = container_of(kref, struct cwsm, kref);
+	struct tee_client *client = cwsm->client;
+	struct mcp_buffer_map map;
+
+	/* Unlist from client */
+	list_del_init(&cwsm->list);
+	/* Unmap buffer from SWd (errors ignored) */
+	tee_mmu_buffer(cwsm->mmu, &map);
+	map.secure_va = cwsm->sva;
+	iwp_release_shared_mem(&map);
+	/* Release MMU */
+	tee_mmu_put(cwsm->mmu);
+	/* Release client token */
+	client_put(client);
+	/* Free */
+	mc_dev_devel("freed cwsm %p: client %p", cwsm, client);
+	kfree(cwsm);
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_cwsms);
+}
+
+static inline void cwsm_put(struct cwsm *cwsm)
+{
+	struct tee_client *client = cwsm->client;
+
+	mutex_lock(&client->quick_lock);
+	kref_put(&cwsm->kref, cwsm_release);
+	mutex_unlock(&client->quick_lock);
+}
+
+static inline struct cwsm *cwsm_find(struct tee_client *client,
+				     const struct gp_shared_memory *memref)
+{
+	struct cwsm *cwsm = NULL, *candidate;
+
+	mc_dev_devel("find shared mem for buf %llx size %llu flags %x",
+		     memref->buffer, memref->size, memref->flags);
+	mutex_lock(&client->quick_lock);
+	list_for_each_entry(candidate, &client->cwsms, list) {
+		mc_dev_devel("candidate buf %llx size %llu flags %x",
+			     candidate->memref.buffer, candidate->memref.size,
+			     candidate->memref.flags);
+		if (candidate->memref.buffer == memref->buffer &&
+		    candidate->memref.size == memref->size &&
+		    candidate->memref.flags == memref->flags) {
+			cwsm = candidate;
+			cwsm_get(cwsm);
+			mc_dev_devel("match");
+			break;
+		}
+	}
+	mutex_unlock(&client->quick_lock);
+	return cwsm;
+}
+
+static inline struct cwsm *cwsm_find_by_sva(struct tee_client *client, u32 sva)
+{
+	struct cwsm *cwsm = NULL, *candidate;
+
+	mutex_lock(&client->quick_lock);
+	list_for_each_entry(candidate, &client->cwsms, list)
+		if (candidate->sva == sva) {
+			cwsm = candidate;
+			cwsm_get(cwsm);
+			break;
+		}
+	mutex_unlock(&client->quick_lock);
+	return cwsm;
+}
+
+/*
+ * Return the secure virtual address of a registered shared memory buffer
+ */
+u32 client_get_cwsm_sva(struct tee_client *client,
+			const struct gp_shared_memory *memref)
+{
+	struct cwsm *cwsm = cwsm_find(client, memref);
+
+	if (!cwsm)
+		return 0;
+
+	mc_dev_devel("found sva %x", cwsm->sva);
+	return cwsm->sva;
+}
+
+void client_get(struct tee_client *client)
+{
+	kref_get(&client->kref);
+}
+
+void client_put_cwsm_sva(struct tee_client *client, u32 sva)
+{
+	struct cwsm *cwsm;
+
+	mutex_lock(&client->cwsm_release_lock);
+	cwsm = cwsm_find_by_sva(client, sva);
+	if (!cwsm)
+		goto end;
+
+	/* Release reference taken by cwsm_find_by_sva */
+	cwsm_put(cwsm);
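+	/* The second put drops the reference taken at registration time in
+	 * cwsm_create(), so the shared memory is actually released
+	 */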
+	cwsm_put(cwsm);
+end:
+	mutex_unlock(&client->cwsm_release_lock);
+}
+
+/*
+ * Allocate and initialize a client object
+ */
+struct tee_client *client_create(bool is_from_kernel)
+{
+	struct tee_client *client;
+
+	/* Allocate client structure */
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_clients);
+	/* initialize members */
+	client->pid = is_from_kernel ? 0 : current->pid;
+	memcpy(client->comm, current->comm, sizeof(client->comm));
+	kref_init(&client->kref);
+	INIT_LIST_HEAD(&client->cbufs);
+	mutex_init(&client->cbufs_lock);
+	INIT_LIST_HEAD(&client->sessions);
+	INIT_LIST_HEAD(&client->closing_sessions);
+	mutex_init(&client->sessions_lock);
+	INIT_LIST_HEAD(&client->list);
+	mutex_init(&client->quick_lock);
+	mutex_init(&client->cwsm_release_lock);
+	INIT_LIST_HEAD(&client->cwsms);
+	INIT_LIST_HEAD(&client->operations);
+	/* Add client to list of clients */
+	mutex_lock(&client_ctx.clients_lock);
+	list_add_tail(&client->list, &client_ctx.clients);
+	mutex_unlock(&client_ctx.clients_lock);
+	mc_dev_devel("created client %p", client);
+	return client;
+}
+
+/* Must only be called by client_put */
+static void client_release(struct kref *kref)
+{
+	struct tee_client *client;
+
+	client = container_of(kref, struct tee_client, kref);
+	/* Client is closed, remove from closing list */
+	list_del(&client->list);
+	mc_dev_devel("freed client %p", client);
+	if (client->task)
+		put_task_struct(client->task);
+
+	kfree(client);
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_clients);
+}
+
+int client_put(struct tee_client *client)
+{
+	int ret;
+
+	mutex_lock(&client_ctx.closing_clients_lock);
+	ret = kref_put(&client->kref, client_release);
+	mutex_unlock(&client_ctx.closing_clients_lock);
+	return ret;
+}
+
+/*
+ * Check whether the client still has open sessions.
+ * Return: true if the client has at least one session
+ */
+bool client_has_sessions(struct tee_client *client)
+{
+	bool ret;
+
+	/* Check for sessions */
+	mutex_lock(&client->sessions_lock);
+	ret = !list_empty(&client->sessions);
+	mutex_unlock(&client->sessions_lock);
+	mc_dev_devel("client %p, exit with %d", client, ret);
+	return ret;
+}
+
+static inline void client_put_session(struct tee_client *client,
+				      struct tee_session *session)
+{
+	/* Remove session from client's closing list */
+	mutex_lock(&client->sessions_lock);
+	list_del(&session->list);
+	mutex_unlock(&client->sessions_lock);
+	/* Release the ref we took on creation */
+	session_put(session);
+}
+
+/*
+ * At this point, nobody has access to the client anymore, so no new sessions
+ * are being created.
+ */
+static void client_close_sessions(struct tee_client *client)
+{
+	struct tee_session *session;
+
+	mutex_lock(&client->sessions_lock);
+	while (!list_empty(&client->sessions)) {
+		session = list_first_entry(&client->sessions,
+					   struct tee_session, list);
+
+		/* Move session to closing sessions list */
+		list_move(&session->list, &client->closing_sessions);
+		/* Call session_close without lock */
+		mutex_unlock(&client->sessions_lock);
+		if (!session_close(session))
+			client_put_session(client, session);
+		mutex_lock(&client->sessions_lock);
+	}
+
+	mutex_unlock(&client->sessions_lock);
+}
+
+/*
+ * At this point, nobody has access to the client anymore, so no new contiguous
+ * buffers are being created.
+ */
+static void client_close_kernel_cbufs(struct tee_client *client)
+{
+	/* Put buffers allocated and not freed via the kernel API */
+	if (!client_is_kernel(client))
+		return;
+
+	/* Look for cbufs that the client has not freed and put them */
+	while (true) {
+		struct cbuf *cbuf = NULL, *candidate;
+
+		mutex_lock(&client->cbufs_lock);
+		list_for_each_entry(candidate, &client->cbufs, list) {
+			if (!candidate->api_freed) {
+				candidate->api_freed = true;
+				cbuf = candidate;
+				break;
+			}
+		}
+		mutex_unlock(&client->cbufs_lock);
+
+		if (!cbuf)
+			break;
+
+		tee_cbuf_put(cbuf);
+	}
+}
+
+/* Client is closing: make sure all CWSMs are gone */
+static void client_release_cwsms(struct tee_client *client)
+{
+	/* Put any remaining cwsms that the client has not released */
+	while (!list_empty(&client->cwsms)) {
+		struct cwsm *cwsm;
+
+		cwsm = list_first_entry(&client->cwsms, struct cwsm, list);
+		cwsm_put(cwsm);
+	}
+}
+
+/* Client is closing: make sure all cancelled operations are gone */
+static void client_release_gp_operations(struct tee_client *client)
+{
+	struct client_gp_operation *op, *nop;
+
+	mutex_lock(&client->quick_lock);
+	list_for_each_entry_safe(op, nop, &client->operations, list) {
+		/* Only cancelled operations are kzalloc'd */
+		mc_dev_devel("flush cancelled operation %p for started %llu",
+			     op, op->started);
+		if (op->cancelled)
+			kfree(op);
+	}
+	mutex_unlock(&client->quick_lock);
+}
+
+/*
+ * Release a client and the session and cbuf objects it contains.
+ * @param client	the client to close
+ */
+void client_close(struct tee_client *client)
+{
+	/* Move client from active clients to closing clients for debug */
+	mutex_lock(&client_ctx.clients_lock);
+	mutex_lock(&client_ctx.closing_clients_lock);
+	list_move(&client->list, &client_ctx.closing_clients);
+	mutex_unlock(&client_ctx.closing_clients_lock);
+	mutex_unlock(&client_ctx.clients_lock);
+	client_close_kernel_cbufs(client);
+	/* Close all remaining sessions */
+	client_close_sessions(client);
+	/* Release all cwsms, no need to lock as sessions are closed */
+	client_release_cwsms(client);
+	client_release_gp_operations(client);
+	client_put(client);
+	mc_dev_devel("client %p closed", client);
+}
+
+/*
+ * Clean all structures shared with the SWd (note: incomplete but unused)
+ */
+void client_cleanup(void)
+{
+	struct tee_client *client;
+
+	mutex_lock(&client_ctx.clients_lock);
+	list_for_each_entry(client, &client_ctx.clients, list) {
+		mutex_lock(&client->sessions_lock);
+		while (!list_empty(&client->sessions)) {
+			struct tee_session *session;
+
+			session = list_first_entry(&client->sessions,
+						   struct tee_session, list);
+			list_del(&session->list);
+			session_mc_cleanup_session(session);
+		}
+		mutex_unlock(&client->sessions_lock);
+	}
+	mutex_unlock(&client_ctx.clients_lock);
+}
+
+/*
+ * Open a TA for the given client. The TA binary is provided by the daemon.
+ * @return driver error code
+ */
+int client_mc_open_session(struct tee_client *client,
+			   const struct mc_uuid_t *uuid,
+			   uintptr_t tci_va, size_t tci_len, u32 *session_id)
+{
+	struct mcp_open_info info = {
+		.type = TEE_MC_UUID,
+		.uuid = uuid,
+		.tci_va = tci_va,
+		.tci_len = tci_len,
+		.user = !client_is_kernel(client),
+	};
+	int ret;
+
+	ret = client_mc_open_common(client, &info, session_id);
+	mc_dev_devel("session %x, exit with %d", *session_id, ret);
+	return ret;
+}
+
+/*
+ * Open a TA for the given client. The TA binary is provided by the client.
+ * @return driver error code
+ */
+int client_mc_open_trustlet(struct tee_client *client,
+			    u32 spid, uintptr_t ta_va, size_t ta_len,
+			    uintptr_t tci_va, size_t tci_len, u32 *session_id)
+{
+	struct mcp_open_info info = {
+		.type = TEE_MC_TA,
+		.spid = spid,
+		.va = ta_va,
+		.len = ta_len,
+		.tci_va = tci_va,
+		.tci_len = tci_len,
+		.user = !client_is_kernel(client),
+	};
+	int ret;
+
+	ret = client_mc_open_common(client, &info, session_id);
+	mc_dev_devel("session %x, exit with %d", *session_id, ret);
+	return ret;
+}
+
+/*
+ * Open a TA and add the corresponding session object to the given client
+ * return: driver error code
+ */
+int client_mc_open_common(struct tee_client *client, struct mcp_open_info *info,
+			  u32 *session_id)
+{
+	struct tee_session *session = NULL;
+	int ret = 0;
+
+	/*
+	 * Create session object with temp sid=0 BEFORE session is started,
+	 * otherwise if a GP TA is started and NWd session object allocation
+	 * fails, we cannot handle the potentially delayed GP closing.
+	 * Adding session to list must be done AFTER it is started (once we have
+	 * sid), therefore it cannot be done within session_create().
+	 */
+	session = session_create(client, NULL);
+	if (IS_ERR(session))
+		return PTR_ERR(session);
+
+	ret = session_mc_open_session(session, info);
+	if (ret)
+		goto err;
+
+	mutex_lock(&client->sessions_lock);
+	/* Add session to client */
+	list_add_tail(&session->list, &client->sessions);
+	/* Set session ID returned by SWd */
+	*session_id = session->mcp_session.sid;
+	mutex_unlock(&client->sessions_lock);
+
+err:
+	/* Close or free session on error */
+	if (ret == -ENODEV) {
+		/* The session must enter the closing process... */
+		list_add_tail(&session->list, &client->closing_sessions);
+		if (!session_close(session))
+			client_put_session(client, session);
+	} else if (ret) {
+		session_put(session);
+	}
+
+	return ret;
+}
+
+/*
+ * Remove a session object from the client and close the corresponding TA.
+ * Return: 0 if the session was found and closed, or a negative error code
+ */
+int client_remove_session(struct tee_client *client, u32 session_id)
+{
+	struct tee_session *session = NULL, *candidate;
+	int ret;
+
+	/* Move session from main list to closing list */
+	mutex_lock(&client->sessions_lock);
+	list_for_each_entry(candidate, &client->sessions, list) {
+		if (candidate->mcp_session.sid == session_id) {
+			session = candidate;
+			list_move(&session->list, &client->closing_sessions);
+			break;
+		}
+	}
+
+	mutex_unlock(&client->sessions_lock);
+	if (!session)
+		return -ENXIO;
+
+	/* Close session */
+	ret = session_close(session);
+	if (!ret)
+		client_put_session(client, session);
+
+	return ret;
+}
+
+/*
+ * Find a session object and increment its reference counter.
+ * Object cannot be freed until its counter reaches 0.
+ * return: pointer to the object, NULL if not found.
+ */
+static struct tee_session *client_get_session(struct tee_client *client,
+					      u32 session_id)
+{
+	struct tee_session *session = NULL, *candidate;
+
+	mutex_lock(&client->sessions_lock);
+	list_for_each_entry(candidate, &client->sessions, list) {
+		if (candidate->mcp_session.sid == session_id) {
+			session = candidate;
+			session_get(session);
+			break;
+		}
+	}
+
+	mutex_unlock(&client->sessions_lock);
+	if (!session)
+		mc_dev_err(-ENXIO, "session %x not found", session_id);
+
+	return session;
+}
+
+/*
+ * Send a notification to TA
+ * @return driver error code
+ */
+int client_notify_session(struct tee_client *client, u32 session_id)
+{
+	struct tee_session *session;
+	int ret;
+
+	/* Find/get session */
+	session = client_get_session(client, session_id);
+	if (!session)
+		return -ENXIO;
+
+	/* Send command to SWd */
+	ret = session_mc_notify(session);
+	/* Put session */
+	session_put(session);
+	mc_dev_devel("session %x, exit with %d", session_id, ret);
+	return ret;
+}
+
+/*
+ * Wait for a notification from TA
+ * @return driver error code
+ */
+int client_waitnotif_session(struct tee_client *client, u32 session_id,
+			     s32 timeout, bool silent_expiry)
+{
+	struct tee_session *session;
+	int ret;
+
+	/* Find/get session */
+	session = client_get_session(client, session_id);
+	if (!session)
+		return -ENXIO;
+
+	ret = session_mc_wait(session, timeout, silent_expiry);
+	/* Put session */
+	session_put(session);
+	mc_dev_devel("session %x, exit with %d", session_id, ret);
+	return ret;
+}
+
+/*
+ * Read session exit/termination code
+ */
+int client_get_session_exitcode(struct tee_client *client, u32 session_id,
+				s32 *err)
+{
+	struct tee_session *session;
+	int ret;
+
+	/* Find/get session */
+	session = client_get_session(client, session_id);
+	if (!session)
+		return -ENXIO;
+
+	/* Retrieve error */
+	ret = session_mc_get_err(session, err);
+	/* Put session */
+	session_put(session);
+	mc_dev_devel("session %x, exit code %d", session_id, *err);
+	return ret;
+}
+
+/* Share a buffer with given TA in SWd */
+int client_mc_map(struct tee_client *client, u32 session_id,
+		  struct tee_mmu *mmu, struct mc_ioctl_buffer *buf)
+{
+	struct tee_session *session;
+	int ret;
+
+	/* Find/get session */
+	session = client_get_session(client, session_id);
+	if (!session)
+		return -ENXIO;
+
+	/* Add buffer to the session */
+	ret = session_mc_map(session, mmu, buf);
+	/* Put session */
+	session_put(session);
+	mc_dev_devel("session %x, exit with %d", session_id, ret);
+	return ret;
+}
+
+/* Stop sharing a buffer with SWd */
+int client_mc_unmap(struct tee_client *client, u32 session_id,
+		    const struct mc_ioctl_buffer *buf)
+{
+	struct tee_session *session;
+	int ret;
+
+	/* Find/get session */
+	session = client_get_session(client, session_id);
+	if (!session)
+		return -ENXIO;
+
+	/* Remove buffer from session */
+	ret = session_mc_unmap(session, buf);
+	/* Put session */
+	session_put(session);
+	mc_dev_devel("session %x, exit with %d", session_id, ret);
+	return ret;
+}
+
+int client_gp_initialize_context(struct tee_client *client,
+				 struct gp_return *gp_ret)
+{
+	return iwp_set_ret(0, gp_ret);
+}
+
+int client_gp_register_shared_mem(struct tee_client *client,
+				  struct tee_mmu *mmu, u32 *sva,
+				  const struct gp_shared_memory *memref,
+				  struct gp_return *gp_ret)
+{
+	struct cwsm *cwsm = NULL;
+
+	if (memref->size > BUFFER_LENGTH_MAX) {
+		mc_dev_err(-EINVAL, "buffer size %llu too big", memref->size);
+		return -EINVAL;
+	}
+
+	if (!mmu)
+		/* cwsm_find automatically takes a reference */
+		cwsm = cwsm_find(client, memref);
+
+	if (!cwsm)
+		cwsm = cwsm_create(client, mmu, memref, gp_ret);
+
+	/* gp_ret set by callee */
+	if (IS_ERR(cwsm))
+		return PTR_ERR(cwsm);
+
+	if (sva)
+		*sva = cwsm->sva;
+
+	return iwp_set_ret(0, gp_ret);
+}
+
+int client_gp_release_shared_mem(struct tee_client *client,
+				 const struct gp_shared_memory *memref)
+{
+	struct cwsm *cwsm;
+	int ret = 0;
+
+	mutex_lock(&client->cwsm_release_lock);
+	cwsm = cwsm_find(client, memref);
+	if (!cwsm) {
+		ret = -ENOENT;
+		goto end;
+	}
+
+	/* Release reference taken by cwsm_find */
+	cwsm_put(cwsm);
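+	/* Also drop the reference held since registration (cwsm_create), so
+	 * the cwsm can actually be freed
+	 */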
+	cwsm_put(cwsm);
+end:
+	mutex_unlock(&client->cwsm_release_lock);
+	return ret;
+}
+
+/*
+ * Open a TA and add the corresponding session object to the given client.
+ * Return: driver error code
+ */
+int client_gp_open_session(struct tee_client *client,
+			   const struct mc_uuid_t *uuid,
+			   struct gp_operation *operation,
+			   const struct mc_identity *identity,
+			   struct gp_return *gp_ret,
+			   u32 *session_id)
+{
+	struct tee_session *session = NULL;
+	int ret = 0;
+
+	/*
+	 * Create the session object with a temporary sid of 0 BEFORE the
+	 * session is started: otherwise, if a GP TA is started but the NWd
+	 * session object allocation fails, the potentially delayed GP close
+	 * cannot be handled.
+	 * Adding the session to the list must be done AFTER it is started
+	 * (once we have a sid), so it cannot be done within session_create().
+	 */
+	session = session_create(client, identity);
+	if (IS_ERR(session))
+		return iwp_set_ret(PTR_ERR(session), gp_ret);
+
+	/* Open session */
+	ret = session_gp_open_session(session, uuid, operation, gp_ret);
+	if (ret)
+		goto end;
+
+	mutex_lock(&client->sessions_lock);
+	/* Add session to client */
+	list_add_tail(&session->list, &client->sessions);
+	mutex_unlock(&client->sessions_lock);
+	/* Set sid returned by SWd */
+	*session_id = session->iwp_session.sid;
+
+end:
+	if (ret)
+		session_put(session);
+
+	mc_dev_devel("gp session %x, exit with %d", *session_id, ret);
+	return ret;
+}
+
+int client_gp_open_session_domu(struct tee_client *client,
+				const struct mc_uuid_t *uuid, u64 started,
+				struct interworld_session *iws,
+				struct tee_mmu **mmus,
+				struct gp_return *gp_ret)
+{
+	struct tee_session *session = NULL;
+	int ret = 0;
+
+	/* Don't pass NULL for identity as it would make an MC session */
+	session = session_create(client, ERR_PTR(-ENOENT));
+	if (IS_ERR(session))
+		return iwp_set_ret(PTR_ERR(session), gp_ret);
+
+	/* Open session */
+	ret = session_gp_open_session_domu(session, uuid, started, iws,
+					   mmus, gp_ret);
+	if (ret)
+		goto end;
+
+	mutex_lock(&client->sessions_lock);
+	/* Add session to client */
+	list_add_tail(&session->list, &client->sessions);
+	mutex_unlock(&client->sessions_lock);
+
+end:
+	if (ret)
+		session_put(session);
+
+	mc_dev_devel("gp session %x, exit with %d",
+		     session->iwp_session.sid, ret);
+	return ret;
+}
+
+int client_gp_close_session(struct tee_client *client, u32 session_id)
+{
+	struct tee_session *session = NULL, *candidate;
+	int ret = 0;
+
+	/* Move session from main list to closing list */
+	mutex_lock(&client->sessions_lock);
+	list_for_each_entry(candidate, &client->sessions, list) {
+		if (candidate->iwp_session.sid == session_id) {
+			session = candidate;
+			list_move(&session->list, &client->closing_sessions);
+			break;
+		}
+	}
+
+	mutex_unlock(&client->sessions_lock);
+	if (!session)
+		return -ENXIO;
+
+	ret = session_close(session);
+	if (!ret)
+		client_put_session(client, session);
+
+	return ret;
+}
+
+/*
+ * Send a command to the TA
+ * Return: driver error code
+ */
+int client_gp_invoke_command(struct tee_client *client, u32 session_id,
+			     u32 command_id,
+			     struct gp_operation *operation,
+			     struct gp_return *gp_ret)
+{
+	struct tee_session *session;
+	int ret = 0;
+
+	session = client_get_session(client, session_id);
+	if (!session)
+		return iwp_set_ret(-ENXIO, gp_ret);
+
+	ret = session_gp_invoke_command(session, command_id, operation, gp_ret);
+
+	/* Put session */
+	session_put(session);
+	return ret;
+}
+
+int client_gp_invoke_command_domu(struct tee_client *client, u32 session_id,
+				  u64 started, struct interworld_session *iws,
+				  struct tee_mmu **mmus,
+				  struct gp_return *gp_ret)
+{
+	struct tee_session *session;
+	int ret = 0;
+
+	session = client_get_session(client, session_id);
+	if (!session)
+		return iwp_set_ret(-ENXIO, gp_ret);
+
+	ret = session_gp_invoke_command_domu(session, started, iws, mmus,
+					     gp_ret);
+
+	/* Put session */
+	session_put(session);
+	return ret;
+}
+
+void client_gp_request_cancellation(struct tee_client *client, u64 started)
+{
+	struct client_gp_operation *op;
+	u64 slot;
+	bool found = false;
+
+	/* Look for operation */
+	mutex_lock(&client->quick_lock);
+	list_for_each_entry(op, &client->operations, list)
+		if (op->started == started) {
+			slot = op->slot;
+			found = true;
+			mc_dev_devel(
+				"found operation to cancel for started %llu",
+				started);
+			break;
+		}
+
+	/* Operation not found: assume it is coming */
+	if (!found) {
+		op = kzalloc(sizeof(*op), GFP_KERNEL);
+		if (op) {
+			op->started = started;
+			op->cancelled = true;
+			list_add_tail(&op->list, &client->operations);
+			mc_dev_devel(
+				"add cancelled operation %p for started %llu",
+				op, op->started);
+		}
+	}
+	mutex_unlock(&client->quick_lock);
+
+	if (found)
+		session_gp_request_cancellation(slot);
+}
+
+/*
+ * This callback is called when a VMA mapping the buffer is opened
+ * (e.g. duplicated on fork or by mremap)
+ */
+static void cbuf_vm_open(struct vm_area_struct *vmarea)
+{
+	struct cbuf *cbuf = vmarea->vm_private_data;
+
+	cbuf_get(cbuf);
+}
+
+/*
+ * This callback is called on unmap
+ */
+static void cbuf_vm_close(struct vm_area_struct *vmarea)
+{
+	struct cbuf *cbuf = vmarea->vm_private_data;
+
+	tee_cbuf_put(cbuf);
+}
+
+static const struct vm_operations_struct cbuf_vm_ops = {
+	.open = cbuf_vm_open,
+	.close = cbuf_vm_close,
+};
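+
+/*
+ * These hooks keep the cbuf reference count in sync with the number of live
+ * user-space mappings of the buffer
+ */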
+
+/*
+ * Create a cbuf object and add it to client
+ */
+int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
+		       struct vm_area_struct *vmarea)
+{
+	struct cbuf *cbuf = NULL;
+	unsigned int order;
+	int ret = 0;
+
+	if (!client)
+		return -EINVAL;
+
+	if (!len) {
+		mc_dev_err(-EINVAL, "buffer size 0 not supported");
+		return -EINVAL;
+	}
+
+	if (len > BUFFER_LENGTH_MAX) {
+		mc_dev_err(-EINVAL, "buffer size %u too big", len);
+		return -EINVAL;
+	}
+
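+	/*
+	 * get_order() returns the smallest order such that
+	 * PAGE_SIZE << order >= len (e.g. with 4 KiB pages, len = 8192
+	 * gives order 1)
+	 */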
+	order = get_order(len);
+	if (order > MAX_ORDER) {
+		ret = -ENOMEM;
+		mc_dev_err(ret, "Buffer size too large");
+		return ret;
+	}
+
+	/* Allocate buffer descriptor structure */
+	cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_cbufs);
+	/* Allocate buffer */
+	cbuf->addr = __get_free_pages(GFP_USER | __GFP_ZERO, order);
+	if (!cbuf->addr) {
+		kfree(cbuf);
+		/* Decrement debug counter */
+		atomic_dec(&g_ctx.c_cbufs);
+		return -ENOMEM;
+	}
+
+	/* Map to user space if applicable */
+	if (!client_is_kernel(client)) {
+		ret = cbuf_map(vmarea, cbuf->addr, len, &cbuf->uaddr);
+		if (ret) {
+			free_pages(cbuf->addr, order);
+			kfree(cbuf);
+			/* Decrement debug counter */
+			atomic_dec(&g_ctx.c_cbufs);
+			return ret;
+		}
+	}
+
+	/* Init descriptor members */
+	cbuf->client = client;
+	cbuf->phys = virt_to_phys((void *)cbuf->addr);
+	cbuf->len = len;
+	cbuf->order = order;
+	kref_init(&cbuf->kref);
+	INIT_LIST_HEAD(&cbuf->list);
+
+	/* Keep cbuf in VMA private data for refcounting (user-space clients) */
+	if (vmarea) {
+		vmarea->vm_private_data = cbuf;
+		vmarea->vm_ops = &cbuf_vm_ops;
+	}
+
+	/* Fill return parameter for k-api */
+	if (addr)
+		*addr = cbuf->addr;
+
+	/* Get a token on the client */
+	client_get(client);
+
+	/* Add buffer to list */
+	mutex_lock(&client->cbufs_lock);
+	list_add_tail(&cbuf->list, &client->cbufs);
+	mutex_unlock(&client->cbufs_lock);
+	mc_dev_devel("created cbuf %p: client %p addr %lx uaddr %lx len %u",
+		     cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
+	return ret;
+}
+
+/*
+ * Find a contiguous buffer (cbuf) in the cbuf list of given client that
+ * contains given address and take a reference on it.
+ * Return pointer to the object, or NULL if not found.
+ */
+static struct cbuf *cbuf_get_by_addr(struct tee_client *client, uintptr_t addr)
+{
+	struct cbuf *cbuf = NULL, *candidate;
+	bool is_kernel = client_is_kernel(client);
+
+	mutex_lock(&client->cbufs_lock);
+	list_for_each_entry(candidate, &client->cbufs, list) {
+		/* Compare to kernel VA or user VA depending on client type */
+		uintptr_t start = is_kernel ?
+			candidate->addr : candidate->uaddr;
+		uintptr_t end = start + candidate->len;
+
+		/* Check that (user) cbuf has not been unmapped */
+		if (!start)
+			break;
+
+		if (addr >= start && addr < end) {
+			cbuf = candidate;
+			break;
+		}
+	}
+
+	if (cbuf)
+		cbuf_get(cbuf);
+
+	mutex_unlock(&client->cbufs_lock);
+	return cbuf;
+}
+
+/*
+ * Remove a cbuf object from client, and mark it for freeing.
+ * Freeing will happen once all current references are released.
+ *
+ * Note: this function could be subject to the same race condition as
+ * client_gp_release_shared_mem() and client_put_cwsm_sva(), but it is trusted
+ * as it can only be called by kernel drivers, so no lock is taken around
+ * cbuf_get_by_addr() and the two tee_cbuf_put() calls.
+ */
+int client_cbuf_free(struct tee_client *client, uintptr_t addr)
+{
+	struct cbuf *cbuf = cbuf_get_by_addr(client, addr);
+
+	if (!cbuf) {
+		mc_dev_err(-EINVAL, "cbuf %lu not found", addr);
+		return -EINVAL;
+	}
+
+	/* Release reference taken by cbuf_get_by_addr */
+	tee_cbuf_put(cbuf);
+	mutex_lock(&client->cbufs_lock);
+	cbuf->api_freed = true;
+	mutex_unlock(&client->cbufs_lock);
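+	/*
+	 * Drop the initial reference taken in client_cbuf_create(); the buffer
+	 * is actually freed once all remaining references (e.g. live user
+	 * mappings) are gone
+	 */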
+	tee_cbuf_put(cbuf);
+	return 0;
+}
+
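+/*
+ * Record a starting GP operation. If a cancellation placeholder was already
+ * queued for the same 'started' value by client_gp_request_cancellation(),
+ * consume it and return false so the caller knows the operation is already
+ * cancelled; otherwise add the operation to the list and return true.
+ */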
+bool client_gp_operation_add(struct tee_client *client,
+			     struct client_gp_operation *operation)
+{
+	struct client_gp_operation *op;
+	bool found = false;
+
+	mutex_lock(&client->quick_lock);
+	list_for_each_entry(op, &client->operations, list)
+		if (op->started == operation->started && op->cancelled) {
+			found = true;
+			break;
+		}
+
+	if (found) {
+		list_del(&op->list);
+		mc_dev_devel("found cancelled operation %p for started %llu",
+			     op, op->started);
+		kfree(op);
+	} else {
+		list_add_tail(&operation->list, &client->operations);
+		mc_dev_devel("add operation for started %llu",
+			     operation->started);
+	}
+	mutex_unlock(&client->quick_lock);
+	return !found;
+}
+
+void client_gp_operation_remove(struct tee_client *client,
+				struct client_gp_operation *operation)
+{
+	mutex_lock(&client->quick_lock);
+	list_del(&operation->list);
+	mutex_unlock(&client->quick_lock);
+}
+
+struct tee_mmu *client_mmu_create(struct tee_client *client,
+				  const struct mc_ioctl_buffer *buf_in,
+				  struct cbuf **cbuf_p)
+{
+	/* Check if buffer is contained in a cbuf */
+	struct mc_ioctl_buffer buf = *buf_in;
+	struct cbuf *cbuf = cbuf_get_by_addr(client, buf.va);
+	struct mm_struct *mm = NULL;
+	struct tee_mmu *mmu;
+
+	*cbuf_p = cbuf;
+	if (cbuf) {
+		uintptr_t offset;
+
+		if (client_is_kernel(client)) {
+			offset = buf.va - cbuf->addr;
+		} else {
+			offset = buf.va - cbuf->uaddr;
+			/* Update va to point to kernel address */
+			buf.va = cbuf->addr + offset;
+		}
+
+		if ((offset + buf.len) > cbuf->len) {
+			mc_dev_err(-EINVAL, "crosses cbuf boundary");
+			tee_cbuf_put(cbuf);
+			return ERR_PTR(-EINVAL);
+		}
+	} else if (!client_is_kernel(client)) {
+		mm = get_task_mm(current);
+		if (!mm) {
+			mc_dev_err(-EPERM, "can't get mm");
+			return ERR_PTR(-EPERM);
+		}
+	}
+
+	/* Build MMU table for buffer */
+	mmu = tee_mmu_create(mm, &buf);
+	if (mm)
+		mmput(mm);
+
+	if (IS_ERR_OR_NULL(mmu) && cbuf)
+		tee_cbuf_put(cbuf);
+
+	return mmu;
+}
+
+void client_init(void)
+{
+	INIT_LIST_HEAD(&client_ctx.clients);
+	mutex_init(&client_ctx.clients_lock);
+
+	INIT_LIST_HEAD(&client_ctx.closing_clients);
+	mutex_init(&client_ctx.closing_clients_lock);
+}
+
+static inline int cbuf_debug_structs(struct kasnprintf_buf *buf,
+				     struct cbuf *cbuf)
+{
+	return kasnprintf(buf,
+			  "\tcbuf %pK [%d]: addr %pK uaddr %pK len %u\n",
+			  cbuf, kref_read(&cbuf->kref), (void *)cbuf->addr,
+			  (void *)cbuf->uaddr, cbuf->len);
+}
+
+static inline int cwsm_debug_structs(struct kasnprintf_buf *buf,
+				     struct cwsm *cwsm)
+{
+	return kasnprintf(buf,
+			  "\tcwsm %pK [%d]: buf %pK len %llu flags 0x%x\n",
+			  cwsm, kref_read(&cwsm->kref),
+			  (void *)(uintptr_t)cwsm->memref.buffer,
+			  cwsm->memref.size, cwsm->memref.flags);
+}
+
+static int client_debug_structs(struct kasnprintf_buf *buf,
+				struct tee_client *client, bool is_closing)
+{
+	struct cbuf *cbuf;
+	struct cwsm *cwsm;
+	struct tee_session *session;
+	int ret;
+
+	if (client->pid)
+		ret = kasnprintf(buf, "client %pK [%d]: %s (%d)%s\n",
+				 client, kref_read(&client->kref),
+				 client->comm, client->pid,
+				 is_closing ? " <closing>" : "");
+	else
+		ret = kasnprintf(buf, "client %pK [%d]: [kernel]%s\n",
+				 client, kref_read(&client->kref),
+				 is_closing ? " <closing>" : "");
+
+	if (ret < 0)
+		return ret;
+
+	/* Buffers */
+	mutex_lock(&client->cbufs_lock);
+	if (list_empty(&client->cbufs))
+		goto done_cbufs;
+
+	list_for_each_entry(cbuf, &client->cbufs, list) {
+		ret = cbuf_debug_structs(buf, cbuf);
+		if (ret < 0)
+			goto done_cbufs;
+	}
+
+done_cbufs:
+	mutex_unlock(&client->cbufs_lock);
+	if (ret < 0)
+		return ret;
+
+	/* WMSs */
+	mutex_lock(&client->quick_lock);
+	if (list_empty(&client->cwsms))
+		goto done_cwsms;
+
+	list_for_each_entry(cwsm, &client->cwsms, list) {
+		ret = cwsm_debug_structs(buf, cwsm);
+		if (ret < 0)
+			goto done_cwsms;
+	}
+
+done_cwsms:
+	mutex_unlock(&client->quick_lock);
+	if (ret < 0)
+		return ret;
+
+	/* Sessions */
+	mutex_lock(&client->sessions_lock);
+	list_for_each_entry(session, &client->sessions, list) {
+		ret = session_debug_structs(buf, session, false);
+		if (ret < 0)
+			goto done_sessions;
+	}
+
+	list_for_each_entry(session, &client->closing_sessions, list) {
+		ret = session_debug_structs(buf, session, true);
+		if (ret < 0)
+			goto done_sessions;
+	}
+
+done_sessions:
+	mutex_unlock(&client->sessions_lock);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+int clients_debug_structs(struct kasnprintf_buf *buf)
+{
+	struct tee_client *client;
+	ssize_t ret = 0;
+
+	mutex_lock(&client_ctx.clients_lock);
+	list_for_each_entry(client, &client_ctx.clients, list) {
+		ret = client_debug_structs(buf, client, false);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&client_ctx.clients_lock);
+
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&client_ctx.closing_clients_lock);
+	list_for_each_entry(client, &client_ctx.closing_clients, list) {
+		ret = client_debug_structs(buf, client, true);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&client_ctx.closing_clients_lock);
+
+	return ret;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.h
new file mode 100644
index 0000000..43867fd
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/client.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _CLIENT_H_
+#define _CLIENT_H_
+
+#include <linux/list.h>
+#include <linux/sched.h>	/* TASK_COMM_LEN */
+
+#include "public/mc_user.h"	/* many types */
+
+struct tee_client;
+struct mcp_open_info;
+struct tee_mmu;
+struct interworld_session;
+
+/* Client */
+struct tee_client *client_create(bool is_from_kernel);
+void client_get(struct tee_client *client);
+int client_put(struct tee_client *client);
+bool client_has_sessions(struct tee_client *client);
+void client_close(struct tee_client *client);
+void client_cleanup(void);
+
+/* MC */
+int client_mc_open_session(struct tee_client *client,
+			   const struct mc_uuid_t *uuid,
+			   uintptr_t tci_va, size_t tci_len, u32 *session_id);
+int client_mc_open_trustlet(struct tee_client *client,
+			    u32 spid, uintptr_t ta_va, size_t ta_len,
+			    uintptr_t tci_va, size_t tci_len, u32 *session_id);
+int client_mc_open_common(struct tee_client *client, struct mcp_open_info *info,
+			  u32 *session_id);
+int client_remove_session(struct tee_client *client, u32 session_id);
+int client_notify_session(struct tee_client *client, u32 session_id);
+int client_waitnotif_session(struct tee_client *client, u32 session_id,
+			     s32 timeout, bool silent_expiry);
+int client_get_session_exitcode(struct tee_client *client, u32 session_id,
+				s32 *exit_code);
+int client_mc_map(struct tee_client *client, u32 session_id,
+		  struct tee_mmu *mmu, struct mc_ioctl_buffer *buf);
+int client_mc_unmap(struct tee_client *client, u32 session_id,
+		    const struct mc_ioctl_buffer *buf);
+
+/* GP */
+int client_gp_initialize_context(struct tee_client *client,
+				 struct gp_return *gp_ret);
+int client_gp_register_shared_mem(struct tee_client *client,
+				  struct tee_mmu *mmu, u32 *sva,
+				  const struct gp_shared_memory *memref,
+				  struct gp_return *gp_ret);
+int client_gp_release_shared_mem(struct tee_client *client,
+				 const struct gp_shared_memory *memref);
+int client_gp_open_session(struct tee_client *client,
+			   const struct mc_uuid_t *uuid,
+			   struct gp_operation *operation,
+			   const struct mc_identity *identity,
+			   struct gp_return *gp_ret,
+			   u32 *session_id);
+int client_gp_open_session_domu(struct tee_client *client,
+				const struct mc_uuid_t *uuid, u64 started,
+				struct interworld_session *iws,
+				struct tee_mmu **mmus,
+				struct gp_return *gp_ret);
+int client_gp_close_session(struct tee_client *client, u32 session_id);
+int client_gp_invoke_command(struct tee_client *client, u32 session_id,
+			     u32 command_id,
+			     struct gp_operation *operation,
+			     struct gp_return *gp_ret);
+int client_gp_invoke_command_domu(struct tee_client *client, u32 session_id,
+				  u64 started, struct interworld_session *iws,
+				  struct tee_mmu **mmus,
+				  struct gp_return *gp_ret);
+void client_gp_request_cancellation(struct tee_client *client, u64 started);
+
+/* Contiguous buffer */
+int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
+		       struct vm_area_struct *vmarea);
+int client_cbuf_free(struct tee_client *client, uintptr_t addr);
+
+/* GP internal */
+struct client_gp_operation {
+	struct list_head	list;
+	u64			started;
+	u64			slot;
+	int			cancelled;
+};
+
+/* Called from session when a new operation starts/ends */
+bool client_gp_operation_add(struct tee_client *client,
+			     struct client_gp_operation *operation);
+void client_gp_operation_remove(struct tee_client *client,
+				struct client_gp_operation *operation);
+
+/* MMU */
+struct cbuf;
+
+struct tee_mmu *client_mmu_create(struct tee_client *client,
+				  const struct mc_ioctl_buffer *buf_in,
+				  struct cbuf **cbuf_p);
+void tee_cbuf_put(struct cbuf *cbuf);
+
+/* Buffer shared with SWd at client level */
+u32 client_get_cwsm_sva(struct tee_client *client,
+			const struct gp_shared_memory *memref);
+void client_put_cwsm_sva(struct tee_client *client, u32 sva);
+
+/* Global */
+void client_init(void);
+
+/* Debug */
+int clients_debug_structs(struct kasnprintf_buf *buf);
+
+#endif /* _CLIENT_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clientlib.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clientlib.c
new file mode 100644
index 0000000..29a79a9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clientlib.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+#include "public/mobicore_driver_api.h"
+
+#include "main.h"
+#include "client.h"
+
+static enum mc_result convert(int err)
+{
+	switch (-err) {
+	case 0:
+		return MC_DRV_OK;
+	case ENOMSG:
+		return MC_DRV_NO_NOTIFICATION;
+	case EBADMSG:
+		return MC_DRV_ERR_NOTIFICATION;
+	case EAGAIN:
+		return MC_DRV_ERR_OUT_OF_RESOURCES;
+	case EHOSTDOWN:
+		return MC_DRV_ERR_INIT;
+	case ENODEV:
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+	case ENXIO:
+		return MC_DRV_ERR_UNKNOWN_SESSION;
+	case EPERM:
+		return MC_DRV_ERR_INVALID_OPERATION;
+	case EBADE:
+		return MC_DRV_ERR_INVALID_RESPONSE;
+	case ETIME:
+		return MC_DRV_ERR_TIMEOUT;
+	case ENOMEM:
+		return MC_DRV_ERR_NO_FREE_MEMORY;
+	case EUCLEAN:
+		return MC_DRV_ERR_FREE_MEMORY_FAILED;
+	case ENOTEMPTY:
+		return MC_DRV_ERR_SESSION_PENDING;
+	case EHOSTUNREACH:
+		return MC_DRV_ERR_DAEMON_UNREACHABLE;
+	case ENOENT:
+		return MC_DRV_ERR_INVALID_DEVICE_FILE;
+	case EINVAL:
+		return MC_DRV_ERR_INVALID_PARAMETER;
+	case EPROTO:
+		return MC_DRV_ERR_KERNEL_MODULE;
+	case ECOMM:
+		return MC_DRV_INFO_NOTIFICATION;
+	case EUNATCH:
+		return MC_DRV_ERR_NQ_FAILED;
+	case ERESTARTSYS:
+		return MC_DRV_ERR_INTERRUPTED_BY_SIGNAL;
+	default:
+		mc_dev_devel("error is %d", err);
+		return MC_DRV_ERR_UNKNOWN;
+	}
+}
+
+static inline bool is_valid_device(u32 device_id)
+{
+	return device_id == MC_DEVICE_ID_DEFAULT;
+}
+
+static struct tee_client *client;
+static int open_count;
+static DEFINE_MUTEX(dev_mutex);	/* Lock for the device */
+
+static bool clientlib_client_get(void)
+{
+	bool ret = true;
+
+	mutex_lock(&dev_mutex);
+	if (!client)
+		ret = false;
+	else
+		client_get(client);
+
+	mutex_unlock(&dev_mutex);
+	return ret;
+}
+
+static void clientlib_client_put(void)
+{
+	mutex_lock(&dev_mutex);
+	if (client_put(client))
+		client = NULL;
+
+	mutex_unlock(&dev_mutex);
+}
+
+enum mc_result mc_open_device(u32 device_id)
+{
+	enum mc_result mc_result = MC_DRV_OK;
+	int ret;
+
+	/* Check parameters */
+	if (!is_valid_device(device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	mutex_lock(&dev_mutex);
+	/* Make sure TEE was started */
+	ret = mc_wait_tee_start();
+	if (ret) {
+		mc_dev_err(ret, "TEE failed to start, now or in the past");
+		mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+		goto end;
+	}
+
+	if (!open_count)
+		client = client_create(true);
+
+	if (client) {
+		open_count++;
+		mc_dev_devel("successfully opened the device");
+	} else {
+		mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+		mc_dev_err(-ENOMEM, "could not open device");
+	}
+
+end:
+	mutex_unlock(&dev_mutex);
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_open_device);
+
+enum mc_result mc_close_device(u32 device_id)
+{
+	enum mc_result mc_result = MC_DRV_OK;
+
+	/* Check parameters */
+	if (!is_valid_device(device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	mutex_lock(&dev_mutex);
+	if (open_count > 1) {
+		open_count--;
+		goto end;
+	}
+
+	/* Check sessions and freeze client */
+	if (client_has_sessions(client)) {
+		mc_result = MC_DRV_ERR_SESSION_PENDING;
+		goto end;
+	}
+
+	/* Close the device */
+	client_close(client);
+	open_count = 0;
+
+end:
+	mutex_unlock(&dev_mutex);
+	clientlib_client_put();
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_close_device);
+
+enum mc_result mc_open_session(struct mc_session_handle *session,
+			       const struct mc_uuid_t *uuid,
+			       u8 *tci_va, u32 tci_len)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session || !uuid)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(
+		client_mc_open_session(client, uuid, (uintptr_t)tci_va, tci_len,
+				       &session->session_id));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_open_session);
+
+enum mc_result mc_open_trustlet(struct mc_session_handle *session, u32 spid,
+				u8 *ta_va, u32 ta_len, u8 *tci_va, u32 tci_len)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session || !ta_va || !ta_len)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(
+		client_mc_open_trustlet(client, spid, (uintptr_t)ta_va, ta_len,
+					(uintptr_t)tci_va, tci_len,
+					&session->session_id));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_open_trustlet);
+
+enum mc_result mc_close_session(struct mc_session_handle *session)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_remove_session(client, session->session_id));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_close_session);
+
+enum mc_result mc_notify(struct mc_session_handle *session)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_notify_session(client, session->session_id));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_notify);
+
+enum mc_result mc_wait_notification(struct mc_session_handle *session,
+				    s32 timeout)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
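+	/*
+	 * For an infinite timeout, transparently retry when the wait is
+	 * interrupted by a signal
+	 */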
+	do {
+		ret = convert(client_waitnotif_session(client,
+						       session->session_id,
+						       timeout, false));
+	} while ((timeout == MC_INFINITE_TIMEOUT) &&
+		 (ret == MC_DRV_ERR_INTERRUPTED_BY_SIGNAL));
+
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_wait_notification);
+
+enum mc_result mc_malloc_wsm(u32 device_id, u32 align, u32 len, u8 **wsm,
+			     u32 wsm_flags)
+{
+	enum mc_result ret;
+	uintptr_t va;
+
+	/* Check parameters */
+	if (!is_valid_device(device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!len)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!wsm)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_cbuf_create(client, len, &va, NULL));
+	if (ret == MC_DRV_OK)
+		*wsm = (u8 *)va;
+
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_malloc_wsm);
+
+enum mc_result mc_free_wsm(u32 device_id, u8 *wsm)
+{
+	enum mc_result ret;
+	uintptr_t va = (uintptr_t)wsm;
+
+	/* Check parameters */
+	if (!is_valid_device(device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_cbuf_free(client, va));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_free_wsm);
+
+enum mc_result mc_map(struct mc_session_handle *session, void *address,
+		      u32 length, struct mc_bulk_map *map_info)
+{
+	enum mc_result ret;
+	struct mc_ioctl_buffer buf = {
+		.va = (uintptr_t)address,
+		.len = length,
+		.flags = MC_IO_MAP_INPUT_OUTPUT,
+	};
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!map_info)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_mc_map(client, session->session_id, NULL, &buf));
+	if (ret == MC_DRV_OK) {
+		map_info->secure_virt_addr = buf.sva;
+		map_info->secure_virt_len = buf.len;
+	}
+
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_map);
+
+enum mc_result mc_unmap(struct mc_session_handle *session, void *address,
+			struct mc_bulk_map *map_info)
+{
+	enum mc_result ret;
+	struct mc_ioctl_buffer buf = {
+		.va = (uintptr_t)address,
+		.flags = MC_IO_MAP_INPUT_OUTPUT,
+	};
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!map_info)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	buf.len = map_info->secure_virt_len;
+	buf.sva = map_info->secure_virt_addr;
+
+	ret = convert(client_mc_unmap(client, session->session_id, &buf));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_unmap);
+
+enum mc_result mc_get_session_error_code(struct mc_session_handle *session,
+					 s32 *exit_code)
+{
+	enum mc_result ret;
+
+	/* Check parameters */
+	if (!session)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!exit_code)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Call core api */
+	ret = convert(client_get_session_exitcode(client, session->session_id,
+						  exit_code));
+	clientlib_client_put();
+	return ret;
+}
+EXPORT_SYMBOL(mc_get_session_error_code);
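+
+/*
+ * Illustrative usage sketch (not part of the driver): a typical in-kernel
+ * client of this API opens the device, allocates world-shared memory for the
+ * TCI, opens a session, exchanges notifications and finally tears everything
+ * down. The UUID and buffer size below are placeholders.
+ *
+ *	struct mc_session_handle session = {
+ *		.device_id = MC_DEVICE_ID_DEFAULT,
+ *	};
+ *	struct mc_uuid_t uuid = { 0 };	// hypothetical TA UUID
+ *	u8 *tci = NULL;
+ *	enum mc_result ret;
+ *
+ *	ret = mc_open_device(MC_DEVICE_ID_DEFAULT);
+ *	if (ret != MC_DRV_OK)
+ *		return ret;
+ *
+ *	ret = mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, 4096, &tci, 0);
+ *	if (ret == MC_DRV_OK)
+ *		ret = mc_open_session(&session, &uuid, tci, 4096);
+ *
+ *	if (ret == MC_DRV_OK) {
+ *		// ... fill the TCI, then notify the TA and wait for its answer
+ *		ret = mc_notify(&session);
+ *		if (ret == MC_DRV_OK)
+ *			ret = mc_wait_notification(&session,
+ *						   MC_INFINITE_TIMEOUT);
+ *		mc_close_session(&session);
+ *	}
+ *
+ *	if (tci)
+ *		mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
+ *	mc_close_device(MC_DEVICE_ID_DEFAULT);
+ */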
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.c
new file mode 100644
index 0000000..8c06428
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include "platform.h"
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+#include "main.h"
+#include "clock.h"
+
+static struct clk_context {
+	struct clk		*mc_ce_iface_clk;
+	struct clk		*mc_ce_core_clk;
+	struct clk		*mc_ce_bus_clk;
+	struct clk		*mc_ce_core_src_clk;
+	/* Clocks are managed by Linux Kernel. No need to do anything */
+	bool			no_clock_support;
+} clk_ctx;
+
+int mc_clock_init(void)
+{
+	int ret;
+#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
+	int core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+	u32 of_core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+#endif
+#endif
+#ifdef TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE
+	struct device_node *np;
+
+	np = of_find_node_by_name(NULL, TT_CLOCK_DEVICE_NAME);
+	if (!np) {
+		ret = -ENOENT;
+		mc_dev_err(ret, "cannot get clock device from DT");
+		goto error;
+	}
+
+	clk_ctx.no_clock_support =
+		of_property_read_bool(np, TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE);
+	if (clk_ctx.no_clock_support)
+		return 0;
+#endif /* TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE */
+
+#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+	/* Get core clk src */
+	clk_ctx.mc_ce_core_src_clk = clk_get(g_ctx.mcd, "core_clk_src");
+	if (IS_ERR(clk_ctx.mc_ce_core_src_clk)) {
+		ret = PTR_ERR(clk_ctx.mc_ce_core_src_clk);
+		mc_dev_err(ret, "cannot get core src clock");
+		goto error;
+	}
+#endif
+
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+	ret = of_property_read_u32(g_ctx.mcd->of_node,
+				   MC_CRYPTO_CLOCK_CORESRC_PROPNAME,
+				   &of_core_src_rate);
+	if (ret) {
+		core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+		mc_dev_info("cannot get clock frequency from DT, using %d",
+			    core_src_rate);
+	} else {
+		core_src_rate = of_core_src_rate;
+	}
+
+#endif /* MC_CRYPTO_CLOCK_CORESRC_PROPNAME */
+
+	ret = clk_set_rate(clk_ctx.mc_ce_core_src_clk, core_src_rate);
+	if (ret) {
+		clk_put(clk_ctx.mc_ce_core_src_clk);
+		clk_ctx.mc_ce_core_src_clk = NULL;
+		mc_dev_err(ret, "cannot set core clock src rate");
+		ret = -EIO;
+		goto error;
+	}
+#endif  /* MC_CLOCK_CORESRC_DEFAULTRATE */
+
+	/* Get core clk */
+	clk_ctx.mc_ce_core_clk = clk_get(g_ctx.mcd, "core_clk");
+	if (IS_ERR(clk_ctx.mc_ce_core_clk)) {
+		ret = PTR_ERR(clk_ctx.mc_ce_core_clk);
+		mc_dev_err(ret, "cannot get core clock");
+		goto error;
+	}
+
+	/* Get Interface clk */
+	clk_ctx.mc_ce_iface_clk = clk_get(g_ctx.mcd, "iface_clk");
+	if (IS_ERR(clk_ctx.mc_ce_iface_clk)) {
+		clk_put(clk_ctx.mc_ce_core_clk);
+		ret = PTR_ERR(clk_ctx.mc_ce_iface_clk);
+		mc_dev_err(ret, "cannot get iface clock");
+		goto error;
+	}
+
+	/* Get AXI clk */
+	clk_ctx.mc_ce_bus_clk = clk_get(g_ctx.mcd, "bus_clk");
+	if (IS_ERR(clk_ctx.mc_ce_bus_clk)) {
+		clk_put(clk_ctx.mc_ce_iface_clk);
+		clk_put(clk_ctx.mc_ce_core_clk);
+		ret = PTR_ERR(clk_ctx.mc_ce_bus_clk);
+		mc_dev_err(ret, "cannot get AXI bus clock");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	clk_ctx.mc_ce_core_clk = NULL;
+	clk_ctx.mc_ce_iface_clk = NULL;
+	clk_ctx.mc_ce_bus_clk = NULL;
+	clk_ctx.mc_ce_core_src_clk = NULL;
+	return ret;
+}
+
+void mc_clock_exit(void)
+{
+	if (clk_ctx.no_clock_support)
+		return;
+
+	if (clk_ctx.mc_ce_iface_clk)
+		clk_put(clk_ctx.mc_ce_iface_clk);
+
+	if (clk_ctx.mc_ce_core_clk)
+		clk_put(clk_ctx.mc_ce_core_clk);
+
+	if (clk_ctx.mc_ce_bus_clk)
+		clk_put(clk_ctx.mc_ce_bus_clk);
+
+	if (clk_ctx.mc_ce_core_src_clk)
+		clk_put(clk_ctx.mc_ce_core_src_clk);
+}
+
+int mc_clock_enable(void)
+{
+	int ret;
+
+	if (clk_ctx.no_clock_support)
+		return 0;
+
+	ret = clk_prepare_enable(clk_ctx.mc_ce_core_clk);
+	if (ret) {
+		mc_dev_err(ret, "cannot enable core clock");
+		goto err_core;
+	}
+
+	ret = clk_prepare_enable(clk_ctx.mc_ce_iface_clk);
+	if (ret) {
+		mc_dev_err(ret, "cannot enable interface clock");
+		goto err_iface;
+	}
+
+	ret = clk_prepare_enable(clk_ctx.mc_ce_bus_clk);
+	if (ret) {
+		mc_dev_err(ret, "cannot enable bus clock");
+		goto err_bus;
+	}
+
+	return 0;
+
+err_bus:
+	clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
+err_iface:
+	clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
+err_core:
+	return ret;
+}
+
+void mc_clock_disable(void)
+{
+	if (clk_ctx.no_clock_support)
+		return;
+
+	if (clk_ctx.mc_ce_iface_clk)
+		clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
+
+	if (clk_ctx.mc_ce_core_clk)
+		clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
+
+	if (clk_ctx.mc_ce_bus_clk)
+		clk_disable_unprepare(clk_ctx.mc_ce_bus_clk);
+}
+
+#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.h
new file mode 100644
index 0000000..09b4884
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/clock.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_CLOCK_H_
+#define _MC_CLOCK_H_
+
+#include "platform.h"	/* MC_CRYPTO_CLOCK_MANAGEMENT */
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+
+/* Initialize secure crypto clocks */
+int mc_clock_init(void);
+/* Free secure crypto clocks */
+void mc_clock_exit(void);
+/* Enable secure crypto clocks */
+int mc_clock_enable(void);
+/* Disable secure crypto clocks */
+void mc_clock_disable(void);
+
+#else /* MC_CRYPTO_CLOCK_MANAGEMENT */
+
+static inline int mc_clock_init(void)
+{
+	return 0;
+}
+
+static inline void mc_clock_exit(void)
+{
+}
+
+static inline int mc_clock_enable(void)
+{
+	return 0;
+}
+
+static inline void mc_clock_disable(void)
+{
+}
+
+#endif /* !MC_CRYPTO_CLOCK_MANAGEMENT */
+
+#endif /* _MC_CLOCK_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.c
new file mode 100644
index 0000000..895a245
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>	/* local_clock */
+#include <linux/version.h>
+#include <linux/sched/clock.h>	/* local_clock */
+
+#include "mci/mcifc.h"
+
+#include "platform.h"	/* MC_SMC_FASTCALL */
+#include "main.h"
+#include "fastcall.h"
+
+/* Use the arch_extension sec pseudo op before switching to secure world */
+#if defined(__GNUC__) && \
+	defined(__GNUC_MINOR__) && \
+	defined(__GNUC_PATCHLEVEL__) && \
+	((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
+	>= 40502
+#ifndef CONFIG_ARM64
+#define MC_ARCH_EXTENSION_SEC
+#endif
+#endif
+
+/* Base for all fastcalls, do not use outside of other structs */
+union fc_common {
+	struct {
+		u32 cmd;
+		u32 param[3];
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+		u32 param[2];
+	} out;
+};
+
+union fc_init {
+	union fc_common common;
+
+	struct {
+		u32 cmd;
+		u32 base;
+		u32 nq_info;
+		u32 mcp_info;
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+		u32 flags;
+		u32 rfu;
+	} out;
+};
+
+union fc_info {
+	union fc_common common;
+
+	struct {
+		u32 cmd;
+		u32 ext_info_id;
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+		u32 state;
+		u32 ext_info;
+	} out;
+};
+
+union fc_trace {
+	union fc_common common;
+
+	struct {
+		u32 cmd;
+		u32 buffer_low;
+		u32 buffer_high;
+		u32 size;
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+	} out;
+};
+
+union fc_nsiq {
+	union fc_common common;
+
+	struct {
+		u32 cmd;
+		u32 debug_ret;
+		u32 debug_session_id;
+		u32 debug_payload;
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+	} out;
+};
+
+union fc_yield {
+	union fc_common common;
+
+	struct {
+		u32 cmd;
+		u32 debug_ret;
+		u32 debug_timeslice;
+	} in;
+
+	struct {
+		u32 resp;
+		u32 ret;
+	} out;
+};
+
+/* Structure to log SMC calls */
+struct smc_log_entry {
+	u64 cpu_clk;
+	int cpu_id;
+	union fc_common fc;
+};
+
+#define SMC_LOG_SIZE 1024
+static struct smc_log_entry smc_log[SMC_LOG_SIZE];
+static int smc_log_index;
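+/* The SMC log is a plain ring buffer written without locking; it is only
+ * meant for debugging via mc_fastcall_debug_smclog()
+ */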
+
+/*
+ * convert fast call return code to linux driver module error code
+ */
+static int convert_fc_ret(u32 ret)
+{
+	switch (ret) {
+	case MC_FC_RET_OK:
+		return 0;
+	case MC_FC_RET_ERR_INVALID:
+		return -EINVAL;
+	case MC_FC_RET_ERR_ALREADY_INITIALIZED:
+		return -EBUSY;
+	default:
+		return -EFAULT;
+	}
+}
+
+/*
+ * __smc() - fast call to MobiCore
+ *
+ * @data: pointer to fast call data
+ */
+static inline int __smc(union fc_common *fc, const char *func)
+{
+	int ret = 0;
+
+	/* Log SMC call */
+	smc_log[smc_log_index].cpu_clk = local_clock();
+	smc_log[smc_log_index].cpu_id  = raw_smp_processor_id();
+	smc_log[smc_log_index].fc = *fc;
+	if (++smc_log_index >= SMC_LOG_SIZE)
+		smc_log_index = 0;
+
+#ifdef MC_SMC_FASTCALL
+	ret = smc_fastcall(fc, sizeof(*fc));
+#else /* MC_SMC_FASTCALL */
+	{
+#ifdef CONFIG_ARM64
+		/* SMC expects values in x0-x3 */
+		register u64 reg0 __asm__("x0") = fc->in.cmd;
+		register u64 reg1 __asm__("x1") = fc->in.param[0];
+		register u64 reg2 __asm__("x2") = fc->in.param[1];
+		register u64 reg3 __asm__("x3") = fc->in.param[2];
+
+		/*
+		 * According to AARCH64 SMC Calling Convention (ARM DEN 0028A),
+		 * section 3.1: registers x4-x17 are unpredictable/scratch
+		 * registers.  So we have to make sure that the compiler does
+		 * not allocate any of those registers by letting it know that
+		 * the asm code might clobber them.
+		 */
+		__asm__ volatile (
+			"smc #0\n"
+			: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+			:
+			: "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
+			  "x12", "x13", "x14", "x15", "x16", "x17"
+		);
+#else /* CONFIG_ARM64 */
+		/* SMC expects values in r0-r3 */
+		register u32 reg0 __asm__("r0") = fc->in.cmd;
+		register u32 reg1 __asm__("r1") = fc->in.param[0];
+		register u32 reg2 __asm__("r2") = fc->in.param[1];
+		register u32 reg3 __asm__("r3") = fc->in.param[2];
+
+		__asm__ volatile (
+#ifdef MC_ARCH_EXTENSION_SEC
+			/*
+			 * This pseudo op is supported and required from
+			 * binutils 2.21 on
+			 */
+			".arch_extension sec\n"
+#endif /* MC_ARCH_EXTENSION_SEC */
+			"smc #0\n"
+			: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+		);
+
+#endif /* !CONFIG_ARM64 */
+
+		/* set response */
+		fc->out.resp     = reg0;
+		fc->out.ret      = reg1;
+		fc->out.param[0] = reg2;
+		fc->out.param[1] = reg3;
+	}
+#endif /* !MC_SMC_FASTCALL */
+
+	if (ret) {
+		mc_dev_err(ret, "failed for %s", func);
+	} else {
+		ret = convert_fc_ret(fc->out.ret);
+		if (ret)
+			mc_dev_err(ret, "%s failed (%x)", func, fc->out.ret);
+	}
+
+	return ret;
+}
+
+#define smc(__fc__) __smc(__fc__.common, __func__)
+
+int fc_init(uintptr_t addr, ptrdiff_t off, size_t q_len, size_t buf_len)
+{
+	union fc_init fc;
+#ifdef CONFIG_ARM64
+	u32 addr_high = (u32)(addr >> 32);
+#else
+	u32 addr_high = 0;
+#endif
+
+	/* Call the INIT fastcall to set up MobiCore initialization */
+	memset(&fc, 0, sizeof(fc));
+	fc.in.cmd = MC_FC_INIT;
+	/* base address of the MCI buffer, PAGE_SIZE (default 4KB) aligned */
+	fc.in.base = (u32)addr;
+	/* notification buffer start/length [16:16] [start, length] */
+	fc.in.nq_info = (u32)(((addr_high & 0xFFFF) << 16) | (q_len & 0xFFFF));
+	/* mcp buffer start/length [16:16] [start, length] */
+	fc.in.mcp_info = (u32)((off << 16) | (buf_len & 0xFFFF));
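+	/*
+	 * Illustrative example (hypothetical values, 64-bit platform): with
+	 * addr = 0x123456000, q_len = 0x100, off = 0x400 and buf_len = 0x1000,
+	 * this yields base = 0x23456000, nq_info = 0x00010100 and
+	 * mcp_info = 0x04001000
+	 */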
+	mc_dev_devel("cmd=0x%08x, base=0x%08x, nq_info=0x%08x, mcp_info=0x%08x",
+		     fc.in.cmd, fc.in.base, fc.in.nq_info,
+		     fc.in.mcp_info);
+	return smc(&fc);
+}
+
+int fc_info(u32 ext_info_id, u32 *state, u32 *ext_info)
+{
+	union fc_info fc;
+	int ret = 0;
+
+	memset(&fc, 0, sizeof(fc));
+	fc.in.cmd = MC_FC_INFO;
+	fc.in.ext_info_id = ext_info_id;
+	ret = smc(&fc);
+	if (ret) {
+		if (state)
+			*state = MC_STATUS_NOT_INITIALIZED;
+
+		if (ext_info)
+			*ext_info = 0;
+
+		mc_dev_err(ret, "failed for index %d", ext_info_id);
+	} else {
+		if (state)
+			*state = fc.out.state;
+
+		if (ext_info)
+			*ext_info = fc.out.ext_info;
+	}
+
+	return ret;
+}
+
+int fc_trace_init(phys_addr_t buffer, u32 size)
+{
+	union fc_trace fc;
+
+	memset(&fc, 0, sizeof(fc));
+	fc.in.cmd = MC_FC_MEM_TRACE;
+	fc.in.buffer_low = (u32)buffer;
+#ifdef CONFIG_ARM64
+	fc.in.buffer_high = (u32)(buffer >> 32);
+#endif
+	fc.in.size = size;
+	return smc(&fc);
+}
+
+int fc_trace_deinit(void)
+{
+	return fc_trace_init(0, 0);
+}
+
+/* sid and payload are only used for debug purposes */
+int fc_nsiq(u32 session_id, u32 payload)
+{
+	union fc_nsiq fc;
+
+	memset(&fc, 0, sizeof(fc));
+	fc.in.cmd = MC_SMC_N_SIQ;
+	fc.in.debug_session_id = session_id;
+	fc.in.debug_payload = payload;
+	return smc(&fc);
+}
+
+/* timeslice is only used for debug purposes */
+int fc_yield(u32 timeslice)
+{
+	union fc_yield fc;
+
+	memset(&fc, 0, sizeof(fc));
+	fc.in.cmd = MC_SMC_N_YIELD;
+	fc.in.debug_timeslice = timeslice;
+	return smc(&fc);
+}
+
+static int show_smc_log_entry(struct kasnprintf_buf *buf,
+			      struct smc_log_entry *entry)
+{
+	return kasnprintf(buf, "%20llu %10d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			  entry->cpu_clk, entry->cpu_id, entry->fc.in.cmd,
+			  entry->fc.in.param[0], entry->fc.in.param[1],
+			  entry->fc.in.param[2]);
+}
+
+/*
+ * Dump the SMC log circular buffer, starting from the oldest command. It is
+ * assumed that no new entries are being added at this point.
+ */
+int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf)
+{
+	int i, ret = 0;
+
+	ret = kasnprintf(buf, "%10s %20s %10s %-10s %-10s %-10s\n", "CPU id",
+			 "CPU clock", "command", "param1", "param2", "param3");
+	if (ret < 0)
+		return ret;
+
+	if (smc_log[smc_log_index].cpu_clk)
+		/* Buffer has wrapped around, dump end (oldest records) */
+		for (i = smc_log_index; i < SMC_LOG_SIZE; i++) {
+			ret = show_smc_log_entry(buf, &smc_log[i]);
+			if (ret < 0)
+				return ret;
+		}
+
+	/* Dump first records */
+	for (i = 0; i < smc_log_index; i++) {
+		ret = show_smc_log_entry(buf, &smc_log[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return ret;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.h
new file mode 100644
index 0000000..c3adbda
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/fastcall.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _TBASE_FASTCALL_H_
+#define _TBASE_FASTCALL_H_
+
+int fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len);
+int fc_info(u32 ext_info_id, u32 *state, u32 *ext_info);
+int fc_trace_init(phys_addr_t buffer, u32 size);
+int fc_trace_deinit(void);
+int fc_nsiq(u32 session_id, u32 payload);
+int fc_yield(u32 timeslice);
+
+int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf);
+
+#endif /* _TBASE_FASTCALL_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.c
new file mode 100644
index 0000000..76f43fe
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/freezer.h>
+#include <asm/barrier.h>
+#include <linux/irq.h>
+#include <linux/version.h>
+#include <linux/sched/clock.h>	/* local_clock */
+
+#include "public/GP/tee_client_api.h"	/* GP error codes/origins FIXME move */
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "mci/mcimcp.h"
+#include "mci/mcifc.h"
+#include "mci/mcinq.h"		/* SID_MCP */
+#include "mci/mcitime.h"	/* struct mcp_time */
+#include "mci/mciiwp.h"
+
+#include "main.h"
+#include "admin.h"              /* tee_object* for 'blob' */
+#include "mmu.h"                /* MMU for 'blob' */
+#include "nq.h"
+#include "xen_fe.h"
+#include "iwp.h"
+
+#define IWP_RETRIES		5
+#define IWP_TIMEOUT		10
+#define INVALID_IWS_SLOT	0xFFFFFFFF
+
+/* Macros */
+#define _TEEC_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
+
+/* Parameter number */
+#define _TEEC_PARAMETER_NUMBER	4
+
+struct iws {
+	struct list_head list;
+	u64 slot;
+};
+
+static struct {
+	bool iwp_dead;
+	struct interworld_session *iws;
+	/* Interworld lists lock */
+	struct mutex		iws_list_lock;
+	/* InterWorld lists */
+	struct iws		*iws_list_pool;
+	struct list_head	free_iws;
+	struct list_head	allocd_iws;
+	/* Sessions */
+	struct mutex		sessions_lock;
+	struct list_head	sessions;
+	/* TEE bad state detection */
+	struct notifier_block	tee_stop_notifier;
+	/* Log of last commands */
+#define LAST_CMDS_SIZE 256
+	struct mutex		last_cmds_mutex;	/* Log protection */
+	struct command_info {
+		u64			cpu_clk;	/* Kernel time */
+		pid_t			pid;		/* Caller PID */
+		u32			id;		/* IWP command ID */
+		u32			session_id;
+		char			uuid_str[34];
+		enum state {
+			UNUSED,		/* Unused slot */
+			PENDING,	/* Previous command in progress */
+			SENT,		/* Waiting for response */
+			COMPLETE,	/* Got result */
+			FAILED,		/* Something went wrong */
+		}			state;	/* Command processing state */
+		struct gp_return	result;	/* Command result */
+		int			errno;	/* Return code */
+	}				last_cmds[LAST_CMDS_SIZE];
+	int				last_cmds_index;
+} l_ctx;
+
+static void iwp_notif_handler(u32 id, u32 payload)
+{
+	struct iwp_session *iwp_session = NULL, *candidate;
+
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(candidate, &l_ctx.sessions, list) {
+		mc_dev_devel("candidate->slot [%08llx]", candidate->slot);
+		/* If id is SID_CANCEL_OPERATION, there is a pseudo session */
+		if (candidate->slot == payload &&
+		    (id != SID_CANCEL_OPERATION || candidate->sid == id)) {
+			iwp_session = candidate;
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+
+	if (!iwp_session) {
+		mc_dev_err(-ENXIO, "IWP no session found for id=0x%x slot=0x%x",
+			   id, payload);
+		return;
+	}
+
+	mc_dev_devel("IWP: iwp_session [%p] id [%08x] slot [%08x]",
+		     iwp_session, id, payload);
+	nq_session_state_update(&iwp_session->nq_session, NQ_NOTIF_RECEIVED);
+	complete(&iwp_session->completion);
+}
+
+void iwp_session_init(struct iwp_session *iwp_session,
+		      const struct identity *identity)
+{
+	nq_session_init(&iwp_session->nq_session, true);
+	iwp_session->sid = SID_INVALID;
+	iwp_session->slot = INVALID_IWS_SLOT;
+	INIT_LIST_HEAD(&iwp_session->list);
+	mutex_init(&iwp_session->notif_wait_lock);
+	init_completion(&iwp_session->completion);
+	mutex_init(&iwp_session->iws_lock);
+	iwp_session->state = IWP_SESSION_RUNNING;
+	if (identity)
+		iwp_session->client_identity = *identity;
+}
+
+static u64 iws_slot_get(void)
+{
+	struct iws *iws;
+	u64 slot = INVALID_IWS_SLOT;
+
+	if (is_xen_domu())
+		return (uintptr_t)kzalloc(sizeof(*iws), GFP_KERNEL);
+
+	mutex_lock(&l_ctx.iws_list_lock);
+	if (!list_empty(&l_ctx.free_iws)) {
+		iws = list_first_entry(&l_ctx.free_iws, struct iws, list);
+		slot = iws->slot;
+		list_move(&iws->list, &l_ctx.allocd_iws);
+		atomic_inc(&g_ctx.c_slots);
+		mc_dev_devel("got slot %llu", slot);
+	}
+	mutex_unlock(&l_ctx.iws_list_lock);
+	return slot;
+}
+
+/* Passing INVALID_IWS_SLOT is supported */
+static void iws_slot_put(u64 slot)
+{
+	struct iws *iws;
+	bool found = false;
+
+	if (is_xen_domu()) {
+		kfree((void *)(uintptr_t)slot);
+		return;
+	}
+
+	mutex_lock(&l_ctx.iws_list_lock);
+	list_for_each_entry(iws, &l_ctx.allocd_iws, list) {
+		if (slot == iws->slot) {
+			list_move(&iws->list, &l_ctx.free_iws);
+			atomic_dec(&g_ctx.c_slots);
+			found = true;
+			mc_dev_devel("put slot %llu", slot);
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.iws_list_lock);
+
+	if (!found)
+		mc_dev_err(-EINVAL, "slot %llu not found", slot);
+}
+
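+/*
+ * For a Xen domU the "slot" actually carries the kernel address of a locally
+ * allocated struct interworld_session; otherwise it is a byte offset into the
+ * shared l_ctx.iws array
+ */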
+static inline struct interworld_session *slot_to_iws(u64 slot)
+{
+	if (is_xen_domu())
+		return (struct interworld_session *)(uintptr_t)slot;
+
+	return (struct interworld_session *)((uintptr_t)l_ctx.iws + (u32)slot);
+}
+
+/*
+ * IWP command functions
+ */
+static int iwp_cmd(struct iwp_session *iwp_session, u32 id,
+		   struct teec_uuid *uuid, bool killable)
+{
+	struct command_info *cmd_info;
+	int ret;
+
+	/* Initialize MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+	memset(cmd_info, 0, sizeof(*cmd_info));
+	cmd_info->cpu_clk = local_clock();
+	cmd_info->pid = current->pid;
+	cmd_info->id = id;
+	if (id == SID_OPEN_SESSION || id == SID_OPEN_TA) {
+		/* Keep UUID because it's an 'open session' cmd */
+		const char *cuuid = (const char *)uuid;
+		size_t i;
+
+		cmd_info->uuid_str[0] = ' ';
+		for (i = 0; i < sizeof(*uuid); i++) {
+			snprintf(&cmd_info->uuid_str[1 + i * 2], 3, "%02x",
+				 cuuid[i]);
+		}
+	} else if (id == SID_CANCEL_OPERATION) {
+		struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+
+		if (iws)
+			cmd_info->session_id = iws->session_handle;
+		else
+			cmd_info->session_id = 0;
+	} else {
+		cmd_info->session_id = iwp_session->sid;
+	}
+
+	cmd_info->state = PENDING;
+	iwp_set_ret(0, &cmd_info->result);
+	if (++l_ctx.last_cmds_index >= LAST_CMDS_SIZE)
+		l_ctx.last_cmds_index = 0;
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+
+	if (l_ctx.iwp_dead)
+		return -EHOSTUNREACH;
+
+	mc_dev_devel("psid [%08x], sid [%08x]", id, iwp_session->sid);
+	ret = nq_session_notify(&iwp_session->nq_session, id,
+				iwp_session->slot);
+	if (ret) {
+		mc_dev_err(ret, "sid [%08x]: sending failed", iwp_session->sid);
+		mutex_lock(&l_ctx.last_cmds_mutex);
+		cmd_info->errno = ret;
+		cmd_info->state = FAILED;
+		mutex_unlock(&l_ctx.last_cmds_mutex);
+		return ret;
+	}
+
+	/* Update MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	cmd_info->state = SENT;
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+
+	/*
+	 * NB: Wait cannot be interruptible as we need an answer from the SWd.
+	 * It's up to user space to request a cancellation (for open session
+	 * and command invocation operations).
+	 *
+	 * We do provide a way out to make applications killable in some cases
+	 * though.
+	 */
+	if (killable) {
+		ret = wait_for_completion_killable(&iwp_session->completion);
+		if (ret) {
+			iwp_request_cancellation(iwp_session->slot);
+			/* Make sure the SWd did not die in the meantime */
+			if (l_ctx.iwp_dead)
+				return -EHOSTUNREACH;
+
+			wait_for_completion(&iwp_session->completion);
+		}
+	} else {
+		wait_for_completion(&iwp_session->completion);
+	}
+
+	if (l_ctx.iwp_dead)
+		return -EHOSTUNREACH;
+
+	/* Update the command log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	{
+		struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+
+		cmd_info->result.origin = iws->return_origin;
+		cmd_info->result.value = iws->status;
+		if (id == SID_OPEN_SESSION || id == SID_OPEN_TA)
+			cmd_info->session_id = iws->session_handle;
+	}
+	cmd_info->state = COMPLETE;
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	nq_session_state_update(&iwp_session->nq_session, NQ_NOTIF_CONSUMED);
+	return 0;
+}
+
+/*
+ * Convert errno into GP error and set origin to COMMS.
+ * Note: -ECHILD is used to tell the caller that we have a GP error in value, so
+ * we return 0 on success and -ECHILD on error. If -ECHILD is given, we assume
+ * that value is already correctly set.
+ */
+int iwp_set_ret(int ret, struct gp_return *gp_ret)
+{
+	if (ret == -ECHILD) {
+		/* Already set */
+		return ret;
+	}
+
+	gp_ret->origin = TEEC_ORIGIN_COMMS;
+	switch (ret) {
+	case 0:
+		gp_ret->origin = TEEC_ORIGIN_TRUSTED_APP;
+		gp_ret->value = TEEC_SUCCESS;
+		return 0;
+	case -EACCES:
+		gp_ret->value = TEEC_ERROR_ACCESS_DENIED;
+		break;
+	case -EBUSY:
+		gp_ret->value = TEEC_ERROR_BUSY;
+		break;
+	case -ECANCELED:
+		gp_ret->value = TEEC_ERROR_CANCEL;
+		break;
+	case -EINVAL:
+	case -EFAULT:
+		gp_ret->value = TEEC_ERROR_BAD_PARAMETERS;
+		break;
+	case -EKEYREJECTED:
+		gp_ret->value = TEEC_ERROR_SECURITY;
+		break;
+	case -ENOENT:
+		gp_ret->value = TEEC_ERROR_ITEM_NOT_FOUND;
+		break;
+	case -ENOMEM:
+		gp_ret->value = TEEC_ERROR_OUT_OF_MEMORY;
+		break;
+	case -EHOSTUNREACH:
+		/* Tee crashed */
+		gp_ret->value = TEEC_ERROR_TARGET_DEAD;
+		break;
+	case -ENXIO:
+		/* Session not found or not running */
+		gp_ret->value = TEEC_ERROR_BAD_STATE;
+		break;
+	default:
+		gp_ret->value = TEEC_ERROR_GENERIC;
+	}
+	return -ECHILD;
+}
+
+int iwp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+			    struct gp_return *gp_ret)
+{
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_gp_register_shared_mem(mmu, sva, gp_ret);
+#endif
+
+	ret = mcp_map(SID_MEMORY_REFERENCE, mmu, sva);
+	/* iwp_set_ret() would override the origin, so fix it up afterwards */
+	ret = iwp_set_ret(ret, gp_ret);
+	if (ret)
+		gp_ret->origin = TEEC_ORIGIN_TEE;
+
+	return ret;
+}
+
+int iwp_release_shared_mem(struct mcp_buffer_map *map)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_gp_release_shared_mem(map);
+#endif
+
+	return mcp_unmap(SID_MEMORY_REFERENCE, map);
+}
+
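+/*
+ * Translate a GP client operation into interworld session parameters: values
+ * are copied as-is, temporary buffers are described in 'bufs' so the caller
+ * can map them, and registered memory parents are collected in 'parents' so
+ * their handles can be resolved later.
+ */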
+static int iwp_operation_to_iws(struct gp_operation *operation,
+				struct interworld_session *iws,
+				struct mc_ioctl_buffer *bufs,
+				struct gp_shared_memory **parents)
+{
+	int param_type, i;
+
+	iws->param_types = 0;
+	for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+		/* Reset reference for temporary memory */
+		bufs[i].va = 0;
+		/* Reset reference for registered memory */
+		parents[i] = NULL;
+		param_type = _TEEC_GET_PARAM_TYPE(operation->param_types, i);
+
+		switch (param_type) {
+		case TEEC_NONE:
+		case TEEC_VALUE_OUTPUT:
+			break;
+		case TEEC_VALUE_INPUT:
+		case TEEC_VALUE_INOUT:
+			iws->params[i].value.a = operation->params[i].value.a;
+			iws->params[i].value.b = operation->params[i].value.b;
+			break;
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			if (operation->params[i].tmpref.buffer) {
+				struct gp_temp_memref *tmpref;
+
+				tmpref = &operation->params[i].tmpref;
+				/* Prepare buffer to map */
+				bufs[i].va = tmpref->buffer;
+				if (tmpref->size > BUFFER_LENGTH_MAX) {
+					mc_dev_err(-EINVAL,
+						   "buffer size %llu too big",
+						   tmpref->size);
+					return -EINVAL;
+				}
+
+				bufs[i].len = tmpref->size;
+				if (param_type == TEEC_MEMREF_TEMP_INPUT)
+					bufs[i].flags = MC_IO_MAP_INPUT;
+				else if (param_type == TEEC_MEMREF_TEMP_OUTPUT)
+					bufs[i].flags = MC_IO_MAP_OUTPUT;
+				else
+					bufs[i].flags = MC_IO_MAP_INPUT_OUTPUT;
+			} else {
+				if (operation->params[i].tmpref.size)
+					return -EINVAL;
+
+				/* Null buffer, won't get mapped */
+				iws->params[i].tmpref.physical_address = 0;
+				iws->params[i].tmpref.size = 0;
+				iws->params[i].tmpref.offset = 0;
+				iws->params[i].tmpref.wsm_type = WSM_INVALID;
+			}
+			break;
+		case TEEC_MEMREF_WHOLE:
+			parents[i] = &operation->params[i].memref.parent;
+			iws->params[i].memref.offset = 0;
+			iws->params[i].memref.size =
+				operation->params[i].memref.parent.size;
+			break;
+		case TEEC_MEMREF_PARTIAL_INPUT:
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+			parents[i] = &operation->params[i].memref.parent;
+			iws->params[i].memref.offset =
+				operation->params[i].memref.offset;
+			iws->params[i].memref.size =
+				operation->params[i].memref.size;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		iws->param_types |= (u32)(param_type << (i * 4));
+	}
+
+	return 0;
+}
+
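+/*
+ * Helpers to patch the IWS parameters once the buffers are available: a
+ * registered buffer is referenced by its SWd handle (sva), a temporary
+ * buffer by its mapping description.
+ */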
+static inline void iwp_iws_set_tmpref(struct interworld_session *iws, int i,
+				      const struct mcp_buffer_map *map)
+{
+	iws->params[i].tmpref.physical_address = map->addr;
+	iws->params[i].tmpref.size = map->length;
+	iws->params[i].tmpref.offset = map->offset;
+	iws->params[i].tmpref.wsm_type = map->type;
+}
+
+static inline void iwp_iws_set_memref(struct interworld_session *iws, int i,
+				      u32 sva)
+{
+	iws->params[i].memref.memref_handle = sva;
+}
+
+static inline void iwp_iws_set_refs(struct interworld_session *iws,
+				    const struct iwp_buffer_map *maps)
+{
+	int i;
+
+	for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++)
+		if (maps[i].sva)
+			iwp_iws_set_memref(iws, i, maps[i].sva);
+		else if (maps[i].map.addr)
+			iwp_iws_set_tmpref(iws, i, &maps[i].map);
+}
+
+static void iwp_iws_to_operation(const struct interworld_session *iws,
+				 struct gp_operation *operation)
+{
+	int i;
+
+	for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+		switch (_TEEC_GET_PARAM_TYPE(operation->param_types, i)) {
+		case TEEC_VALUE_OUTPUT:
+		case TEEC_VALUE_INOUT:
+			operation->params[i].value.a = iws->params[i].value.a;
+			operation->params[i].value.b = iws->params[i].value.b;
+			break;
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			operation->params[i].tmpref.size =
+				iws->params[i].tmpref.size;
+			break;
+		case TEEC_MEMREF_WHOLE:
+			if (operation->params[i].memref.parent.flags !=
+			    TEEC_MEM_INPUT)
+				operation->params[i].memref.size =
+					iws->params[i].tmpref.size;
+			break;
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+			operation->params[i].memref.size =
+				iws->params[i].tmpref.size;
+			break;
+		case TEEC_NONE:
+		case TEEC_VALUE_INPUT:
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_PARTIAL_INPUT:
+			break;
+		default:
+			/* Error caught by iwp_operation_to_iws() */
+			break;
+		}
+	}
+}
+
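+/*
+ * Example: the UUID bytes 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff
+ * become time_low = 0x00112233, time_mid = 0x4455,
+ * time_hi_and_version = 0x6677, and the last eight bytes are copied verbatim
+ * into clock_seq_and_node.
+ */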
+static inline void mcuuid_to_tee_uuid(const struct mc_uuid_t *in,
+				      struct teec_uuid *out)
+{
+	/*
+	 * Warning: this code works only on little-endian platforms.
+	 */
+	out->time_low = in->value[3] +
+		(in->value[2] << 8) +
+		(in->value[1] << 16) +
+		(in->value[0] << 24);
+	out->time_mid = in->value[5] +
+		(in->value[4] << 8);
+	out->time_hi_and_version = in->value[7] +
+		(in->value[6] << 8);
+	memcpy(out->clock_seq_and_node, in->value + 8, 8);
+}
+
+static const char *origin_to_string(u32 origin)
+{
+	switch (origin) {
+	case TEEC_ORIGIN_API:
+		return "API";
+	case TEEC_ORIGIN_COMMS:
+		return "COMMS";
+	case TEEC_ORIGIN_TEE:
+		return "TEE";
+	case TEEC_ORIGIN_TRUSTED_APP:
+		return "TRUSTED_APP";
+	}
+	return "UNKNOWN";
+}
+
+static const char *value_to_string(u32 value)
+{
+	switch (value) {
+	case TEEC_SUCCESS:
+		return "SUCCESS";
+	case TEEC_ERROR_GENERIC:
+		return "GENERIC";
+	case TEEC_ERROR_ACCESS_DENIED:
+		return "ACCESS_DENIED";
+	case TEEC_ERROR_CANCEL:
+		return "CANCEL";
+	case TEEC_ERROR_ACCESS_CONFLICT:
+		return "ACCESS_CONFLICT";
+	case TEEC_ERROR_EXCESS_DATA:
+		return "EXCESS_DATA";
+	case TEEC_ERROR_BAD_FORMAT:
+		return "BAD_FORMAT";
+	case TEEC_ERROR_BAD_PARAMETERS:
+		return "BAD_PARAMETERS";
+	case TEEC_ERROR_BAD_STATE:
+		return "BAD_STATE";
+	case TEEC_ERROR_ITEM_NOT_FOUND:
+		return "ITEM_NOT_FOUND";
+	case TEEC_ERROR_NOT_IMPLEMENTED:
+		return "NOT_IMPLEMENTED";
+	case TEEC_ERROR_NOT_SUPPORTED:
+		return "NOT_SUPPORTED";
+	case TEEC_ERROR_NO_DATA:
+		return "NO_DATA";
+	case TEEC_ERROR_OUT_OF_MEMORY:
+		return "OUT_OF_MEMORY";
+	case TEEC_ERROR_BUSY:
+		return "BUSY";
+	case TEEC_ERROR_COMMUNICATION:
+		return "COMMUNICATION";
+	case TEEC_ERROR_SECURITY:
+		return "SECURITY";
+	case TEEC_ERROR_SHORT_BUFFER:
+		return "SHORT_BUFFER";
+	case TEEC_ERROR_TARGET_DEAD:
+		return "TARGET_DEAD";
+	case TEEC_ERROR_STORAGE_NO_SPACE:
+		return "STORAGE_NO_SPACE";
+	}
+	return NULL;
+}
+
+static const char *cmd_to_string(u32 id)
+{
+	switch (id) {
+	case SID_OPEN_SESSION:
+		return "open session";
+	case SID_INVOKE_COMMAND:
+		return "invoke command";
+	case SID_CLOSE_SESSION:
+		return "close session";
+	case SID_CANCEL_OPERATION:
+		return "cancel operation";
+	case SID_MEMORY_REFERENCE:
+		return "memory reference";
+	case SID_OPEN_TA:
+		return "open TA";
+	case SID_REQ_TA:
+		return "request TA";
+	}
+	return "unknown";
+}
+
+static const char *state_to_string(enum iwp_session_state state)
+{
+	switch (state) {
+	case IWP_SESSION_RUNNING:
+		return "running";
+	case IWP_SESSION_CLOSE_REQUESTED:
+		return "close requested";
+	case IWP_SESSION_CLOSED:
+		return "closed";
+	}
+	return "error";
+}
+
+int iwp_open_session_prepare(
+	struct iwp_session *iwp_session,
+	struct gp_operation *operation,
+	struct mc_ioctl_buffer *bufs,
+	struct gp_shared_memory **parents,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws;
+	u64 slot, op_slot;
+	int ret = 0;
+
+	/* Get session final slot */
+	slot = iws_slot_get();
+	mc_dev_devel("slot [%08llx]", slot);
+	if (slot == INVALID_IWS_SLOT) {
+		ret = -ENOMEM;
+		mc_dev_err(ret, "can't get slot");
+		return iwp_set_ret(ret, gp_ret);
+	}
+
+	/* Get session temporary slot */
+	op_slot = iws_slot_get();
+	mc_dev_devel("op_slot [%08llx]", op_slot);
+	if (op_slot == INVALID_IWS_SLOT) {
+		ret = -ENOMEM;
+		mc_dev_err(ret, "can't get op_slot");
+		iws_slot_put(slot);
+		return iwp_set_ret(ret, gp_ret);
+	}
+
+	mutex_lock(&iwp_session->iws_lock);
+
+	/* Prepare final session: refer to temporary slot in final one */
+	iwp_session->slot = slot;
+	iws = slot_to_iws(slot);
+	memset(iws, 0, sizeof(*iws));
+
+	/* Prepare temporary session */
+	iwp_session->op_slot = op_slot;
+	iws = slot_to_iws(op_slot);
+	memset(iws, 0, sizeof(*iws));
+
+	if (operation) {
+		ret = iwp_operation_to_iws(operation, iws, bufs, parents);
+		if (ret)
+			iwp_open_session_abort(iwp_session);
+	}
+
+	return iwp_set_ret(ret, gp_ret);
+}
+
+void iwp_open_session_abort(struct iwp_session *iwp_session)
+{
+	iws_slot_put(iwp_session->slot);
+	iws_slot_put(iwp_session->op_slot);
+	mutex_unlock(&iwp_session->iws_lock);
+}
+
+/*
+ * Like open session except we pass the TA blob from NWd to SWd
+ */
+int iwp_open_session(
+	struct iwp_session *iwp_session,
+	const struct mc_uuid_t *uuid,
+	struct gp_operation *operation,
+	const struct iwp_buffer_map *maps,
+	struct interworld_session *iws_in,
+	struct tee_mmu **mmus,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+	struct interworld_session *op_iws = slot_to_iws(iwp_session->op_slot);
+	struct tee_object *obj = NULL;
+	struct tee_mmu *obj_mmu = NULL;
+	struct mcp_buffer_map obj_map;
+	int ret;
+
+	/* Operation is NULL when called from Xen BE */
+	if (operation) {
+		/* Login info */
+		op_iws->login = iwp_session->client_identity.login_type;
+		mc_dev_devel("iws->login [%08x]", op_iws->login);
+		memcpy(&op_iws->client_uuid,
+		       iwp_session->client_identity.login_data,
+		       sizeof(op_iws->client_uuid));
+
+		/* Put the incoming operation in the temporary IWS */
+		iwp_iws_set_refs(op_iws, maps);
+	} else {
+		struct mcp_buffer_map map;
+		int i;
+
+		*op_iws = *iws_in;
+
+		/* Insert correct mapping in operation */
+		for (i = 0; i < 4; i++) {
+			if (!mmus[i])
+				continue;
+
+			tee_mmu_buffer(mmus[i], &map);
+			iwp_iws_set_tmpref(op_iws, i, &map);
+		}
+	}
+
+	/* For the SWd to find the TA slot from the main one */
+	iws->command_id = (u32)iwp_session->op_slot;
+
+	/* TA blob handling */
+	if (!is_xen_domu()) {
+		union mclf_header *header;
+
+		obj = tee_object_get(uuid, true);
+		if (IS_ERR(obj)) {
+			/* Tell SWd to load TA from SFS as not in registry */
+			if (PTR_ERR(obj) == -ENOENT)
+				obj = tee_object_select(uuid);
+
+			if (IS_ERR(obj))
+				return PTR_ERR(obj);
+		}
+
+		/* Convert UUID */
+		header = (union mclf_header *)(&obj->data[obj->header_length]);
+		mcuuid_to_tee_uuid(&header->mclf_header_v2.uuid,
+				   &op_iws->target_uuid);
+
+		/* Create mapping for blob (alloc'd by driver => task = NULL) */
+		{
+			struct mc_ioctl_buffer buf = {
+				.va = (uintptr_t)obj->data,
+				.len = obj->length,
+				.flags = MC_IO_MAP_INPUT,
+			};
+
+			obj_mmu = tee_mmu_create(NULL, &buf);
+			if (IS_ERR(obj_mmu)) {
+				ret = PTR_ERR(obj_mmu);
+				goto err_mmu;
+			}
+
+			iws->param_types = TEEC_MEMREF_TEMP_INPUT;
+			tee_mmu_buffer(obj_mmu, &obj_map);
+			iwp_iws_set_tmpref(iws, 0, &obj_map);
+			mc_dev_devel("wsm_type [%04x], offset [%04x]",
+				     obj_map.type, obj_map.offset);
+			mc_dev_devel("size [%08x], physical_address [%08llx]",
+				     obj_map.length, obj_map.addr);
+		}
+	}
+
+	/* Add to local list of sessions so we can receive the notification */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_add_tail(&iwp_session->list, &l_ctx.sessions);
+	mutex_unlock(&l_ctx.sessions_lock);
+
+	/* Send IWP open command */
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		ret = xen_gp_open_session(iwp_session, uuid, maps, iws, op_iws,
+					  gp_ret);
+	else
+#endif
+		ret = iwp_cmd(iwp_session, SID_OPEN_TA, &op_iws->target_uuid,
+			      true);
+
+	/* Temporary slot is not needed any more */
+	iws_slot_put(iwp_session->op_slot);
+	/* Treat remote errors as errors, just use a specific errno */
+	if (!ret && iws->status != TEEC_SUCCESS) {
+		gp_ret->origin = iws->return_origin;
+		gp_ret->value = iws->status;
+		ret = -ECHILD;
+	}
+
+	if (!ret) {
+		/* set unique identifier for list search */
+		iwp_session->sid = iws->session_handle;
+		/* Get outgoing operation from main IWS */
+		if (operation)
+			iwp_iws_to_operation(iws, operation);
+		else
+			*iws_in = *iws;
+
+	} else {
+		/* Remove from list of sessions */
+		mutex_lock(&l_ctx.sessions_lock);
+		list_del(&iwp_session->list);
+		mutex_unlock(&l_ctx.sessions_lock);
+		iws_slot_put(iwp_session->slot);
+		mc_dev_devel("failed: %s from %s, ret %d",
+			     value_to_string(gp_ret->value),
+			     origin_to_string(gp_ret->origin), ret);
+	}
+
+	mutex_unlock(&iwp_session->iws_lock);
+
+	/* Blob no longer needed, as it has been re-mapped by the SWd */
+	if (obj_mmu)
+		tee_mmu_put(obj_mmu);
+
+err_mmu:
+	/* Delete secure object */
+	if (obj)
+		tee_object_free(obj);
+
+	return iwp_set_ret(ret, gp_ret);
+}
+
+static void iwp_session_release(
+	struct iwp_session *iwp_session)
+{
+	iwp_session->state = IWP_SESSION_CLOSED;
+
+	/* Remove from list of sessions */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_del(&iwp_session->list);
+	mutex_unlock(&l_ctx.sessions_lock);
+
+	nq_session_exit(&iwp_session->nq_session);
+	iws_slot_put(iwp_session->slot);
+}
+
+/*
+ * Legacy and GP TAs close differently:
+ * - GP TAs always send a notification with payload, whether on close or crash
+ * - GP TAs may take time to close
+ */
+int iwp_close_session(
+	struct iwp_session *iwp_session)
+{
+	int ret = 0;
+
+	if (is_xen_domu()) {
+#ifdef TRUSTONIC_XEN_DOMU
+		ret = xen_gp_close_session(iwp_session);
+#endif
+	} else {
+		mutex_lock(&iwp_session->iws_lock);
+		iwp_session->state = IWP_SESSION_CLOSE_REQUESTED;
+
+		/* Send IWP close command */
+		ret = iwp_cmd(iwp_session, SID_CLOSE_SESSION, NULL, false);
+		mutex_unlock(&iwp_session->iws_lock);
+	}
+
+	iwp_session_release(iwp_session);
+	mc_dev_devel("close session %x ret %d state %s", iwp_session->sid,
+		     ret, state_to_string(iwp_session->state));
+	return ret;
+}
+
+int iwp_invoke_command_prepare(
+	struct iwp_session *iwp_session,
+	u32 command_id,
+	struct gp_operation *operation,
+	struct mc_ioctl_buffer *bufs,
+	struct gp_shared_memory **parents,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws;
+	int ret = 0;
+
+	if (iwp_session->state != IWP_SESSION_RUNNING)
+		return iwp_set_ret(-EBADFD, gp_ret);
+
+	mutex_lock(&iwp_session->iws_lock);
+	iws = slot_to_iws(iwp_session->slot);
+	memset(iws, 0, sizeof(*iws));
+	iws->session_handle = iwp_session->sid;
+	if (operation) {
+		iws->command_id = command_id;
+		ret = iwp_operation_to_iws(operation, iws, bufs, parents);
+		if (ret)
+			iwp_invoke_command_abort(iwp_session);
+	}
+
+	return iwp_set_ret(ret, gp_ret);
+}
+
+void iwp_invoke_command_abort(
+	struct iwp_session *iwp_session)
+{
+	mutex_unlock(&iwp_session->iws_lock);
+}
+
+int iwp_invoke_command(
+	struct iwp_session *iwp_session,
+	struct gp_operation *operation,
+	const struct iwp_buffer_map *maps,
+	struct interworld_session *iws_in,
+	struct tee_mmu **mmus,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+	int ret = 0;
+
+	/* Operation is NULL when called from Xen BE */
+	if (operation) {
+		/* Update IWS with operation maps */
+		iwp_iws_set_refs(iws, maps);
+	} else {
+		struct mcp_buffer_map map;
+		int i;
+
+		*iws = *iws_in;
+
+		/* Insert correct mapping in operation */
+		for (i = 0; i < 4; i++) {
+			if (!mmus[i])
+				continue;
+
+			tee_mmu_buffer(mmus[i], &map);
+			iwp_iws_set_tmpref(iws, i, &map);
+		}
+	}
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		ret = xen_gp_invoke_command(iwp_session, maps, iws, gp_ret);
+	else
+#endif
+		ret = iwp_cmd(iwp_session, SID_INVOKE_COMMAND, NULL, true);
+
+	/* Treat remote errors as errors, just use a specific errno */
+	if (!ret && iws->status != TEEC_SUCCESS)
+		ret = -ECHILD;
+
+	if (operation)
+		iwp_iws_to_operation(iws, operation);
+	else
+		*iws_in = *iws;
+
+	if (ret && (ret != -ECHILD)) {
+		ret = iwp_set_ret(ret, gp_ret);
+		mc_dev_devel("failed with ret [%08x]", ret);
+	} else {
+		gp_ret->origin = iws->return_origin;
+		gp_ret->value = iws->status;
+	}
+
+	mutex_unlock(&iwp_session->iws_lock);
+	return ret;
+}
+
+int iwp_request_cancellation(
+	u64 slot)
+{
+	/* Pseudo IWP session for cancellation */
+	struct iwp_session iwp_session;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_gp_request_cancellation(
+			(uintptr_t)slot_to_iws(slot));
+#endif
+
+	iwp_session_init(&iwp_session, NULL);
+	/* sid is local. Set it to SID_CANCEL_OPERATION to make things clear */
+	iwp_session.sid = SID_CANCEL_OPERATION;
+	iwp_session.slot = slot;
+	mutex_lock(&l_ctx.sessions_lock);
+	list_add_tail(&iwp_session.list, &l_ctx.sessions);
+	mutex_unlock(&l_ctx.sessions_lock);
+	ret = iwp_cmd(&iwp_session, SID_CANCEL_OPERATION, NULL, false);
+	mutex_lock(&l_ctx.sessions_lock);
+	list_del(&iwp_session.list);
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+static int debug_sessions(struct kasnprintf_buf *buf)
+{
+	struct iwp_session *session;
+	int ret;
+
+	/* Header */
+	ret = kasnprintf(buf, "%20s %4s %-15s %-11s %7s\n",
+			 "CPU clock", "ID", "state", "notif state", "slot");
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(session, &l_ctx.sessions, list) {
+		const char *state_str;
+		u64 cpu_clk;
+
+		state_str = nq_session_state(&session->nq_session, &cpu_clk);
+		ret = kasnprintf(buf, "%20llu %4x %-15s %-11s %7llu\n", cpu_clk,
+				 session->sid == SID_INVALID ? 0 : session->sid,
+				 state_to_string(session->state), state_str,
+				 session->slot);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos,
+				  debug_sessions);
+}
+
+static const struct file_operations debug_sessions_ops = {
+	.read = debug_sessions_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+static inline int show_log_entry(struct kasnprintf_buf *buf,
+				 struct command_info *cmd_info)
+{
+	const char *state_str = "unknown";
+	const char *value_str = value_to_string(cmd_info->result.value);
+	char value[16];
+
+	switch (cmd_info->state) {
+	case UNUSED:
+		state_str = "unused";
+		break;
+	case PENDING:
+		state_str = "pending";
+		break;
+	case SENT:
+		state_str = "sent";
+		break;
+	case COMPLETE:
+		state_str = "complete";
+		break;
+	case FAILED:
+		state_str = "failed";
+		break;
+	}
+
+	if (!value_str) {
+		snprintf(value, sizeof(value), "%08x", cmd_info->result.value);
+		value_str = value;
+	}
+
+	return kasnprintf(buf, "%20llu %5d %-16s %5x %-8s %5d %-11s %-17s%s\n",
+			  cmd_info->cpu_clk, cmd_info->pid,
+			  cmd_to_string(cmd_info->id), cmd_info->session_id,
+			  state_str, cmd_info->errno,
+			  origin_to_string(cmd_info->result.origin), value_str,
+			  cmd_info->uuid_str);
+}
+
+static int debug_last_cmds(struct kasnprintf_buf *buf)
+{
+	struct command_info *cmd_info;
+	int i, ret = 0;
+
+	/* Dump the 'last commands' ring */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	ret = kasnprintf(buf, "%20s %5s %-16s %5s %-8s %5s %-11s %-17s%s\n",
+			 "CPU clock", "PID", "command", "S-ID",
+			 "state", "errno", "origin", "value", "UUID");
+	if (ret < 0)
+		goto out;
+
+	cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+	if (cmd_info->state != UNUSED)
+		/* Buffer has wrapped around, dump end (oldest records) */
+		for (i = l_ctx.last_cmds_index; i < LAST_CMDS_SIZE; i++) {
+			ret = show_log_entry(buf, cmd_info++);
+			if (ret < 0)
+				goto out;
+		}
+
+	/* Dump first records */
+	cmd_info = &l_ctx.last_cmds[0];
+	for (i = 0; i < l_ctx.last_cmds_index; i++) {
+		ret = show_log_entry(buf, cmd_info++);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	return ret;
+}
+
+static ssize_t debug_last_cmds_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos, debug_last_cmds);
+}
+
+static const struct file_operations debug_last_cmds_ops = {
+	.read = debug_last_cmds_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+static inline void mark_iwp_dead(void)
+{
+	struct iwp_session *session;
+
+	l_ctx.iwp_dead = true;
+	/* Signal all potential waiters that SWd is going away */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(session, &l_ctx.sessions, list)
+		complete(&session->completion);
+	mutex_unlock(&l_ctx.sessions_lock);
+}
+
+static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
+				void *data)
+{
+	mark_iwp_dead();
+	return 0;
+}
+
+int iwp_init(void)
+{
+	int i;
+
+	l_ctx.iws = nq_get_iwp_buffer();
+	INIT_LIST_HEAD(&l_ctx.free_iws);
+	INIT_LIST_HEAD(&l_ctx.allocd_iws);
+	l_ctx.iws_list_pool = kcalloc(MAX_IW_SESSION, sizeof(struct iws),
+				      GFP_KERNEL);
+	if (!l_ctx.iws_list_pool)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_IW_SESSION; i++) {
+		l_ctx.iws_list_pool[i].slot =
+			i * sizeof(struct interworld_session);
+		list_add(&l_ctx.iws_list_pool[i].list, &l_ctx.free_iws);
+	}
+
+	mutex_init(&l_ctx.iws_list_lock);
+	INIT_LIST_HEAD(&l_ctx.sessions);
+	mutex_init(&l_ctx.sessions_lock);
+	nq_register_notif_handler(iwp_notif_handler, true);
+	l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+	nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+	/* Debugfs */
+	mutex_init(&l_ctx.last_cmds_mutex);
+	return 0;
+}
+
+void iwp_exit(void)
+{
+	mark_iwp_dead();
+	nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+}
+
+int iwp_start(void)
+{
+	/* Create debugfs sessions and last commands entries */
+	debugfs_create_file("iwp_sessions", 0400, g_ctx.debug_dir, NULL,
+			    &debug_sessions_ops);
+	debugfs_create_file("last_iwp_commands", 0400, g_ctx.debug_dir, NULL,
+			    &debug_last_cmds_ops);
+	return 0;
+}
+
+void iwp_stop(void)
+{
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.h
new file mode 100644
index 0000000..5e98322
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/iwp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_IWP_H_
+#define _MC_IWP_H_
+
+#include "mci/mcloadformat.h"		/* struct identity */
+
+#include "nq.h"
+#include "mcp.h" /* mcp_buffer_map FIXME move to nq? */
+
+struct iwp_session {
+	/* Notification queue session */
+	struct nq_session	nq_session;
+	/* Session ID */
+	u32			sid;
+	/* IWS slot */
+	u64			slot;
+	/* Second IWS slot, needed while opening the session */
+	u64			op_slot;
+	/* Sessions list (protected by iwp sessions_lock) */
+	struct list_head	list;
+	/* Notification waiter lock */
+	struct mutex		notif_wait_lock;	/* Only one at a time */
+	/* Notification received */
+	struct completion	completion;
+	/* Interworld struct lock */
+	struct mutex		iws_lock;
+	/* Session state (protected by iwp sessions_lock) */
+	enum iwp_session_state {
+		IWP_SESSION_RUNNING,
+		IWP_SESSION_CLOSE_REQUESTED,
+		IWP_SESSION_CLOSED,
+	}			state;
+	/* GP TAs have login information */
+	struct identity		client_identity;
+};
+
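+/*
+ * Buffer reference attached to a GP parameter: 'sva' is the SWd handle of a
+ * registered (shared) memory block, or 0 for a temporary buffer described by
+ * 'map'.
+ */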
+struct iwp_buffer_map {
+	struct mcp_buffer_map map;
+	u32 sva;
+};
+
+/* Private to iwp_session structure */
+void iwp_session_init(struct iwp_session *session,
+		      const struct identity *identity);
+
+/* Getters */
+static inline u32 iwp_session_id(struct iwp_session *session)
+{
+	return session->sid;
+}
+
+static inline u64 iwp_session_slot(struct iwp_session *session)
+{
+	return session->slot;
+}
+
+/* Convert local errno to GP return values */
+int iwp_set_ret(int ret, struct gp_return *gp_ret);
+
+/* Commands */
+int iwp_register_shared_mem(
+	struct tee_mmu *mmu,
+	u32 *sva,
+	struct gp_return *gp_ret);
+int iwp_release_shared_mem(
+	struct mcp_buffer_map *map);
+int iwp_open_session_prepare(
+	struct iwp_session *session,
+	struct gp_operation *operation,
+	struct mc_ioctl_buffer *bufs,
+	struct gp_shared_memory **parents,
+	struct gp_return *gp_ret);
+void iwp_open_session_abort(
+	struct iwp_session *iwp_session);
+int iwp_open_session(
+	struct iwp_session *iwp_session,
+	const struct mc_uuid_t *uuid,
+	struct gp_operation *operation,
+	const struct iwp_buffer_map *maps,
+	struct interworld_session *iws,
+	struct tee_mmu **mmus,
+	struct gp_return *gp_ret);
+int iwp_close_session(
+	struct iwp_session *iwp_session);
+int iwp_invoke_command_prepare(
+	struct iwp_session *iwp_session,
+	u32 command_id,
+	struct gp_operation *operation,
+	struct mc_ioctl_buffer *bufs,
+	struct gp_shared_memory **parents,
+	struct gp_return *gp_ret);
+void iwp_invoke_command_abort(
+	struct iwp_session *iwp_session);
+int iwp_invoke_command(
+	struct iwp_session *iwp_session,
+	struct gp_operation *operation,
+	const struct iwp_buffer_map *maps,
+	struct interworld_session *iws,
+	struct tee_mmu **mmus,
+	struct gp_return *gp_ret);
+int iwp_request_cancellation(
+	u64 slot);
+
+/* Initialisation/cleanup */
+int iwp_init(void);
+void iwp_exit(void);
+int iwp_start(void);
+void iwp_stop(void);
+
+#endif /* _MC_IWP_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.c
new file mode 100644
index 0000000..d92ffb4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+
+#include "main.h"
+#include "logging.h"
+
+/* Supported log buffer version */
+#define MC_LOG_VERSION			2
+
+/* Default order of the log ring buffer: 2^6 pages (256 KiB with 4 KiB pages) */
+#define LOG_BUF_ORDER			6
+
+/* Max Len of a log line for printing */
+#define LOG_LINE_SIZE			256
+
+/* Definitions for log version 2 */
+#define LOG_TYPE_MASK			(0x0007)
+#define LOG_TYPE_CHAR			0
+#define LOG_TYPE_INTEGER		1
+
+/* Field length */
+#define LOG_LENGTH_MASK			(0x00F8)
+#define LOG_LENGTH_SHIFT		3
+
+/* Extra attributes */
+#define LOG_EOL				(0x0100)
+#define LOG_INTEGER_DECIMAL		(0x0200)
+#define LOG_INTEGER_SIGNED		(0x0400)
+
+/* active cpu id */
+#define LOG_CPUID_MASK            (0xF000)
+#define LOG_CPUID_SHIFT           12
+
+struct mc_logmsg {
+	u16	ctrl;		/* Type and format of data */
+	u16	source;		/* Unique value for each event source */
+	u32	log_data;	/* Value, if any */
+};
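+
+/*
+ * Example: ctrl = 0x1341 decodes as LOG_TYPE_INTEGER, field width 8,
+ * unsigned decimal, end-of-line set, emitted by CPU 1.
+ */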
+
+/* MobiCore internal trace buffer structure. */
+struct mc_trace_buf {
+	u32	version;	/* version of trace buffer */
+	u32	length;		/* length of buff */
+	u32	head;		/* last write position */
+	u8	buff[];		/* start of the log buffer */
+};
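+
+/*
+ * The SWd appends records at 'head'; the reader below keeps its own tail in
+ * log_ctx and wraps back to 0 whenever less than a complete record fits
+ * before 'length' (see logging_worker()).
+ */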
+
+static struct logging_ctx {
+	struct kthread_work work;
+	struct kthread_worker worker;
+	struct task_struct *thread;
+	union {
+		struct mc_trace_buf *trace_buf;	/* Circular log buffer */
+		unsigned long trace_page;
+	};
+	u32	tail;			/* MobiCore log read position */
+	int	thread_err;
+	u16	prev_source;		/* Previous Log source */
+	char	line[LOG_LINE_SIZE + 1];/* Log Line buffer */
+	u32	line_len;		/* Log Line buffer current length */
+	bool	enabled;		/* Log can be disabled via debugfs */
+	bool	dead;
+} log_ctx;
+
+static inline void log_eol(u16 source, u32 cpuid)
+{
+	if (!log_ctx.line_len)
+		return;
+
+	if (log_ctx.prev_source)
+		/* TEE user-space */
+		dev_info(g_ctx.mcd, "%03x(%u)|%s\n", log_ctx.prev_source,
+			 cpuid, log_ctx.line);
+	else
+		/* TEE kernel */
+		dev_info(g_ctx.mcd, "mtk(%u)|%s\n", cpuid, log_ctx.line);
+	log_ctx.line[0] = '\0';
+	log_ctx.line_len = 0;
+}
+
+/*
+ * Collect chars in log_ctx.line buffer and output the buffer when it is full.
+ * No locking needed because only "mobicore_log" thread updates this buffer.
+ */
+static inline void log_char(char ch, u16 source, u32 cpuid)
+{
+	if (ch == '\0')
+		return;
+
+	if (ch == '\n' || ch == '\r') {
+		log_eol(source, cpuid);
+		return;
+	}
+
+	if (log_ctx.line_len >= LOG_LINE_SIZE || source != log_ctx.prev_source)
+		log_eol(source, cpuid);
+
+	log_ctx.line[log_ctx.line_len++] = ch;
+	log_ctx.line[log_ctx.line_len] = 0;
+	log_ctx.prev_source = source;
+}
+
+static inline void log_string(u32 ch, u16 source, u32 cpuid)
+{
+	while (ch) {
+		log_char(ch & 0xFF, source, cpuid);
+		ch >>= 8;
+	}
+}
+
+static inline void log_number(u32 format, u32 value, u16 source, u32 cpuid)
+{
+	int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
+	char fmt[16];
+	char buffer[32];
+	const char *reader = buffer;
+
+	if (format & LOG_INTEGER_DECIMAL) {
+		if (format & LOG_INTEGER_SIGNED)
+			snprintf(fmt, sizeof(fmt), "%%%ud", width);
+		else
+			snprintf(fmt, sizeof(fmt), "%%%uu", width);
+	} else {
+		snprintf(fmt, sizeof(fmt), "%%0%ux", width);
+	}
+
+	snprintf(buffer, sizeof(buffer), fmt, value);
+	while (*reader)
+		log_char(*reader++, source, cpuid);
+}
+
+static inline int log_msg(void *data)
+{
+	struct mc_logmsg *msg = (struct mc_logmsg *)data;
+	int log_type = msg->ctrl & LOG_TYPE_MASK;
+	int cpuid = ((msg->ctrl & LOG_CPUID_MASK) >> LOG_CPUID_SHIFT);
+
+	switch (log_type) {
+	case LOG_TYPE_CHAR:
+		log_string(msg->log_data, msg->source, cpuid);
+		break;
+	case LOG_TYPE_INTEGER:
+		log_number(msg->ctrl, msg->log_data, msg->source, cpuid);
+		break;
+	}
+	if (msg->ctrl & LOG_EOL)
+		log_eol(msg->source, cpuid);
+
+	return sizeof(*msg);
+}
+
+static void logging_worker(struct kthread_work *work)
+{
+	static DEFINE_MUTEX(local_mutex);
+
+	mutex_lock(&local_mutex);
+	while (log_ctx.trace_buf->head != log_ctx.tail) {
+		if (log_ctx.trace_buf->version != MC_LOG_VERSION) {
+			mc_dev_err(-EINVAL, "Bad log data v%d (exp. v%d), stop",
+				   log_ctx.trace_buf->version, MC_LOG_VERSION);
+			log_ctx.dead = true;
+			break;
+		}
+
+		log_ctx.tail += log_msg(&log_ctx.trace_buf->buff[log_ctx.tail]);
+		/* Wrap over if no space left for a complete message */
+		if ((log_ctx.tail + sizeof(struct mc_logmsg)) >
+						log_ctx.trace_buf->length)
+			log_ctx.tail = 0;
+	}
+	mutex_unlock(&local_mutex);
+}
+
+/*
+ * Wake up the log reader thread.
+ * This should be called from the places where calls into MobiCore have
+ * generated some logs (e.g. yield, SIQ, ...).
+ */
+void logging_run(void)
+{
+	if (log_ctx.enabled && !log_ctx.dead &&
+	    log_ctx.trace_buf->head != log_ctx.tail)
+		kthread_queue_work(&log_ctx.worker, &log_ctx.work);
+}
+
+/*
+ * Set up the MobiCore kernel log. It assumes it's running on CORE 0!
+ * The fastcall will complain if that is not the case!
+ */
+int logging_init(phys_addr_t *buffer, u32 *size)
+{
+	/*
+	 * We are going to map this buffer into virtual address space in SWd.
+	 * To reduce complexity there, we use a contiguous buffer.
+	 */
+	log_ctx.trace_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					      LOG_BUF_ORDER);
+	if (!log_ctx.trace_page)
+		return -ENOMEM;
+
+	*buffer = virt_to_phys((void *)(log_ctx.trace_page));
+	*size = BIT(LOG_BUF_ORDER) * PAGE_SIZE;
+
+	/* Logging thread */
+	kthread_init_work(&log_ctx.work, logging_worker);
+	kthread_init_worker(&log_ctx.worker);
+	log_ctx.thread = kthread_create(kthread_worker_fn, &log_ctx.worker,
+					"tee_log");
+	if (IS_ERR(log_ctx.thread))
+		return PTR_ERR(log_ctx.thread);
+
+	wake_up_process(log_ctx.thread);
+
+	/* Debugfs switch */
+	log_ctx.enabled = true;
+	debugfs_create_bool("swd_debug", 0600, g_ctx.debug_dir,
+			    &log_ctx.enabled);
+	return 0;
+}
+
+void logging_exit(bool buffer_busy)
+{
+	/*
+	 * This is not racy, as the only caller of logging_run() is the
+	 * scheduler, which gets stopped before us, and long before we exit.
+	 */
+	kthread_stop(log_ctx.thread);
+	if (!buffer_busy)
+		free_pages(log_ctx.trace_page, LOG_BUF_ORDER);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.h
new file mode 100644
index 0000000..7a4bc96
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/logging.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_LOGGING_H_
+#define _MC_LOGGING_H_
+
+void logging_run(void);
+int logging_init(phys_addr_t *buffer, u32 *size);
+void logging_exit(bool buffer_busy);
+
+#endif /* _MC_LOGGING_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.c
new file mode 100644
index 0000000..99ef68f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"		/* MC_ADMIN_DEVNODE */
+
+#include "platform.h"			/* MC_PM_RUNTIME */
+#include "main.h"
+#include "arm.h"
+#include "admin.h"
+#include "user.h"
+#include "iwp.h"
+#include "mcp.h"
+#include "nq.h"
+#include "client.h"
+#include "xen_be.h"
+#include "xen_fe.h"
+#include "build_tag.h"
+
+/* Default entry for our driver in device tree */
+#ifndef MC_DEVICE_PROPNAME
+#define MC_DEVICE_PROPNAME "trustonic,mobicore"
+#endif
+
+/* Define a MobiCore device structure for use with the dev_* logging macros */
+static struct device_driver driver = {
+	.name = "Trustonic"
+};
+
+static struct device device = {
+	.driver = &driver
+};
+
+struct mc_device_ctx g_ctx = {
+	.mcd = &device
+};
+
+static struct {
+	/* Device tree compatibility */
+	bool use_platform_driver;
+	/* TEE start return code mutex */
+	struct mutex start_mutex;
+	/* TEE start return code */
+	int start_ret;
+#ifdef MC_PM_RUNTIME
+	/* Whether hibernation succeeded */
+	bool did_hibernate;
+	/* Reboot notifications */
+	struct notifier_block reboot_notifier;
+	/* PM notifications */
+	struct notifier_block pm_notifier;
+#endif
+	/* Devices */
+	dev_t device;
+	struct class *class;
+	/* Admin device */
+	struct cdev admin_cdev;
+	/* User device */
+	dev_t user_dev;
+	struct cdev user_cdev;
+	/* Debug counters */
+	struct mutex struct_counters_buf_mutex;
+	char struct_counters_buf[256];
+	int struct_counters_buf_len;
+} main_ctx;
+
+static int mobicore_start(void);
+static void mobicore_stop(void);
+
+static bool mobicore_ready;
+bool is_mobicore_ready(void)
+{
+	return mobicore_ready;
+}
+
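+/*
+ * printf into a growing buffer: if the formatted string does not fit, the
+ * buffer is enlarged with krealloc() (page-aligned) and the formatting is
+ * retried.  Returns the number of characters added, or a negative errno.
+ */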
+int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...)
+{
+	va_list args;
+	va_list args_copy;
+	int max_size = buf->size - buf->off;
+	int i;
+
+	va_start(args, fmt);
+	/* Keep a copy: a va_list cannot safely be reused after vsnprintf() */
+	va_copy(args_copy, args);
+	i = vsnprintf(buf->buf + buf->off, max_size, fmt, args);
+	if (i >= max_size) {
+		int new_size = PAGE_ALIGN(buf->size + i + 1);
+		char *new_buf = krealloc(buf->buf, new_size, buf->gfp);
+
+		if (!new_buf) {
+			i = -ENOMEM;
+		} else {
+			buf->buf = new_buf;
+			buf->size = new_size;
+			max_size = buf->size - buf->off;
+			i = vsnprintf(buf->buf + buf->off, max_size, fmt,
+				      args_copy);
+		}
+	}
+
+	if (i > 0)
+		buf->off += i;
+
+	va_end(args_copy);
+	va_end(args);
+	return i;
+}
+
+static inline void kasnprintf_buf_reset(struct kasnprintf_buf *buf)
+{
+	kfree(buf->buf);
+	buf->buf = NULL;
+	buf->size = 0;
+	buf->off = 0;
+}
+
+ssize_t debug_generic_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos,
+			   int (*function)(struct kasnprintf_buf *buf))
+{
+	struct kasnprintf_buf *buf = file->private_data;
+	int ret = 0;
+
+	mutex_lock(&buf->mutex);
+	/* Add/update buffer */
+	if (!*ppos) {
+		kasnprintf_buf_reset(buf);
+		ret = function(buf);
+		if (ret < 0) {
+			kasnprintf_buf_reset(buf);
+			goto end;
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf->buf,
+				      buf->off);
+
+end:
+	mutex_unlock(&buf->mutex);
+	return ret;
+}
+
+int debug_generic_open(struct inode *inode, struct file *file)
+{
+	struct kasnprintf_buf *buf;
+
+	file->private_data = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!file->private_data)
+		return -ENOMEM;
+
+	buf = file->private_data;
+	mutex_init(&buf->mutex);
+	buf->gfp = GFP_KERNEL;
+	return 0;
+}
+
+int debug_generic_release(struct inode *inode, struct file *file)
+{
+	struct kasnprintf_buf *buf = file->private_data;
+
+	if (!buf)
+		return 0;
+
+	kasnprintf_buf_reset(buf);
+	kfree(buf);
+	return 0;
+}
+
+static ssize_t debug_structs_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos,
+				  clients_debug_structs);
+}
+
+static const struct file_operations debug_structs_ops = {
+	.read = debug_structs_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+static ssize_t debug_struct_counters_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	if (!*ppos) {
+		int ret;
+
+		mutex_lock(&main_ctx.struct_counters_buf_mutex);
+		ret = snprintf(main_ctx.struct_counters_buf,
+			       sizeof(main_ctx.struct_counters_buf),
+			       "clients:  %d\n"
+			       "cbufs:    %d\n"
+			       "cwsms:    %d\n"
+			       "sessions: %d\n"
+			       "swsms:    %d\n"
+			       "mmus:     %d\n"
+			       "maps:     %d\n"
+			       "slots:    %d\n"
+			       "xen maps: %d\n"
+			       "xen fes:  %d\n",
+			       atomic_read(&g_ctx.c_clients),
+			       atomic_read(&g_ctx.c_cbufs),
+			       atomic_read(&g_ctx.c_cwsms),
+			       atomic_read(&g_ctx.c_sessions),
+			       atomic_read(&g_ctx.c_wsms),
+			       atomic_read(&g_ctx.c_mmus),
+			       atomic_read(&g_ctx.c_maps),
+			       atomic_read(&g_ctx.c_slots),
+			       atomic_read(&g_ctx.c_xen_maps),
+			       atomic_read(&g_ctx.c_xen_fes));
+		mutex_unlock(&main_ctx.struct_counters_buf_mutex);
+		if (ret > 0)
+			main_ctx.struct_counters_buf_len = ret;
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       main_ctx.struct_counters_buf,
+				       main_ctx.struct_counters_buf_len);
+}
+
+static const struct file_operations debug_struct_counters_ops = {
+	.read = debug_struct_counters_read,
+	.llseek = default_llseek,
+};
+
+static inline int device_user_init(void)
+{
+	struct device *dev;
+	int ret = 0;
+
+	main_ctx.user_dev = MKDEV(MAJOR(main_ctx.device), 1);
+	/* Create the user node */
+	mc_user_init(&main_ctx.user_cdev);
+	ret = cdev_add(&main_ctx.user_cdev, main_ctx.user_dev, 1);
+	if (ret) {
+		mc_dev_err(ret, "user cdev_add failed");
+		return ret;
+	}
+
+	main_ctx.user_cdev.owner = THIS_MODULE;
+	dev = device_create(main_ctx.class, NULL, main_ctx.user_dev, NULL,
+			    MC_USER_DEVNODE);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		cdev_del(&main_ctx.user_cdev);
+		mc_dev_err(ret, "user device_create failed");
+		return ret;
+	}
+
+	/* Create debugfs structs entry */
+	debugfs_create_file("structs", 0400, g_ctx.debug_dir, NULL,
+			    &debug_structs_ops);
+
+	return 0;
+}
+
+static inline void device_user_exit(void)
+{
+	device_destroy(main_ctx.class, main_ctx.user_dev);
+	cdev_del(&main_ctx.user_cdev);
+}
+
+#ifdef MC_PM_RUNTIME
+static int reboot_notifier(struct notifier_block *nb, unsigned long event,
+			   void *dummy)
+{
+	switch (event) {
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		main_ctx.did_hibernate = true;
+		break;
+	}
+
+	return 0;
+}
+
+static int suspend_notifier(struct notifier_block *nb, unsigned long event,
+			    void *dummy)
+{
+	int ret = 0;
+
+	main_ctx.did_hibernate = false;
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		return nq_suspend();
+	case PM_POST_SUSPEND:
+		return nq_resume();
+#ifdef TRUSTONIC_HIBERNATION_SUPPORT
+	case PM_HIBERNATION_PREPARE:
+		/* Try to stop the TEE nicely (ignore failure) */
+		nq_stop();
+		break;
+	case PM_POST_HIBERNATION:
+		if (main_ctx.did_hibernate) {
+			/* Really did hibernate */
+			client_cleanup();
+			main_ctx.start_ret = TEE_START_NOT_TRIGGERED;
+			return mobicore_start();
+		}
+
+		/* Did not hibernate, just restart the TEE */
+		ret = nq_start();
+#endif
+	}
+
+	return ret;
+}
+#endif /* MC_PM_RUNTIME */
+
+static inline int check_version(void)
+{
+	struct mc_version_info version_info;
+	int ret;
+
+	/* Must be called before creating the user device node to avoid race */
+	ret = mcp_get_version(&version_info);
+	if (ret)
+		return ret;
+
+	/* CMP version is meaningless in this case and is thus not printed */
+	mc_dev_info("\n"
+		    "    product_id        = %s\n"
+		    "    version_mci       = 0x%08x\n"
+		    "    version_so        = 0x%08x\n"
+		    "    version_mclf      = 0x%08x\n"
+		    "    version_container = 0x%08x\n"
+		    "    version_mc_config = 0x%08x\n"
+		    "    version_tl_api    = 0x%08x\n"
+		    "    version_dr_api    = 0x%08x\n"
+		    "    version_nwd       = 0x%08x\n",
+		    version_info.product_id,
+		    version_info.version_mci,
+		    version_info.version_so,
+		    version_info.version_mclf,
+		    version_info.version_container,
+		    version_info.version_mc_config,
+		    version_info.version_tl_api,
+		    version_info.version_dr_api,
+		    version_info.version_nwd);
+
+	/* Determine which features are supported */
+	if (version_info.version_mci != MC_VERSION(1, 7)) {
+		ret = -EHOSTDOWN;
+		mc_dev_err(ret, "TEE incompatible with this driver");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mobicore_start_domu(void)
+{
+	mutex_lock(&main_ctx.start_mutex);
+	if (main_ctx.start_ret != TEE_START_NOT_TRIGGERED)
+		goto end;
+
+	/* Must be called before creating the user device node to avoid race */
+	main_ctx.start_ret = check_version();
+	if (main_ctx.start_ret)
+		goto end;
+
+	main_ctx.start_ret = device_user_init();
+end:
+	mutex_unlock(&main_ctx.start_mutex);
+	return main_ctx.start_ret;
+}
+
+static int mobicore_start(void)
+{
+	int ret;
+
+	mutex_lock(&main_ctx.start_mutex);
+	if (main_ctx.start_ret != TEE_START_NOT_TRIGGERED)
+		goto got_ret;
+
+	ret = nq_start();
+	if (ret) {
+		mc_dev_err(ret, "NQ start failed");
+		goto err_nq;
+	}
+
+	ret = mcp_start();
+	if (ret) {
+		mc_dev_err(ret, "MCP start failed");
+		goto err_mcp;
+	}
+
+	ret = iwp_start();
+	if (ret) {
+		mc_dev_err(ret, "IWP start failed");
+		goto err_iwp;
+	}
+
+	/* Must be called before creating the user device node to avoid race */
+	ret = check_version();
+	if (ret)
+		goto err_version;
+
+#ifdef MC_PM_RUNTIME
+	main_ctx.reboot_notifier.notifier_call = reboot_notifier;
+	ret = register_reboot_notifier(&main_ctx.reboot_notifier);
+	if (ret) {
+		mc_dev_err(ret, "reboot notifier registration failed");
+		goto err_pm_notif;
+	}
+
+	main_ctx.pm_notifier.notifier_call = suspend_notifier;
+	ret = register_pm_notifier(&main_ctx.pm_notifier);
+	if (ret) {
+		unregister_reboot_notifier(&main_ctx.reboot_notifier);
+		mc_dev_err(ret, "PM notifier register failed");
+		goto err_pm_notif;
+	}
+#endif
+
+	if (is_xen_dom0()) {
+		ret = xen_be_init();
+		if (ret)
+			goto err_xen_be;
+	}
+
+	ret = device_user_init();
+	if (ret)
+		goto err_device_user;
+
+	main_ctx.start_ret = 0;
+	mobicore_ready = true;
+	goto got_ret;
+
+err_device_user:
+	if (is_xen_dom0())
+		xen_be_exit();
+err_xen_be:
+#ifdef MC_PM_RUNTIME
+	unregister_reboot_notifier(&main_ctx.reboot_notifier);
+	unregister_pm_notifier(&main_ctx.pm_notifier);
+err_pm_notif:
+#endif
+err_version:
+	iwp_stop();
+err_iwp:
+	mcp_stop();
+err_mcp:
+	nq_stop();
+err_nq:
+	main_ctx.start_ret = ret;
+got_ret:
+	mutex_unlock(&main_ctx.start_mutex);
+	return main_ctx.start_ret;
+}
+
+static void mobicore_stop(void)
+{
+	device_user_exit();
+	if (is_xen_dom0())
+		xen_be_exit();
+
+	if (!is_xen_domu()) {
+#ifdef MC_PM_RUNTIME
+		unregister_reboot_notifier(&main_ctx.reboot_notifier);
+		unregister_pm_notifier(&main_ctx.pm_notifier);
+#endif
+		iwp_stop();
+		mcp_stop();
+		nq_stop();
+	}
+}
+
+int mc_wait_tee_start(void)
+{
+	int ret;
+
+	while (!is_mobicore_ready())
+		ssleep(1);
+
+	mutex_lock(&main_ctx.start_mutex);
+	while (main_ctx.start_ret == TEE_START_NOT_TRIGGERED) {
+		mutex_unlock(&main_ctx.start_mutex);
+		ssleep(1);
+		mutex_lock(&main_ctx.start_mutex);
+	}
+
+	ret = main_ctx.start_ret;
+	mutex_unlock(&main_ctx.start_mutex);
+	return ret;
+}
+
+static inline int device_common_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&main_ctx.device, 0, 2, "trustonic_tee");
+	if (ret) {
+		mc_dev_err(ret, "alloc_chrdev_region failed");
+		return ret;
+	}
+
+	main_ctx.class = class_create(THIS_MODULE, "trustonic_tee");
+	if (IS_ERR(main_ctx.class)) {
+		ret = PTR_ERR(main_ctx.class);
+		mc_dev_err(ret, "class_create failed");
+		unregister_chrdev_region(main_ctx.device, 2);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline void device_common_exit(void)
+{
+	class_destroy(main_ctx.class);
+	unregister_chrdev_region(main_ctx.device, 2);
+}
+
+static inline int device_admin_init(void)
+{
+	struct device *dev;
+	int ret = 0;
+
+	/* Create the ADMIN node */
+	ret = mc_admin_init(&main_ctx.admin_cdev, mobicore_start,
+			    mobicore_stop);
+	if (ret)
+		goto err_init;
+
+	ret = cdev_add(&main_ctx.admin_cdev, main_ctx.device, 1);
+	if (ret) {
+		mc_dev_err(ret, "admin cdev_add failed");
+		goto err_cdev;
+	}
+
+	main_ctx.admin_cdev.owner = THIS_MODULE;
+	dev = device_create(main_ctx.class, NULL, main_ctx.device, NULL,
+			    MC_ADMIN_DEVNODE);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		mc_dev_err(ret, "admin device_create failed");
+		goto err_device;
+	}
+
+	return 0;
+
+err_device:
+	cdev_del(&main_ctx.admin_cdev);
+err_cdev:
+	mc_admin_exit();
+err_init:
+	return ret;
+}
+
+static inline void device_admin_exit(void)
+{
+	device_destroy(main_ctx.class, main_ctx.device);
+	cdev_del(&main_ctx.admin_cdev);
+	mc_admin_exit();
+}
+
+/*
+ * This function is called by the kernel during startup or by an insmod
+ * command. The device is installed and registered as a cdev, then interrupt
+ * and queue handling are set up.
+ */
+static int mobicore_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (pdev)
+		g_ctx.mcd->of_node = pdev->dev.of_node;
+
+#ifdef MOBICORE_COMPONENT_BUILD_TAG
+	mc_dev_info("MobiCore %s", MOBICORE_COMPONENT_BUILD_TAG);
+#endif
+	/* Hardware does not support ARM TrustZone -> Cannot continue! */
+	if (!is_xen_domu() && !has_security_extensions()) {
+		ret = -ENODEV;
+		mc_dev_err(ret, "Hardware doesn't support ARM TrustZone!");
+		return ret;
+	}
+
+	/* Running in secure mode -> Cannot load the driver! */
+	if (is_secure_mode()) {
+		ret = -ENODEV;
+		mc_dev_err(ret, "Running in secure MODE!");
+		return ret;
+	}
+
+	/* Make sure we can create debugfs entries */
+	g_ctx.debug_dir = debugfs_create_dir("trustonic_tee", NULL);
+
+	/* Initialize debug counters */
+	atomic_set(&g_ctx.c_clients, 0);
+	atomic_set(&g_ctx.c_cbufs, 0);
+	atomic_set(&g_ctx.c_cwsms, 0);
+	atomic_set(&g_ctx.c_sessions, 0);
+	atomic_set(&g_ctx.c_wsms, 0);
+	atomic_set(&g_ctx.c_mmus, 0);
+	atomic_set(&g_ctx.c_maps, 0);
+	atomic_set(&g_ctx.c_slots, 0);
+	atomic_set(&g_ctx.c_xen_maps, 0);
+	atomic_set(&g_ctx.c_xen_fes, 0);
+	main_ctx.start_ret = TEE_START_NOT_TRIGGERED;
+	mutex_init(&main_ctx.start_mutex);
+	mutex_init(&main_ctx.struct_counters_buf_mutex);
+	/* Create debugfs info entries */
+	debugfs_create_file("structs_counters", 0400, g_ctx.debug_dir, NULL,
+			    &debug_struct_counters_ops);
+
+	/* Initialize common API layer */
+	client_init();
+
+	/* Initialize plenty of nice features */
+	ret = nq_init();
+	if (ret) {
+		mc_dev_err(ret, "NQ init failed");
+		goto fail_nq_init;
+	}
+
+	ret = mcp_init();
+	if (ret) {
+		mc_dev_err(ret, "MCP init failed");
+		goto err_mcp;
+	}
+
+	ret = iwp_init();
+	if (ret) {
+		mc_dev_err(ret, "IWP init failed");
+		goto err_iwp;
+	}
+
+	ret = device_common_init();
+	if (ret)
+		goto err_common;
+
+	if (!is_xen_domu()) {
+		/* Admin dev is for the daemon to communicate with the driver */
+		ret = device_admin_init();
+		if (ret)
+			goto err_admin;
+
+#ifndef MC_DELAYED_TEE_START
+		ret = mobicore_start();
+#endif
+		if (ret)
+			goto err_start;
+	}
+
+	return 0;
+
+err_start:
+	device_admin_exit();
+err_admin:
+	device_common_exit();
+err_common:
+	iwp_exit();
+err_iwp:
+	mcp_exit();
+err_mcp:
+	nq_exit();
+fail_nq_init:
+	debugfs_remove_recursive(g_ctx.debug_dir);
+	return ret;
+}
+
+static int mobicore_probe_not_of(void)
+{
+	return mobicore_probe(NULL);
+}
+
+static const struct of_device_id of_match_table[] = {
+	{ .compatible = MC_DEVICE_PROPNAME },
+	{ }
+};
+
+static struct platform_driver mc_plat_driver = {
+	.probe = mobicore_probe,
+	.driver = {
+		.name = "mcd",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_table,
+	}
+};
+
+static int __init mobicore_init(void)
+{
+	dev_set_name(g_ctx.mcd, "TEE");
+	/*
+	 * Do not remove or change the following trace.
+	 * The string "MobiCore" is used to detect if the TEE is part of the image.
+	 */
+	mc_dev_info("MobiCore mcDrvModuleApi version is %d.%d",
+		    MCDRVMODULEAPI_VERSION_MAJOR,
+		    MCDRVMODULEAPI_VERSION_MINOR);
+
+	/* In a Xen DomU, just register the front-end */
+	if (is_xen_domu())
+		return xen_fe_init(mobicore_probe_not_of, mobicore_start_domu);
+
+	main_ctx.use_platform_driver =
+		of_find_compatible_node(NULL, NULL, MC_DEVICE_PROPNAME);
+	if (main_ctx.use_platform_driver)
+		return platform_driver_register(&mc_plat_driver);
+
+	return mobicore_probe_not_of();
+}
+
+static void __exit mobicore_exit(void)
+{
+	if (is_xen_domu())
+		xen_fe_exit();
+
+	if (main_ctx.use_platform_driver)
+		platform_driver_unregister(&mc_plat_driver);
+
+	if (!is_xen_domu())
+		device_admin_exit();
+
+	device_common_exit();
+	iwp_exit();
+	mcp_exit();
+	nq_exit();
+	debugfs_remove_recursive(g_ctx.debug_dir);
+}
+
+module_init(mobicore_init);
+module_exit(mobicore_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore driver");
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.h
new file mode 100644
index 0000000..0b25123
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/main.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_MAIN_H_
+#define _MC_MAIN_H_
+
+#include <linux/device.h>	/* dev_* macros */
+#include <linux/slab.h>		/* gfp_t */
+#include <linux/fs.h>		/* struct inode and struct file */
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <xen/xen.h>
+
+#define MC_VERSION(major, minor) \
+		((((major) & 0x0000ffff) << 16) | ((minor) & 0x0000ffff))
+#define MC_VERSION_MAJOR(x) ((x) >> 16)
+#define MC_VERSION_MINOR(x) ((x) & 0xffff)
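+/* e.g. MC_VERSION(1, 7) == 0x00010007, of which MC_VERSION_MAJOR(x) == 1 */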
+
+#define mc_dev_err(__ret__, fmt, ...) \
+	dev_err(g_ctx.mcd, "ERROR %d %s: " fmt "\n", \
+		__ret__, __func__, ##__VA_ARGS__)
+
+#define mc_dev_info(fmt, ...) \
+	dev_info(g_ctx.mcd, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#ifdef DEBUG
+#define mc_dev_devel(fmt, ...) \
+	dev_info(g_ctx.mcd, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* DEBUG */
+#define mc_dev_devel(...)		do {} while (0)
+#endif /* !DEBUG */
+
+#define TEEC_TT_LOGIN_KERNEL	0x80000000
+
+#define TEE_START_NOT_TRIGGERED 1
+
+/* MobiCore Driver Kernel Module context data. */
+struct mc_device_ctx {
+	struct device		*mcd;
+	/* debugfs root */
+	struct dentry		*debug_dir;
+
+	/* Debug counters */
+	atomic_t		c_clients;
+	atomic_t		c_cbufs;
+	atomic_t		c_cwsms;
+	atomic_t		c_sessions;
+	atomic_t		c_wsms;
+	atomic_t		c_mmus;
+	atomic_t		c_maps;
+	atomic_t		c_slots;
+	atomic_t		c_xen_maps;
+	atomic_t		c_xen_fes;
+};
+
+extern struct mc_device_ctx g_ctx;
+
+/* Debug stuff */
+struct kasnprintf_buf {
+	struct mutex mutex;	/* Protect buf/size/off access */
+	gfp_t gfp;
+	void *buf;
+	int size;
+	int off;
+};
+
+/* Wait for TEE to start and get status */
+int mc_wait_tee_start(void);
+
+extern __printf(2, 3)
+int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...);
+ssize_t debug_generic_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos,
+			   int (*function)(struct kasnprintf_buf *buf));
+int debug_generic_open(struct inode *inode, struct file *file);
+int debug_generic_release(struct inode *inode, struct file *file);
+
+/* Xen support */
+
+#ifdef CONFIG_XEN
+#define TRUSTONIC_XEN_DOMU
+#endif
+
+static inline bool is_xen_dom0(void)
+{
+	return xen_domain() && xen_initial_domain();
+}
+
+static inline bool is_xen_domu(void)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+	return xen_domain() && !xen_initial_domain();
+#else
+	return false;
+#endif
+}
+
+#endif /* _MC_MAIN_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/gptci.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/gptci.h
new file mode 100644
index 0000000..40363e6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/gptci.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _GP_TCI_H_
+#define _GP_TCI_H_
+
+struct tee_value {
+	u32 a;
+	u32 b;
+};
+
+struct _teec_memory_reference_internal {
+	u32 sva;
+	u32 len;
+	u32 output_size;
+};
+
+union _teec_parameter_internal {
+	struct tee_value		       value;
+	struct _teec_memory_reference_internal memref;
+};
+
+enum _teec_tci_type {
+	_TA_OPERATION_OPEN_SESSION   = 1,
+	_TA_OPERATION_INVOKE_COMMAND = 2,
+	_TA_OPERATION_CLOSE_SESSION  = 3,
+};
+
+struct _teec_operation_internal {
+	enum _teec_tci_type	       type;
+	u32			       command_id;
+	u32			       param_types;
+	union _teec_parameter_internal params[4];
+	bool			       is_cancelled;
+	u8			       rfu_padding[3];
+};
+
+struct _teec_tci {
+	char				header[8];
+	struct teec_uuid		destination;
+	struct _teec_operation_internal operation;
+	u32				ready;
+	u32				return_origin;
+	u32				return_status;
+};
+
+/**
+ * Termination codes
+ */
+#define TA_EXIT_CODE_PANIC	  300
+#define TA_EXIT_CODE_TCI	  301
+#define TA_EXIT_CODE_PARAMS	  302
+#define TA_EXIT_CODE_FINISHED	  303
+#define TA_EXIT_CODE_SESSIONSTATE 304
+#define TA_EXIT_CODE_CREATEFAILED 305
+
+#endif /* _GP_TCI_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcifc.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcifc.h
new file mode 100644
index 0000000..85875cf
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcifc.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef MCIFC_H_
+#define MCIFC_H_
+
+#include "platform.h"
+
+/** @name MobiCore FastCall Defines
+ * Defines for the two different FastCalls.
+ */
+/**/
+
+/* --- global ---- */
+#define MC_FC_INVALID	((u32)0)  /**< Invalid FastCall ID */
+
+#if (defined(CONFIG_ARM64) && !defined(MC_ARMV7_FC)) || (defined(MC_AARCH32_FC))
+
+#define FASTCALL_OWNER_TZOS          (0x3F000000)
+#define FASTCALL_ATOMIC_MASK         BIT(31)
+/** Trusted OS FastCalls SMC32 */
+#define MC_FC_STD32_BASE \
+			((u32)(FASTCALL_OWNER_TZOS | FASTCALL_ATOMIC_MASK))
+/* SMC32 Trusted OS owned Fastcalls */
+#define MC_FC_STD32(x)	((u32)(MC_FC_STD32_BASE + (x)))
+
+#define MC_FC_INIT	MC_FC_STD32(1)  /**< Initializing FastCall. */
+#define MC_FC_INFO	MC_FC_STD32(2)  /**< Info FastCall. */
+#define MC_FC_MEM_TRACE	MC_FC_STD32(10)  /**< Enable SWd tracing via memory */
+
+#else
+
+#define MC_FC_INIT	((u32)(-1))  /**< Initializing FastCall. */
+#define MC_FC_INFO	((u32)(-2))  /**< Info FastCall. */
+#define MC_FC_MEM_TRACE	((u32)(-31))  /**< Enable SWd tracing via memory */
+
+#endif
+
+/** @} */
+
+/** @name MobiCore SMC Defines
+ * Defines the different secure monitor calls (SMC) for world switching.
+ */
+/**< Yield to switch from NWd to SWd. */
+#define MC_SMC_N_YIELD			3
+/**< SIQ to switch from NWd to SWd. */
+#define MC_SMC_N_SIQ			4
+/** @} */
+
+/** @name MobiCore status
+ *  MobiCore status information.
+ */
+/**< MobiCore is not yet initialized. Use FcInit() to set up MobiCore. */
+#define MC_STATUS_NOT_INITIALIZED	0
+/**< Bad parameters have been passed in FcInit(). */
+#define MC_STATUS_BAD_INIT		1
+/**< MobiCore did initialize properly. */
+#define MC_STATUS_INITIALIZED		2
+/**< MobiCore kernel halted due to an unrecoverable exception. Further
+ * information is available via extended info.
+ */
+#define MC_STATUS_HALT			3
+/** @} */
+
+/** @name Extended Info Identifiers
+ *  Extended info parameters for MC_FC_INFO to obtain further information
+ *  depending on MobiCore state.
+ */
+/**< Version of the MobiCore Control Interface (MCI) */
+#define MC_EXT_INFO_ID_MCI_VERSION	0
+/**< MobiCore control flags */
+#define MC_EXT_INFO_ID_FLAGS		1
+/**< MobiCore halt condition code */
+#define MC_EXT_INFO_ID_HALT_CODE	2
+/**< MobiCore halt condition instruction pointer */
+#define MC_EXT_INFO_ID_HALT_IP		3
+/**< MobiCore fault counter */
+#define MC_EXT_INFO_ID_FAULT_CNT	4
+/**< MobiCore last fault cause */
+#define MC_EXT_INFO_ID_FAULT_CAUSE	5
+/**< MobiCore last fault meta */
+#define MC_EXT_INFO_ID_FAULT_META	6
+/**< MobiCore last fault threadid */
+#define MC_EXT_INFO_ID_FAULT_THREAD	7
+/**< MobiCore last fault instruction pointer */
+#define MC_EXT_INFO_ID_FAULT_IP		8
+/**< MobiCore last fault stack pointer */
+#define MC_EXT_INFO_ID_FAULT_SP		9
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_DFSR	10
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_ADFSR	11
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_DFAR	12
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_IFSR	13
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_AIFSR	14
+/**< MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_IFAR	15
+/**< MobiCore configured by Daemon via fc_init flag */
+#define MC_EXT_INFO_ID_MC_CONFIGURED	16
+/**< MobiCore scheduling status: idle/non-idle */
+#define MC_EXT_INFO_ID_MC_SCHED_STATUS	17
+/**< MobiCore runtime status: initialized, halted */
+#define MC_EXT_INFO_ID_MC_STATUS	18
+/**< MobiCore exception handler last partner */
+#define MC_EXT_INFO_ID_MC_EXC_PARTNER	19
+/**< MobiCore exception handler last peer */
+#define MC_EXT_INFO_ID_MC_EXC_IPCPEER	20
+/**< MobiCore exception handler last IPC message */
+#define MC_EXT_INFO_ID_MC_EXC_IPCMSG	21
+/**< MobiCore exception handler last IPC data */
+#define MC_EXT_INFO_ID_MC_EXC_IPCDATA	22
+/**< MobiCore exception handler last UUID (uses 4 slots: 23 to 26) */
+#define MC_EXT_INFO_ID_MC_EXC_UUID	23
+#define MC_EXT_INFO_ID_MC_EXC_UUID1	24
+#define MC_EXT_INFO_ID_MC_EXC_UUID2	25
+#define MC_EXT_INFO_ID_MC_EXC_UUID3	26
+/**< MobiCore exception handler last crashing task offset */
+#define MC_EXT_INFO_ID_TASK_OFFSET	27
+/**< MobiCore exception handler last crashing task's mclib offset */
+#define MC_EXT_INFO_ID_MCLIB_OFFSET	28
+
+/** @} */
+
+/** @name FastCall return values
+ * Return values of the MobiCore FastCalls.
+ */
+/**< No error. Everything worked fine. */
+#define MC_FC_RET_OK				0
+/**< FastCall was not successful. */
+#define MC_FC_RET_ERR_INVALID			1
+/**< MobiCore has already been initialized. */
+#define MC_FC_RET_ERR_ALREADY_INITIALIZED	5
+/**< Call is not allowed. */
+#define TEE_FC_RET_ERR_NOABILITY            6
+/** @} */
+
+/** @name Init FastCall flags
+ * Return flags of the Init FastCall.
+ */
+/**< SWd uses LPAE MMU table format. */
+#define MC_FC_INIT_FLAG_LPAE			BIT(0)
+/** @} */
+
+#endif /** MCIFC_H_ */
+
+/** @} */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mciiwp.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mciiwp.h
new file mode 100644
index 0000000..970e8fa
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mciiwp.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef MCIIWP_H_
+#define MCIIWP_H_
+
+#include "public/GP/tee_client_types.h" /* teec_uuid FIXME it's all mixed up! */
+
+/** Session IDs used for notifications in the Dragon CA-to-TA communication
+ * protocol.
+ *
+ * These session IDs are distinct from any valid MCP session identifier
+ * and from the existing pseudo-session identifiers:
+ * - SID_MCP = 0
+ * - SID_INVALID = 0xffffffff
+ *
+ * A session ID is a thread ID, and since thread IDs have a nonzero task ID as
+ * their lowest 16 bits, we can use values of the form 0x????0000
+ */
+#define SID_OPEN_SESSION        (0x00010000)
+#define SID_INVOKE_COMMAND      (0x00020000)
+#define SID_CLOSE_SESSION       (0x00030000)
+#define SID_CANCEL_OPERATION    (0x00040000)
+#define SID_MEMORY_REFERENCE    (0x00050000)
+#define SID_OPEN_TA             (0x00060000)
+#define SID_REQ_TA              (0x00070000)
+
+/* To quickly detect IWP notifications */
+#define SID_IWP_NOTIFICATION \
+	(SID_OPEN_SESSION | SID_INVOKE_COMMAND | SID_CLOSE_SESSION | \
+	 SID_CANCEL_OPERATION | SID_MEMORY_REFERENCE | SID_OPEN_TA | SID_REQ_TA)
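+
+/*
+ * Illustrative sketch (not part of the original interface): given the
+ * 0x????0000 layout described above, an ID denotes one of the IWP
+ * pseudo-sessions exactly when it is non-zero and only uses bits covered
+ * by SID_IWP_NOTIFICATION.
+ */
+static inline int sid_is_iwp_notification(u32 sid)
+{
+	return (sid & SID_IWP_NOTIFICATION) && !(sid & ~SID_IWP_NOTIFICATION);
+}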
+
+struct interworld_parameter_value {
+	u32	a;
+	u32	b;
+	u8	unused[8];
+};
+
+/** The API parameter type TEEC_MEMREF_WHOLE is translated into these types
+ * and does not appear in the inter-world protocol.
+ *
+ * - memref_handle references a previously registered memory reference
+ * - the parameter covers the bytes from 'offset' up to 'offset + size'
+ *   within that memory reference
+ *
+ * This range must be contained within the memory reference.
+ */
+struct interworld_parameter_memref {
+	u32	offset;
+	u32	size;
+	u32	memref_handle;
+	u32	unused;
+};
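+
+/*
+ * Worked example (illustrative): offset = 16 and size = 100 select bytes
+ * 16..115 of the memory reference identified by memref_handle; the whole
+ * range must fit inside the registered reference.
+ */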
+
+/** This structure is used for the parameter types TEEC_MEMREF_TEMP_xxx.
+ *
+ * The parameter is located in World Shared Memory which is established
+ * for the command and torn down afterwards.
+ *
+ * The number of pages to share is 'size + offset' divided by the page
+ * size, rounded up.
+ * Inside the shared pages, the buffer starts at address 'offset'
+ * and ends after 'size' bytes.
+ *
+ * - wsm_type parameter may be WSM_CONTIGUOUS or WSM_L1.
+ * - offset must be less than the page size (4096).
+ * - size must be less than 0xfffff000.
+ */
+struct interworld_parameter_tmpref {
+	u16	wsm_type;
+	u16	offset;
+	u32	size;
+	u64	physical_address;
+};
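+
+/*
+ * Worked example (illustrative, 4096-byte pages): offset = 0x100 and
+ * size = 0x2000 give (0x100 + 0x2000 + 4095) / 4096 = 3 shared pages,
+ * with the buffer starting 0x100 bytes into the first page.
+ */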
+
+/** Inter-world parameter: value, registered memref or temporary memref */
+union interworld_parameter {
+	struct interworld_parameter_value	value;
+	struct interworld_parameter_memref	memref;
+	struct interworld_parameter_tmpref	tmpref;
+};
+
+/**
+ * An inter-world session structure represents an active session between
+ * a normal world client and RTM.
+ * It is located in the MCI buffer and must be 8-byte aligned.
+ *
+ * NB : since the session structure is in shared memory, it must have the
+ * same layout on both sides (normal world kernel and RTM).
+ * All types use platform endianness (specifically, the endianness used by
+ * the secure world).
+ */
+struct interworld_session {
+	u32	status;
+	u32	return_origin;
+	u16	session_handle;
+	u16	param_types;
+
+	union {
+		u32 command_id;    /** invoke-command only */
+		u32 login;         /** open-session only */
+	};
+
+	union interworld_parameter params[4];
+
+	/* The following fields are only used during open-session */
+	struct teec_uuid target_uuid;
+	struct teec_uuid client_uuid;
+};
+
+#endif /** MCIIWP_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcimcp.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcimcp.h
new file mode 100644
index 0000000..386237f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcimcp.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef MCP_H_
+#define MCP_H_
+
+#include "mci/mcloadformat.h"
+
+/** Indicates a response */
+#define FLAG_RESPONSE		BIT(31)
+
+/** MobiCore Return Code Defines.
+ * List of the possible MobiCore return codes.
+ */
+enum mcp_result {
+	/** Memory has successfully been mapped */
+	MC_MCP_RET_OK                                   =  0,
+	/** The session ID is invalid */
+	MC_MCP_RET_ERR_INVALID_SESSION                  =  1,
+	/** The UUID of the Trustlet is unknown */
+	MC_MCP_RET_ERR_UNKNOWN_UUID                     =  2,
+	/** The ID of the driver is unknown */
+	MC_MCP_RET_ERR_UNKNOWN_DRIVER_ID                =  3,
+	/** No more session are allowed */
+	MC_MCP_RET_ERR_NO_MORE_SESSIONS                 =  4,
+	/** The container is invalid */
+	MC_MCP_RET_ERR_CONTAINER_INVALID                =  5,
+	/** The Trustlet is invalid */
+	MC_MCP_RET_ERR_TRUSTLET_INVALID                 =  6,
+	/** The memory block has already been mapped before */
+	MC_MCP_RET_ERR_ALREADY_MAPPED                   =  7,
+	/** Alignment or length error in the command parameters */
+	MC_MCP_RET_ERR_INVALID_PARAM                    =  8,
+	/** No space left in the virtual address space of the session */
+	MC_MCP_RET_ERR_OUT_OF_RESOURCES                 =  9,
+	/** WSM type unknown or broken WSM */
+	MC_MCP_RET_ERR_INVALID_WSM                      = 10,
+	/** unknown error */
+	MC_MCP_RET_ERR_UNKNOWN                          = 11,
+	/** Length of map invalid */
+	MC_MCP_RET_ERR_INVALID_MAPPING_LENGTH           = 12,
+	/** Map can only be applied to Trustlet session */
+	MC_MCP_RET_ERR_MAPPING_TARGET                   = 13,
+	/** Couldn't open crypto session */
+	MC_MCP_RET_ERR_OUT_OF_CRYPTO_RESOURCES          = 14,
+	/** System Trustlet signature verification failed */
+	MC_MCP_RET_ERR_SIGNATURE_VERIFICATION_FAILED    = 15,
+	/** System Trustlet public key is wrong */
+	MC_MCP_RET_ERR_WRONG_PUBLIC_KEY                 = 16,
+	/** Wrong container type(s) */
+	MC_MCP_RET_ERR_CONTAINER_TYPE_MISMATCH          = 17,
+	/** Container is locked (or not activated) */
+	MC_MCP_RET_ERR_CONTAINER_LOCKED                 = 18,
+	/** SPID is not registered with root container */
+	MC_MCP_RET_ERR_SP_NO_CHILD                      = 19,
+	/** UUID is not registered with sp container */
+	MC_MCP_RET_ERR_TL_NO_CHILD                      = 20,
+	/** Unwrapping of root container failed */
+	MC_MCP_RET_ERR_UNWRAP_ROOT_FAILED               = 21,
+	/** Unwrapping of service provider container failed */
+	MC_MCP_RET_ERR_UNWRAP_SP_FAILED                 = 22,
+	/** Unwrapping of Trustlet container failed */
+	MC_MCP_RET_ERR_UNWRAP_TRUSTLET_FAILED           = 23,
+	/** Container version mismatch */
+	MC_MCP_RET_ERR_CONTAINER_VERSION_MISMATCH       = 24,
+	/** Decryption of service provider trustlet failed */
+	MC_MCP_RET_ERR_SP_TL_DECRYPTION_FAILED          = 25,
+	/** Hash check of service provider trustlet failed */
+	MC_MCP_RET_ERR_SP_TL_HASH_CHECK_FAILED          = 26,
+	/** Activation/starting of task failed */
+	MC_MCP_RET_ERR_LAUNCH_TASK_FAILED               = 27,
+	/** Closing of task not yet possible, try again later */
+	MC_MCP_RET_ERR_CLOSE_TASK_FAILED                = 28,
+	/** Service is blocked and a session cannot be opened to it */
+	MC_MCP_RET_ERR_SERVICE_BLOCKED                  = 29,
+	/** Service is locked and a session cannot be opened to it */
+	MC_MCP_RET_ERR_SERVICE_LOCKED                   = 30,
+	/** Service was forcefully killed (due to an administrative command) */
+	MC_MCP_RET_ERR_SERVICE_KILLED                   = 31,
+	/** Service version is lower than the one installed. */
+	MC_MCP_RET_ERR_DOWNGRADE_NOT_AUTHORIZED         = 32,
+	/** Filesystem not yet ready. */
+	MC_MCP_RET_ERR_SYSTEM_NOT_READY                 = 33,
+	/** The command is unknown */
+	MC_MCP_RET_ERR_UNKNOWN_COMMAND                  = 50,
+	/** The command data is invalid */
+	MC_MCP_RET_ERR_INVALID_DATA                     = 51
+};
+
+/** Possible MCP Command IDs
+ * Command ID must be between 0 and 0x7FFFFFFF.
+ */
+enum cmd_id {
+	/** Invalid command ID */
+	MC_MCP_CMD_ID_INVALID		= 0x00,
+	/** Open a session */
+	MC_MCP_CMD_OPEN_SESSION		= 0x01,
+	/** Close an existing session */
+	MC_MCP_CMD_CLOSE_SESSION	= 0x03,
+	/** Map WSM to session */
+	MC_MCP_CMD_MAP			= 0x04,
+	/** Unmap WSM from session */
+	MC_MCP_CMD_UNMAP		= 0x05,
+	/** Prepare for suspend */
+	MC_MCP_CMD_SUSPEND		= 0x06,
+	/** Resume from suspension */
+	MC_MCP_CMD_RESUME		= 0x07,
+	/** Get MobiCore version information */
+	MC_MCP_CMD_GET_MOBICORE_VERSION	= 0x09,
+	/** Close MCP and unmap MCI */
+	MC_MCP_CMD_CLOSE_MCP		= 0x0A,
+	/** Load token for device attestation */
+	MC_MCP_CMD_LOAD_TOKEN		= 0x0B,
+	/** Check that TA can be loaded */
+	MC_MCP_CMD_CHECK_LOAD_TA	= 0x0C,
+	/** Load a decryption key */
+	MC_MCP_CMD_LOAD_SYSENC_KEY_SO = 0x0D,
+};
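+
+/*
+ * Illustrative note: a response reuses the command ID with FLAG_RESPONSE
+ * (bit 31) set, e.g. the reply to MC_MCP_CMD_OPEN_SESSION (0x01) carries
+ * rsp_id 0x80000001.
+ */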
+
+/*
+ * Types of WSM known to the MobiCore.
+ */
+#define WSM_TYPE_MASK		0xFF
+#define WSM_INVALID		0	/** Invalid memory type */
+#define WSM_L1			3	/** Buffer mapping uses fake L1 table */
+/**< Bitflag indicating that the buffer should be uncached */
+#define WSM_UNCACHED		0x100
+
+/*
+ * Magic number used to identify if Open Command supports GP client
+ * authentication.
+ */
+#define MC_GP_CLIENT_AUTH_MAGIC	0x47504131	/* "GPA1" */
+
+/*
+ * Initialisation values flags
+ */
+/* Set if IRQ is present */
+#define MC_IV_FLAG_IRQ		BIT(0)
+/* Set if GP TIME is supported */
+#define MC_IV_FLAG_TIME		BIT(1)
+/* Set if GP client uses interworld session */
+#define MC_IV_FLAG_IWP		BIT(2)
+
+struct init_values {
+	u32	flags;
+	u32	irq;
+	u32	time_ofs;
+	u32	time_len;
+	/* interworld session buffer offset in MCI */
+	u32	iws_buf_ofs;
+	/* interworld session buffer size */
+	u32	iws_buf_size;
+	u8      padding[8];
+};
+
+/** Command header.
+ * It just contains the command ID. Only values specified in cmd_id are
+ * allowed as command IDs.  If the command ID is unspecified, MobiCore
+ * returns an empty response with the result set to
+ * MC_MCP_RET_ERR_UNKNOWN_COMMAND.
+ */
+struct cmd_header {
+	enum cmd_id	cmd_id;	/** Command ID of the command */
+};
+
+/** Response header.
+ * MobiCore will reply to every MCP command with an MCP response.  Like the MCP
+ * command the response consists of a header followed by response data. The
+ * response is written to the same memory location as the MCP command.
+ */
+struct rsp_header {
+	u32		rsp_id;	/** Command ID | FLAG_RESPONSE */
+	enum mcp_result	result;	/** Result of the command execution */
+};
+
+/** @defgroup CMD MCP Commands
+ */
+
+/** @defgroup ASMCMD Administrative Commands
+ */
+
+/** @defgroup MCPGETMOBICOREVERSION GET_MOBICORE_VERSION
+ * Get MobiCore version info.
+ *
+ */
+
+/** Get MobiCore Version Command */
+struct cmd_get_version {
+	struct cmd_header	cmd_header;	/** Command header */
+};
+
+/** Get MobiCore Version Command Response */
+struct rsp_get_version {
+	struct rsp_header	rsp_header;	/** Response header */
+	struct mc_version_info	version_info;	/** MobiCore version info */
+};
+
+/** @defgroup POWERCMD Power Management Commands
+ */
+
+/** @defgroup MCPSUSPEND SUSPEND
+ * Prepare MobiCore suspension.
+ * This command allows MobiCore and MobiCore drivers to release or clean
+ * resources and save device state.
+ *
+ */
+
+/** Suspend Command */
+struct cmd_suspend {
+	struct cmd_header	cmd_header;	/** Command header */
+};
+
+/** Suspend Command Response */
+struct rsp_suspend {
+	struct rsp_header	rsp_header;	/** Response header */
+};
+
+/** @defgroup MCPRESUME RESUME
+ * Resume MobiCore from suspension.
+ * This command allows MobiCore and MobiCore drivers to reinitialize hardware
+ * affected by suspension.
+ *
+ */
+
+/** Resume Command */
+struct cmd_resume {
+	struct cmd_header	cmd_header;	/** Command header */
+};
+
+/** Resume Command Response */
+struct rsp_resume {
+	struct rsp_header	rsp_header;	/** Response header */
+};
+
+/** @defgroup SESSCMD Session Management Commands
+ */
+
+/** @defgroup MCPOPEN OPEN
+ * Load and open a session to a Trustlet.
+ * The OPEN command loads Trustlet data to the MobiCore context and opens a
+ * session to the Trustlet.  If wsm_data_type is WSM_INVALID, MobiCore tries to
+ * start a pre-installed Trustlet associated with the uuid passed.  The uuid
+ * passed must match the uuid contained in the load data (if available).
+ * On success, MobiCore returns the session ID which can be used for further
+ * communication.
+ */
+
+/** GP client authentication data */
+struct cmd_open_data {
+	u32		mclf_magic;	/** ASCII "MCLF" on older versions */
+	struct identity	identity;	/** Login method and data */
+};
+
+/** Open Command */
+struct cmd_open {
+	struct cmd_header cmd_header;	/** Command header */
+	struct mc_uuid_t uuid;		/** Service UUID */
+	u8		unused[4];	/** Padding to be 64-bit aligned */
+	u64		adr_tci_buffer;	/** Physical address of the TCI MMU */
+	u64		adr_load_data;	/** Physical address of the data MMU */
+	u32		ofs_tci_buffer;	/** Offset to the data */
+	u32		len_tci_buffer;	/** Length of the TCI */
+	u32		wsmtype_tci;	/** Type of WSM used for the TCI */
+	u32		wsm_data_type;	/** Type of MMU */
+	u32		ofs_load_data;	/** Offset to the data */
+	u32		len_load_data;	/** Length of the data to load */
+	union {
+		struct cmd_open_data	cmd_open_data;	/** Client login data */
+		union mclf_header	tl_header;	/** Service header */
+	};
+	u32		is_gpta;	/** true if looking for an SD/GP-TA */
+};
+
+/** Open Command Response */
+struct rsp_open {
+	struct rsp_header	rsp_header;	/** Response header */
+	u32	session_id;	/** Session ID */
+};
+
+/** TA Load Check Command */
+struct cmd_check_load {
+	struct cmd_header cmd_header;	/** Command header */
+	struct mc_uuid_t uuid;	/** Service UUID */
+	u8		unused[4];	/** Padding to be 64-bit aligned */
+	u64		adr_load_data;	/** Physical address of the data */
+	u32		wsm_data_type;	/** Type of MMU */
+	u32		ofs_load_data;	/** Offset to the data */
+	u32		len_load_data;	/** Length of the data to load */
+	union mclf_header tl_header;	/** Service header */
+};
+
+/** TA Load Check Response */
+struct rsp_check_load {
+	struct rsp_header	rsp_header;	/** Response header */
+};
+
+/** @defgroup MCPCLOSE CLOSE
+ * Close an existing session to a Trustlet.
+ * The CLOSE command terminates a session and frees all resources in the
+ * MobiCore system which are currently occupied by the session. Before closing
+ * the session, the MobiCore runtime management waits until all pending
+ * operations, like calls to drivers, invoked by the Trustlet have been
+ * terminated.  Mapped memory will automatically be unmapped from the MobiCore
+ * context. The NWd is responsible for processing the freed memory according to
+ * the Rich-OS needs.
+ *
+ */
+
+/** Close Command */
+struct cmd_close {
+	struct cmd_header	cmd_header;	/** Command header */
+	u32		session_id;	/** Session ID */
+};
+
+/** Close Command Response */
+struct rsp_close {
+	struct rsp_header	rsp_header;	/** Response header */
+};
+
+/** @defgroup MCPMAP MAP
+ * Map a portion of memory to a session.
+ * The MAP command provides a block of memory to the context of a service.
+ * The memory then becomes world-shared memory (WSM).
+ * The only allowed memory type here is WSM_L1.
+ */
+
+/** Map Command */
+struct cmd_map {
+	struct cmd_header cmd_header;	/** Command header */
+	u32		session_id;	/** Session ID */
+	u32		wsm_type;	/** Type of MMU */
+	u32		ofs_buffer;	/** Offset to the payload */
+	u64		adr_buffer;	/** Physical address of the MMU */
+	u32		len_buffer;	/** Length of the buffer */
+	u32		flags;		/** Attributes (read/write) */
+};
+
+#define MCP_MAP_MAX         0x100000    /** Maximum length for MCP map */
+
+/** Map Command Response */
+struct rsp_map {
+	struct rsp_header rsp_header;	/** Response header */
+	/** Virtual address the WSM is mapped to, may include an offset! */
+	u32		secure_va;
+};
+
+/** @defgroup MCPUNMAP UNMAP
+ * Unmap a portion of world-shared memory from a session.
+ * The UNMAP command is used to unmap a previously mapped block of
+ * world shared memory from the context of a session.
+ *
+ * Attention: The memory block will be immediately unmapped from the specified
+ * session.  If the service is still accessing the memory, the service will
+ * trigger a segmentation fault.
+ */
+
+/** Unmap Command */
+struct cmd_unmap {
+	struct cmd_header cmd_header;	/** Command header */
+	u32		session_id;	/** Session ID */
+	u32		wsm_type;	/** Type of WSM used for the memory */
+	/** Virtual address the WSM is mapped to, may include an offset! */
+	u32		secure_va;
+	u32		virtual_buffer_len;  /** Length of virtual buffer */
+};
+
+/** Unmap Command Response */
+struct rsp_unmap {
+	struct rsp_header rsp_header;	/** Response header */
+};
+
+/** @defgroup MCPLOADTOKEN
+ * Load a token from the normal world and share it with the TEE
+ * If something fails, the device attestation functionality will be disabled
+ */
+
+/** Load Token */
+struct cmd_load_token {
+	struct cmd_header cmd_header;	/** Command header */
+	u32		wsm_data_type;	/** Type of MMU */
+	u64		adr_load_data;	/** Physical address of the MMU */
+	u64		ofs_load_data;	/** Offset to the data */
+	u64		len_load_data;	/** Length of the data */
+};
+
+/** Load Token Command Response */
+struct rsp_load_token {
+	struct rsp_header rsp_header;	/** Response header */
+};
+
+/** @defgroup MCPLOADKEYSO
+ * Load a key SO from the normal world and share it with the TEE
+ * If something fails, the device attestation functionality will be disabled
+ */
+
+/** Load key SO */
+struct cmd_load_key_so {
+	struct cmd_header cmd_header;	/** Command header */
+	u32		wsm_data_type;	/** Type of MMU */
+	u64		adr_load_data;	/** Physical address of the MMU */
+	u64		ofs_load_data;	/** Offset to the data */
+	u64		len_load_data;	/** Length of the data */
+};
+
+/** Load key SO Command Response */
+struct rsp_load_key_so {
+	struct rsp_header rsp_header;	/** Response header */
+};
+
+/** Structure of the MCP buffer */
+union mcp_message {
+	struct init_values	init_values;	/** Initialisation values */
+	struct cmd_header	cmd_header;	/** Command header */
+	struct rsp_header	rsp_header;
+	struct cmd_open		cmd_open;	/** Load and open service */
+	struct rsp_open		rsp_open;
+	struct cmd_close	cmd_close;	/** Close command */
+	struct rsp_close	rsp_close;
+	struct cmd_map		cmd_map;	/** Map WSM to service */
+	struct rsp_map		rsp_map;
+	struct cmd_unmap	cmd_unmap;	/** Unmap WSM from service */
+	struct rsp_unmap	rsp_unmap;
+	struct cmd_suspend	cmd_suspend;	/** Suspend MobiCore */
+	struct rsp_suspend	rsp_suspend;
+	struct cmd_resume	cmd_resume;	/** Resume MobiCore */
+	struct rsp_resume	rsp_resume;
+	struct cmd_get_version	cmd_get_version; /** Get MobiCore Version */
+	struct rsp_get_version	rsp_get_version;
+	struct cmd_load_token	cmd_load_token;	/** Load token */
+	struct rsp_load_token	rsp_load_token;
+	struct cmd_check_load	cmd_check_load;	/** TA load check */
+	struct rsp_check_load	rsp_check_load;
+	struct cmd_load_key_so	cmd_load_key_so;/** Load key SO */
+	struct rsp_load_key_so	rsp_load_key_so;
+};
+
+#define MC_FLAG_NO_SLEEP_REQ   0
+#define MC_FLAG_REQ_TO_SLEEP   1
+
+#define MC_STATE_NORMAL_EXECUTION 0
+#define MC_STATE_READY_TO_SLEEP   1
+
+#define MC_STATE_FLAG_TEE_HALT_MASK BIT(0)
+
+struct sleep_mode {
+	u16		sleep_req;	/** Ask SWd to get ready to sleep */
+	u16		ready_to_sleep;	/** SWd is now ready to sleep */
+};
+
+/** MobiCore status flags */
+struct mcp_flags {
+	/** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needs scheduling */
+	u32		schedule;
+	struct sleep_mode sleep_mode;
+	/** Secure-world sleep timeout in milliseconds */
+	s32		timeout_ms;
+	/** TEE flags */
+	u8		tee_flags;
+	/** Reserved for future use */
+	u8		RFU_padding[3];
+};
+
+/** MobiCore is idle. No scheduling required */
+#define MC_FLAG_SCHEDULE_IDLE      0
+/** MobiCore is non idle, scheduling is required */
+#define MC_FLAG_SCHEDULE_NON_IDLE  1
+
+/** MCP buffer structure */
+struct mcp_buffer {
+	struct mcp_flags flags;		/** MobiCore Flags */
+	union mcp_message message;	/** MCP message buffer */
+};
+
+#endif /* MCP_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcinq.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcinq.h
new file mode 100644
index 0000000..2207bda
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcinq.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef NQ_H_
+#define NQ_H_
+
+/** \name NQ Size Defines
+ * Minimum and maximum count of elements in the notification queue.
+ */
+#define MIN_NQ_ELEM 1	/** Minimum notification queue elements */
+#define MAX_NQ_ELEM 64	/** Maximum notification queue elements */
+
+/* Compute notification queue size in bytes from its number of elements */
+#define NQ_SIZE(n)   (2 * (sizeof(struct notification_queue_header)\
+			+ (n) * sizeof(struct notification)))
+
+/** \name NQ Length Defines
+ * Note that there is one queue for NWd->SWd and one queue for SWd->NWd
+ */
+/** Minimum size for the notification queue data structure */
+#define MIN_NQ_LEN NQ_SIZE(MIN_NQ_ELEM)
+/** Maximum size for the notification queue data structure */
+#define MAX_NQ_LEN NQ_SIZE(MAX_NQ_ELEM)
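+
+/*
+ * Worked example (illustrative, assuming 4-byte u32/s32 and no padding):
+ * the queue header is 12 bytes and each notification 8 bytes, so
+ * MIN_NQ_LEN = 2 * (12 + 1 * 8) = 40 bytes and
+ * MAX_NQ_LEN = 2 * (12 + 64 * 8) = 1048 bytes.
+ */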
+
+/** \name Session ID Defines
+ * Standard Session IDs.
+ */
+/** MCP session ID, used to communicate with MobiCore (e.g. to start/stop TA) */
+#define SID_MCP       0
+/** Invalid session id, returned in case of error */
+#define SID_INVALID   0xffffffff
+
+/** Notification data structure */
+struct notification {
+	u32	session_id;	/** Session ID */
+	s32	payload;	/** Additional notification info */
+};
+
+/** Notification payload codes.
+ * 0 indicates a plain notification,
+ * a positive value is a termination reason from the task,
+ * a negative value is a termination reason from MobiCore.
+ * Possible negative values are given below.
+ */
+enum notification_payload {
+	/** task terminated, but exit code is invalid */
+	ERR_INVALID_EXIT_CODE = -1,
+	/** task terminated due to session end, no exit code available */
+	ERR_SESSION_CLOSE     = -2,
+	/** task terminated due to invalid operation */
+	ERR_INVALID_OPERATION = -3,
+	/** session ID is unknown */
+	ERR_INVALID_SID       = -4,
+	/** session is not active */
+	ERR_SID_NOT_ACTIVE    = -5,
+	/** session was force-killed (due to an administrative command). */
+	ERR_SESSION_KILLED    = -6,
+};
+
+/** Declaration of the notification queue header.
+ * Layout as specified in the data structure specification.
+ */
+struct notification_queue_header {
+	u32	write_cnt;	/** Write counter */
+	u32	read_cnt;	/** Read counter */
+	u32	queue_size;	/** Queue size */
+};
+
+/** Queue struct which defines a queue object.
+ * The queue struct is accessed by the queue<operation> family of
+ * functions. The element count must be a power of two, and its exponent
+ * must be smaller than the bit width of u32 (i.e. 32).
+ */
+struct notification_queue {
+	struct notification_queue_header hdr;		/** Queue header */
+	struct notification notification[MIN_NQ_ELEM];	/** Elements */
+};
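+
+/*
+ * Illustrative sketch (not part of the original interface), assuming
+ * queue_size holds the element count: because that count is a power of
+ * two, the slot of the next write is the free-running write counter
+ * masked by (queue_size - 1).
+ */
+static inline u32 nq_example_write_slot(const struct notification_queue *q)
+{
+	return q->hdr.write_cnt & (q->hdr.queue_size - 1);
+}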
+
+#endif /** NQ_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcitime.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcitime.h
new file mode 100644
index 0000000..94dac30
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcitime.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef MCITIME_H_
+#define MCITIME_H_
+
+/*
+ * Trustonic TEE RICH OS Time:
+ * - seconds and nanoseconds since Jan 1, 1970, UTC
+ * - monotonic counter
+ */
+struct mcp_time {
+	u64	wall_clock_seconds;
+	u64	wall_clock_nsec;
+	u64	monotonic_seconds;
+	u64	monotonic_nsec;
+};
+
+#endif /* MCITIME_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcloadformat.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcloadformat.h
new file mode 100644
index 0000000..57501bf
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mci/mcloadformat.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef MCLOADFORMAT_H_
+#define MCLOADFORMAT_H_
+
+/** Trustlet Blob length info */
+#define MC_TLBLOBLEN_MAGIC	0x7672746C	/* Magic for SWd: vrtl */
+#define MAX_SO_CONT_SIZE	512		/* Max size for a container */
+
+/** MCLF magic */
+/**< "MCLF" in big endian integer representation */
+#define MC_SERVICE_HEADER_MAGIC_BE \
+	((uint32_t)('M' | ('C' << 8) | ('L' << 16) | ('F' << 24)))
+/**< "MCLF" in little endian integer representation */
+#define MC_SERVICE_HEADER_MAGIC_LE \
+	((uint32_t)(('M' << 24) | ('C' << 16) | ('L' << 8) | 'F'))
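+
+/*
+ * Illustrative note: these evaluate to 0x464c434d (_BE) and 0x4d434c46
+ * (_LE); reading the ASCII bytes "MCLF" from a header as a u32 on a
+ * little-endian CPU therefore yields MC_SERVICE_HEADER_MAGIC_BE.
+ */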
+
+/** MCLF flags */
+/**< Loaded service cannot be unloaded from MobiCore. */
+#define MC_SERVICE_HEADER_FLAGS_PERMANENT		BIT(0)
+/**< Service has no WSM control interface. */
+#define MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE	BIT(1)
+/**< Service can be debugged. */
+#define MC_SERVICE_HEADER_FLAGS_DEBUGGABLE		BIT(2)
+/**< New-layout trusted application or trusted driver. */
+#define MC_SERVICE_HEADER_FLAGS_EXTENDED_LAYOUT		BIT(3)
+
+/** Service type.
+ * The service type defines the type of executable.
+ */
+enum service_type {
+	SERVICE_TYPE_ILLEGAL		= 0,
+	SERVICE_TYPE_DRIVER		= 1,
+	SERVICE_TYPE_SP_TRUSTLET	= 2,
+	SERVICE_TYPE_SYSTEM_TRUSTLET	= 3,
+	SERVICE_TYPE_MIDDLEWARE		= 4,
+	SERVICE_TYPE_LAST_ENTRY		= 5,
+};
+
+/**
+ * Descriptor for a memory segment.
+ */
+struct segment_descriptor {
+	u32	start;	/**< Virtual start address */
+	u32	len;	/**< Segment length in bytes */
+};
+
+/**
+ * MCLF intro for data structure identification.
+ * Must be the first element of a valid MCLF file.
+ */
+struct mclf_intro {
+	u32	magic;		/**< Header magic value ASCII "MCLF" */
+	u32	version;	/**< Version of the MCLF header struct */
+};
+
+/**
+ * @defgroup MCLF_VER_V2   MCLF Version 32
+ * @ingroup MCLF_VER
+ *
+ * @addtogroup MCLF_VER_V2
+ */
+
+/*
+ * GP TA identity.
+ */
+struct identity {
+	/**< GP TA login type */
+	u32	login_type;
+	/**< GP TA login data */
+	u8	login_data[16];
+};
+
+/**
+ * Version 2.1/2.2 MCLF header.
+ */
+struct mclf_header_v2 {
+	/**< MCLF header start with the mandatory intro */
+	struct mclf_intro	intro;
+	/**< Service flags */
+	u32	flags;
+	/**< Type of memory the service must be executed from */
+	u32	mem_type;
+	/**< Type of service */
+	enum service_type	service_type;
+	/**< Number of instances which can be run simultaneously */
+	u32	num_instances;
+	/**< Loadable service unique identifier (UUID) */
+	struct mc_uuid_t	uuid;
+	/**< If the service_type is SERVICE_TYPE_DRIVER the Driver ID is used */
+	u32	driver_id;
+	/**<
+	 * Number of threads (N) in a service:
+	 *   SERVICE_TYPE_SP_TRUSTLET: N = 1
+	 *   SERVICE_TYPE_SYSTEM_TRUSTLET: N = 1
+	 *   SERVICE_TYPE_DRIVER: N >= 1
+	 */
+	u32	num_threads;
+	/**< Virtual text segment */
+	struct segment_descriptor text;
+	/**< Virtual data segment */
+	struct segment_descriptor data;
+	/**< Length of the BSS segment in bytes. MUST be at least 8 bytes */
+	u32	bss_len;
+	/**< Virtual start address of service code */
+	u32	entry;
+	/**< Version of the interface the driver exports */
+	u32	service_version;
+};
+
+/**
+ * @addtogroup MCLF
+ */
+
+/** MCLF header */
+union mclf_header {
+	/**< Intro for data identification */
+	struct mclf_intro	intro;
+	/**< Version 2 header */
+	struct mclf_header_v2	mclf_header_v2;
+};
+
+struct mc_blob_len_info {
+	u32	magic;		/**< New blob format magic number */
+	u32	root_size;	/**< Root container size */
+	u32	sp_size;	/**< SP container size */
+	u32	ta_size;	/**< TA container size */
+	u32	reserved[4];	/**< Reserved for future use */
+};
+
+#endif /* MCLOADFORMAT_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.c
new file mode 100644
index 0000000..5ab0c07
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.c
@@ -0,0 +1,1025 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/freezer.h>
+#include <asm/barrier.h>
+#include <linux/irq.h>
+#include <linux/version.h>
+#include <linux/sched/clock.h>	/* local_clock */
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "mci/mcimcp.h"
+#include "mci/mcifc.h"
+#include "mci/mcinq.h"		/* SID_MCP */
+#include "mci/mcitime.h"	/* struct mcp_time */
+#include "mci/mciiwp.h"
+
+#include "main.h"
+#include "admin.h"		/* tee_object* for 'blob' */
+#include "mmu.h"		/* MMU for 'blob' */
+#include "nq.h"
+#include "xen_fe.h"
+#include "mcp.h"
+
+/* Response timeout for an MCP notification, in seconds */
+#define MCP_TIMEOUT		10
+#define MCP_RETRIES		5
+#define MCP_NF_QUEUE_SZ		8
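+
+/*
+ * Illustrative note: with the defaults above, wait_mcp_notification() below
+ * waits up to MCP_TIMEOUT seconds per try for MCP_RETRIES tries, i.e. about
+ * 50 seconds in total before the TEE is flagged as hung.
+ */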
+
+static struct {
+	union mcp_message	*buffer;	/* MCP communication buffer */
+	struct mutex		buffer_mutex;	/* Lock for the buffer above */
+	struct completion complete;
+	bool mcp_dead;
+	struct mcp_session	mcp_session;	/* Pseudo session for MCP */
+	/* Unexpected notification (during MCP open) */
+	struct mutex		unexp_notif_mutex;
+	struct notification	unexp_notif;
+	/* Sessions */
+	struct mutex		sessions_lock;
+	struct list_head	sessions;
+	/* TEE bad state detection */
+	struct notifier_block	tee_stop_notifier;
+	u32			timeout_period;
+	/* Log of last commands */
+#define LAST_CMDS_SIZE 1024
+	struct mutex		last_cmds_mutex;	/* Log protection */
+	struct command_info {
+		u64			cpu_clk;	/* Kernel time */
+		pid_t			pid;		/* Caller PID */
+		enum cmd_id		id;		/* MCP command ID */
+		u32			session_id;
+		char			uuid_str[34];
+		enum state {
+			UNUSED,		/* Unused slot */
+			PENDING,	/* Previous command in progress */
+			SENT,		/* Waiting for response */
+			COMPLETE,	/* Got result */
+			FAILED,		/* Something went wrong */
+		}			state;	/* Command processing state */
+		int			errno;	/* Return code */
+		enum mcp_result		result;	/* Command result */
+	}				last_cmds[LAST_CMDS_SIZE];
+	int				last_cmds_index;
+} l_ctx;
+
+static const char *cmd_to_string(enum cmd_id id)
+{
+	switch (id) {
+	case MC_MCP_CMD_ID_INVALID:
+		return "invalid";
+	case MC_MCP_CMD_OPEN_SESSION:
+		return "open session";
+	case MC_MCP_CMD_CLOSE_SESSION:
+		return "close session";
+	case MC_MCP_CMD_MAP:
+		return "map";
+	case MC_MCP_CMD_UNMAP:
+		return "unmap";
+	case MC_MCP_CMD_SUSPEND:
+		return "suspend";
+	case MC_MCP_CMD_RESUME:
+		return "resume";
+	case MC_MCP_CMD_GET_MOBICORE_VERSION:
+		return "get version";
+	case MC_MCP_CMD_CLOSE_MCP:
+		return "close mcp";
+	case MC_MCP_CMD_LOAD_TOKEN:
+		return "load token";
+	case MC_MCP_CMD_CHECK_LOAD_TA:
+		return "check load TA";
+	case MC_MCP_CMD_LOAD_SYSENC_KEY_SO:
+		return "load Key SO";
+	}
+	return "unknown";
+}
+
+static const char *state_to_string(enum mcp_session_state state)
+{
+	switch (state) {
+	case MCP_SESSION_RUNNING:
+		return "running";
+	case MCP_SESSION_CLOSE_FAILED:
+		return "close failed";
+	case MCP_SESSION_CLOSED:
+		return "closed";
+	}
+	return "error";
+}
+
+static inline void mark_mcp_dead(void)
+{
+	struct mcp_session *session;
+
+	l_ctx.mcp_dead = true;
+	complete(&l_ctx.complete);
+	/* Signal all potential waiters that SWd is going away */
+	list_for_each_entry(session, &l_ctx.sessions, list)
+		complete(&session->completion);
+}
+
+static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
+				void *data)
+{
+	mark_mcp_dead();
+	return 0;
+}
+
+void mcp_session_init(struct mcp_session *session)
+{
+	nq_session_init(&session->nq_session, false);
+	session->sid = SID_INVALID;
+	INIT_LIST_HEAD(&session->list);
+	mutex_init(&session->notif_wait_lock);
+	init_completion(&session->completion);
+	mutex_init(&session->exit_code_lock);
+	session->exit_code = 0;
+	session->state = MCP_SESSION_RUNNING;
+	session->notif_count = 0;
+}
+
+static inline bool mcp_session_isrunning(struct mcp_session *session)
+{
+	bool ret;
+
+	mutex_lock(&l_ctx.sessions_lock);
+	ret = session->state == MCP_SESSION_RUNNING;
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+/*
+ * The session remains valid thanks to the upper layers' reference counters,
+ * but the SWd session may have died, in which case we are informed.
+ */
+int mcp_wait(struct mcp_session *session, s32 timeout, bool silent_expiry)
+{
+	s32 err;
+	int ret = 0;
+
+	mutex_lock(&session->notif_wait_lock);
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu()) {
+		ret = xen_mc_wait(session, timeout, silent_expiry);
+		mutex_unlock(&session->notif_wait_lock);
+		return ret;
+	}
+#endif
+
+	if (l_ctx.mcp_dead) {
+		ret = -EHOSTUNREACH;
+		goto end;
+	}
+
+	if (!mcp_session_isrunning(session)) {
+		ret = -ENXIO;
+		goto end;
+	}
+
+	mcp_get_err(session, &err);
+	if (err) {
+		ret = -ECOMM;
+		goto end;
+	}
+
+	if (timeout < 0) {
+		ret = wait_for_completion_interruptible(&session->completion);
+		if (ret)
+			goto end;
+	} else {
+		ret = wait_for_completion_interruptible_timeout(
+			&session->completion, timeout * HZ / 1000);
+		if (ret < 0)
+			/* Interrupted */
+			goto end;
+
+		if (!ret) {
+			/* Timed out */
+			ret = -ETIME;
+			goto end;
+		}
+
+		ret = 0;
+	}
+
+	if (l_ctx.mcp_dead) {
+		ret = -EHOSTUNREACH;
+		goto end;
+	}
+
+	mcp_get_err(session, &err);
+	if (err) {
+		ret = -ECOMM;
+		goto end;
+	}
+
+	if (!mcp_session_isrunning(session)) {
+		ret = -ENXIO;
+		goto end;
+	}
+
+end:
+	if (!ret)
+		nq_session_state_update(&session->nq_session,
+					NQ_NOTIF_CONSUMED);
+	else if (ret != -ERESTARTSYS)
+		nq_session_state_update(&session->nq_session, NQ_NOTIF_DEAD);
+
+	mutex_unlock(&session->notif_wait_lock);
+	if (ret && ((ret != -ETIME) || !silent_expiry)) {
+#ifdef CONFIG_FREEZER
+		if (ret == -ERESTARTSYS && system_freezing_cnt.counter == 1)
+			mc_dev_devel("freezing session %x", session->sid);
+		else
+#endif
+			mc_dev_devel("session %x ec %d ret %d",
+				     session->sid, session->exit_code, ret);
+	}
+
+	return ret;
+}
+
+int mcp_get_err(struct mcp_session *session, s32 *err)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_mc_get_err(session, err);
+#endif
+
+	mutex_lock(&session->exit_code_lock);
+	*err = session->exit_code;
+	mutex_unlock(&session->exit_code_lock);
+	if (*err)
+		mc_dev_info("session %x ec %d", session->sid, *err);
+
+	return 0;
+}
+
+static inline int wait_mcp_notification(void)
+{
+	unsigned long timeout = msecs_to_jiffies(l_ctx.timeout_period * 1000);
+	int try, ret = -ETIME;
+
+	/*
+	 * Total timeout is l_ctx.timeout_period * MCP_RETRIES, but we check for
+	 * a crash to try and terminate before then if things go wrong.
+	 */
+	for (try = 1; try <= MCP_RETRIES; try++) {
+		/*
+		 * Wait non-interruptible to keep MCP synchronised even if
+		 * caller is interrupted by signal.
+		 */
+		if (wait_for_completion_timeout(&l_ctx.complete, timeout) > 0)
+			return 0;
+
+		mc_dev_err(ret, "no answer after %ds",
+			   l_ctx.timeout_period * try);
+	}
+
+	mc_dev_err(ret, "timed out waiting for MCP notification");
+	nq_signal_tee_hung();
+	return ret;
+}
+
+static int mcp_cmd(union mcp_message *cmd,
+		   /* The fields below are for debug purpose only */
+		   u32 in_session_id,
+		   u32 *out_session_id,
+		   struct mc_uuid_t *uuid)
+{
+	int err = 0, ret = -EHOSTUNREACH;
+	union mcp_message *msg;
+	enum cmd_id cmd_id = cmd->cmd_header.cmd_id;
+	struct command_info *cmd_info;
+
+	/* Initialize MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+	cmd_info->cpu_clk = local_clock();
+	cmd_info->pid = current->pid;
+	cmd_info->id = cmd_id;
+	cmd_info->session_id = in_session_id;
+	if (uuid) {
+		/* Keep UUID because it's an 'open session' cmd */
+		size_t i;
+
+		cmd_info->uuid_str[0] = ' ';
+		for (i = 0; i < sizeof(uuid->value); i++) {
+			snprintf(&cmd_info->uuid_str[1 + i * 2], 3, "%02x",
+				 uuid->value[i]);
+		}
+	} else {
+		cmd_info->uuid_str[0] = '\0';
+	}
+
+	cmd_info->state = PENDING;
+	cmd_info->errno = 0;
+	cmd_info->result = MC_MCP_RET_OK;
+	if (++l_ctx.last_cmds_index >= LAST_CMDS_SIZE)
+		l_ctx.last_cmds_index = 0;
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+
+	mutex_lock(&l_ctx.buffer_mutex);
+	msg = l_ctx.buffer;
+	if (l_ctx.mcp_dead)
+		goto out;
+
+	/* Copy message to MCP buffer */
+	memcpy(msg, cmd, sizeof(*msg));
+
+	/* Send MCP notification, with cmd_id as payload for debug purpose */
+	nq_session_notify(&l_ctx.mcp_session.nq_session, l_ctx.mcp_session.sid,
+			  cmd_id);
+
+	/* Update MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	cmd_info->state = SENT;
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	ret = wait_mcp_notification();
+	if (ret)
+		goto out;
+
+	/* Check response ID */
+	if (msg->rsp_header.rsp_id != (cmd_id | FLAG_RESPONSE)) {
+		ret = -EBADE;
+		mc_dev_err(ret, "MCP command got invalid response (0x%X)",
+			   msg->rsp_header.rsp_id);
+		goto out;
+	}
+
+	/* Convert result */
+	switch (msg->rsp_header.result) {
+	case MC_MCP_RET_OK:
+		err = 0;
+		break;
+	case MC_MCP_RET_ERR_CLOSE_TASK_FAILED:
+		err = -EAGAIN;
+		break;
+	case MC_MCP_RET_ERR_NO_MORE_SESSIONS:
+		err = -EBUSY;
+		break;
+	case MC_MCP_RET_ERR_OUT_OF_RESOURCES:
+		err = -ENOSPC;
+		break;
+	case MC_MCP_RET_ERR_UNKNOWN_UUID:
+		err = -ENOENT;
+		break;
+	case MC_MCP_RET_ERR_WRONG_PUBLIC_KEY:
+		err = -EKEYREJECTED;
+		break;
+	case MC_MCP_RET_ERR_SERVICE_BLOCKED:
+		err = -ECONNREFUSED;
+		break;
+	case MC_MCP_RET_ERR_SERVICE_LOCKED:
+		err = -ECONNABORTED;
+		break;
+	case MC_MCP_RET_ERR_SERVICE_KILLED:
+		err = -ECONNRESET;
+		break;
+	case MC_MCP_RET_ERR_SYSTEM_NOT_READY:
+		err = -EAGAIN;
+		break;
+	case MC_MCP_RET_ERR_DOWNGRADE_NOT_AUTHORIZED:
+		err = -EPERM;
+		break;
+	default:
+		err = -EPERM;
+	}
+
+	/* Copy response back to caller struct */
+	memcpy(cmd, msg, sizeof(*cmd));
+
+out:
+	/* Update MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	if (ret) {
+		cmd_info->state = FAILED;
+		cmd_info->errno = -ret;
+	} else {
+		cmd_info->state = COMPLETE;
+		cmd_info->errno = -err;
+		cmd_info->result = msg->rsp_header.result;
+		/* For open session: get SID */
+		if (!err && out_session_id)
+			cmd_info->session_id = *out_session_id;
+	}
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	mutex_unlock(&l_ctx.buffer_mutex);
+	if (ret) {
+		mc_dev_err(ret, "%s: sending failed", cmd_to_string(cmd_id));
+		return ret;
+	}
+
+	if (err) {
+		if (cmd_id == MC_MCP_CMD_CLOSE_SESSION && err == -EAGAIN)
+			mc_dev_devel("%s: try again",
+				     cmd_to_string(cmd_id));
+		else
+			mc_dev_err(err, "%s: res %d", cmd_to_string(cmd_id),
+				   msg->rsp_header.result);
+		return err;
+	}
+
+	return 0;
+}
+
+static inline int __mcp_get_version(struct mc_version_info *version_info)
+{
+	union mcp_message cmd;
+	u32 version;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_mc_get_version(version_info);
+#endif
+
+	version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+			     MCDRVMODULEAPI_VERSION_MINOR);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_GET_MOBICORE_VERSION;
+	ret = mcp_cmd(&cmd, 0, NULL, NULL);
+	if (ret)
+		return ret;
+
+	memcpy(version_info, &cmd.rsp_get_version.version_info,
+	       sizeof(*version_info));
+	/*
+	 * The CMP version is meaningless in this case, and is replaced
+	 * by the driver's own version.
+	 */
+	version_info->version_nwd = version;
+	return 0;
+}
+
+int mcp_get_version(struct mc_version_info *version_info)
+{
+	static struct mc_version_info static_version_info;
+
+	/* If cache empty, get version from the SWd and cache it */
+	if (!static_version_info.version_nwd) {
+		int ret = __mcp_get_version(&static_version_info);
+
+		if (ret)
+			return ret;
+	}
+
+	/* Copy cached version */
+	memcpy(version_info, &static_version_info, sizeof(*version_info));
+	nq_set_version_ptr(static_version_info.product_id);
+	return 0;
+}
+
+int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *map)
+{
+	union mcp_message cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_LOAD_TOKEN;
+	cmd.cmd_load_token.wsm_data_type = map->type;
+	cmd.cmd_load_token.adr_load_data = map->addr;
+	cmd.cmd_load_token.ofs_load_data = map->offset;
+	cmd.cmd_load_token.len_load_data = map->length;
+	return mcp_cmd(&cmd, 0, NULL, NULL);
+}
+
+int mcp_load_check(const struct tee_object *obj,
+		   const struct mcp_buffer_map *map)
+{
+	const union mclf_header *header;
+	union mcp_message cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_CHECK_LOAD_TA;
+	/* Data */
+	cmd.cmd_check_load.wsm_data_type = map->type;
+	cmd.cmd_check_load.adr_load_data = map->addr;
+	cmd.cmd_check_load.ofs_load_data = map->offset;
+	cmd.cmd_check_load.len_load_data = map->length;
+	/* Header */
+	header = (union mclf_header *)(obj->data + obj->header_length);
+	cmd.cmd_check_load.uuid = header->mclf_header_v2.uuid;
+	return mcp_cmd(&cmd, 0, NULL, &cmd.cmd_check_load.uuid);
+}
+
+int mcp_load_key_so(uintptr_t data, const struct mcp_buffer_map *map)
+{
+	union mcp_message cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_LOAD_SYSENC_KEY_SO;
+	cmd.cmd_load_key_so.wsm_data_type = map->type;
+	cmd.cmd_load_key_so.adr_load_data = map->addr;
+	cmd.cmd_load_key_so.ofs_load_data = map->offset;
+	cmd.cmd_load_key_so.len_load_data = map->length;
+	return mcp_cmd(&cmd, 0, NULL, NULL);
+}
+
+int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
+		     bool *tci_in_use)
+{
+	static DEFINE_MUTEX(local_mutex);
+	struct tee_object *obj;
+	const union mclf_header *header;
+	struct tee_mmu *obj_mmu;
+	struct mcp_buffer_map obj_map;
+	union mcp_message cmd;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu()) {
+		ret = xen_mc_open_session(session, info);
+		if (ret)
+			return ret;
+
+		/* Add to list of sessions */
+		mutex_lock(&l_ctx.sessions_lock);
+		list_add_tail(&session->list, &l_ctx.sessions);
+		mutex_unlock(&l_ctx.sessions_lock);
+		return 0;
+	}
+#endif
+
+	/* Create 'blob' */
+	if (info->type == TEE_MC_UUID) {
+		/* Get TA from registry */
+		obj = tee_object_get(info->uuid, false);
+		/* Tell SWd to load TA from SFS as not in registry */
+		if (IS_ERR(obj) && (PTR_ERR(obj) == -ENOENT))
+			obj = tee_object_select(info->uuid);
+	} else if (info->type == TEE_MC_DRIVER_UUID) {
+		/* Load driver using only uuid */
+		obj = tee_object_select(info->uuid);
+		*tci_in_use = false;
+	} else if (info->user) {
+		/* Create secure object from user-space trustlet binary */
+		obj = tee_object_read(info->spid, info->va, info->len);
+	} else {
+		/* Create secure object from kernel-space trustlet binary */
+		obj = tee_object_copy(info->va, info->len);
+	}
+
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	/* Header */
+	header = (const union mclf_header *)(&obj->data[obj->header_length]);
+	if (info->type == TEE_MC_DRIVER &&
+	    (header->mclf_header_v2.flags &
+			MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE))
+		*tci_in_use = false;
+
+	/* Create mapping for blob (allocated by driver, so task = NULL) */
+	{
+		struct mc_ioctl_buffer buf = {
+			.va = (uintptr_t)obj->data,
+			.len = obj->length,
+			.flags = MC_IO_MAP_INPUT,
+		};
+
+		obj_mmu = tee_mmu_create(NULL, &buf);
+		if (IS_ERR(obj_mmu)) {
+			ret = PTR_ERR(obj_mmu);
+			goto err_mmu;
+		}
+
+		tee_mmu_buffer(obj_mmu, &obj_map);
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_OPEN_SESSION;
+	/* Data */
+	cmd.cmd_open.uuid = header->mclf_header_v2.uuid;
+	cmd.cmd_open.wsm_data_type = obj_map.type;
+	cmd.cmd_open.adr_load_data = obj_map.addr;
+	cmd.cmd_open.ofs_load_data = obj_map.offset;
+	cmd.cmd_open.len_load_data = obj_map.length;
+	/* Buffer */
+	if (*tci_in_use) {
+		struct mcp_buffer_map map;
+
+		tee_mmu_buffer(info->tci_mmu, &map);
+		cmd.cmd_open.wsmtype_tci = map.type;
+		cmd.cmd_open.adr_tci_buffer = map.addr;
+		cmd.cmd_open.ofs_tci_buffer = map.offset;
+		cmd.cmd_open.len_tci_buffer = map.length;
+	} else {
+		cmd.cmd_open.wsmtype_tci = WSM_INVALID;
+	}
+
+	/* Reset unexpected notification */
+	mutex_lock(&local_mutex);
+	l_ctx.unexp_notif.session_id = SID_MCP;	/* Cannot be a real session ID */
+	cmd.cmd_open.cmd_open_data.mclf_magic = MC_GP_CLIENT_AUTH_MAGIC;
+
+	/* Send MCP open command */
+	ret = mcp_cmd(&cmd, 0, &cmd.rsp_open.session_id, &cmd.cmd_open.uuid);
+	/* Make sure we have a valid session ID */
+	if (!ret && !cmd.rsp_open.session_id)
+		ret = -EBADE;
+
+	if (!ret) {
+		session->sid = cmd.rsp_open.session_id;
+		/* Add to list of sessions */
+		mutex_lock(&l_ctx.sessions_lock);
+		list_add_tail(&session->list, &l_ctx.sessions);
+		mutex_unlock(&l_ctx.sessions_lock);
+		/* Check for spurious notification */
+		mutex_lock(&l_ctx.unexp_notif_mutex);
+		if (l_ctx.unexp_notif.session_id == session->sid) {
+			mutex_lock(&session->exit_code_lock);
+			session->exit_code = l_ctx.unexp_notif.payload;
+			mutex_unlock(&session->exit_code_lock);
+			nq_session_state_update(&session->nq_session,
+						NQ_NOTIF_RECEIVED);
+			complete(&session->completion);
+		}
+
+		mutex_unlock(&l_ctx.unexp_notif_mutex);
+	}
+
+	mutex_unlock(&local_mutex);
+
+	/* Blob for UUID/TA not needed as re-mapped by the SWd */
+	tee_mmu_put(obj_mmu);
+
+err_mmu:
+	/* Delete secure object */
+	tee_object_free(obj);
+
+	return ret;
+}
+
+/*
+ * Legacy and GP TAs close differently:
+ * - GP TAs always send a notification with payload, whether on close or crash
+ * - Legacy TAs only send a notification with payload on crash
+ * - GP TAs may take time to close, and we get -EAGAIN back from mcp_cmd
+ * - Legacy TAs always close when asked, unless they are drivers, in which case
+ *   they just don't close at all
+ */
+int mcp_close_session(struct mcp_session *session)
+{
+	union mcp_message cmd;
+	int ret;
+
+	if (is_xen_domu()) {
+#ifdef TRUSTONIC_XEN_DOMU
+		ret = xen_mc_close_session(session);
+#endif
+	} else {
+		/* Signal a potential waiter that SWd session is going away */
+		complete(&session->completion);
+		/* Send MCP command */
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.cmd_header.cmd_id = MC_MCP_CMD_CLOSE_SESSION;
+		cmd.cmd_close.session_id = session->sid;
+		ret = mcp_cmd(&cmd, cmd.cmd_close.session_id, NULL, NULL);
+	}
+
+	mutex_lock(&l_ctx.sessions_lock);
+	if (!ret) {
+		session->state = MCP_SESSION_CLOSED;
+		list_del(&session->list);
+		nq_session_exit(&session->nq_session);
+	} else {
+		/* Something is not right, assume session is still running */
+		session->state = MCP_SESSION_CLOSE_FAILED;
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+	mc_dev_devel("close session %x ret %d state %s",
+		     session->sid, ret, state_to_string(session->state));
+	return ret;
+}
+
+/*
+ * Session is to be removed from NWd records as SWd has been wiped clean
+ */
+void mcp_cleanup_session(struct mcp_session *session)
+{
+	mutex_lock(&l_ctx.sessions_lock);
+	session->state = MCP_SESSION_CLOSED;
+	list_del(&session->list);
+	nq_session_exit(&session->nq_session);
+	mutex_unlock(&l_ctx.sessions_lock);
+}
+
+int mcp_map(u32 session_id, struct tee_mmu *mmu, u32 *sva)
+{
+	struct mcp_buffer_map map;
+	union mcp_message cmd;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_mc_map(session_id, mmu, sva);
+#endif
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_MAP;
+	cmd.cmd_map.session_id = session_id;
+	tee_mmu_buffer(mmu, &map);
+	cmd.cmd_map.wsm_type = map.type;
+	cmd.cmd_map.adr_buffer = map.addr;
+	cmd.cmd_map.ofs_buffer = map.offset;
+	cmd.cmd_map.len_buffer = map.length;
+	cmd.cmd_map.flags = map.flags;
+	ret = mcp_cmd(&cmd, session_id, NULL, NULL);
+	if (!ret) {
+		*sva = cmd.rsp_map.secure_va;
+		atomic_inc(&g_ctx.c_maps);
+	}
+
+	return ret;
+}
+
+int mcp_unmap(u32 session_id, const struct mcp_buffer_map *map)
+{
+	union mcp_message cmd;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_mc_unmap(session_id, map);
+#endif
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_UNMAP;
+	cmd.cmd_unmap.session_id = session_id;
+	cmd.cmd_unmap.wsm_type = map->type;
+	cmd.cmd_unmap.virtual_buffer_len = map->length;
+	cmd.cmd_unmap.secure_va = map->secure_va;
+	ret = mcp_cmd(&cmd, session_id, NULL, NULL);
+	if (!ret)
+		atomic_dec(&g_ctx.c_maps);
+
+	return ret;
+}
+
+static int mcp_close(void)
+{
+	union mcp_message cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd_header.cmd_id = MC_MCP_CMD_CLOSE_MCP;
+	return mcp_cmd(&cmd, 0, NULL, NULL);
+}
+
+int mcp_notify(struct mcp_session *session)
+{
+	if (l_ctx.mcp_dead)
+		return -EHOSTUNREACH;
+
+	if (session->sid == SID_MCP)
+		mc_dev_devel("notify MCP");
+	else
+		mc_dev_devel("notify session %x", session->sid);
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_mc_notify(session);
+#endif
+
+	/* Put notif_count as payload for debug purpose */
+	return nq_session_notify(&session->nq_session, session->sid,
+				 ++session->notif_count);
+}
+
+static inline void session_notif_handler(struct mcp_session *session, u32 id,
+					 u32 payload)
+{
+	mutex_lock(&l_ctx.sessions_lock);
+	mc_dev_devel("MCP notif from session %x exit code %d state %d",
+		     id, payload, session ? session->state : -1);
+	if (session) {
+		/* TA has terminated */
+		if (payload) {
+			/* Update exit code, or not */
+			mutex_lock(&session->exit_code_lock);
+			session->exit_code = payload;
+			mutex_unlock(&session->exit_code_lock);
+		}
+
+		nq_session_state_update(&session->nq_session,
+					NQ_NOTIF_RECEIVED);
+
+		/* Unblock waiter */
+		complete(&session->completion);
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+
+	/* Unknown session, probably being started */
+	if (!session) {
+		mutex_lock(&l_ctx.unexp_notif_mutex);
+		l_ctx.unexp_notif.session_id = id;
+		l_ctx.unexp_notif.payload = payload;
+		mutex_unlock(&l_ctx.unexp_notif_mutex);
+	}
+}
+
+static void mcp_notif_handler(u32 id, u32 payload)
+{
+	if (id == SID_MCP) {
+		/* MCP notification */
+		mc_dev_devel("notification from MCP");
+		complete(&l_ctx.complete);
+	} else {
+		/* Session notification */
+		struct mcp_session *session = NULL, *candidate;
+
+		mutex_lock(&l_ctx.sessions_lock);
+		list_for_each_entry(candidate, &l_ctx.sessions, list) {
+			if (candidate->sid == id) {
+				session = candidate;
+				break;
+			}
+		}
+		mutex_unlock(&l_ctx.sessions_lock);
+
+		/* session is NULL if id not found */
+		session_notif_handler(session, id, payload);
+	}
+}
+
+static int debug_sessions(struct kasnprintf_buf *buf)
+{
+	struct mcp_session *session;
+	int ret;
+
+	/* Header */
+	ret = kasnprintf(buf, "%20s %4s %-15s %-11s %4s\n",
+			 "CPU clock", "ID", "state", "notif state", "ec");
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(session, &l_ctx.sessions, list) {
+		const char *state_str;
+		u64 cpu_clk;
+		s32 err;
+
+		state_str = nq_session_state(&session->nq_session, &cpu_clk);
+		mcp_get_err(session, &err);
+		ret = kasnprintf(buf, "%20llu %4x %-15s %-11s %4d\n", cpu_clk,
+				 session->sid, state_to_string(session->state),
+				 state_str, err);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos,
+				  debug_sessions);
+}
+
+static const struct file_operations debug_sessions_ops = {
+	.read = debug_sessions_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+static inline int show_log_entry(struct kasnprintf_buf *buf,
+				 struct command_info *cmd_info)
+{
+	const char *state_str = "unknown";
+
+	switch (cmd_info->state) {
+	case UNUSED:
+		state_str = "unused";
+		break;
+	case PENDING:
+		state_str = "pending";
+		break;
+	case SENT:
+		state_str = "sent";
+		break;
+	case COMPLETE:
+		state_str = "complete";
+		break;
+	case FAILED:
+		state_str = "failed";
+		break;
+	}
+
+	return kasnprintf(buf, "%20llu %5d %-16s %5x %-8s %5d %6d%s\n",
+			  cmd_info->cpu_clk, cmd_info->pid,
+			  cmd_to_string(cmd_info->id), cmd_info->session_id,
+			  state_str, cmd_info->errno, cmd_info->result,
+			  cmd_info->uuid_str);
+}
+
+static int debug_last_cmds(struct kasnprintf_buf *buf)
+{
+	struct command_info *cmd_info;
+	int i, ret = 0;
+
+	/* Print the header, then dump the MCP command log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	ret = kasnprintf(buf, "%20s %5s %-16s %5s %-8s %5s %6s %s\n",
+			 "CPU clock", "PID", "command", "S-ID",
+			 "state", "errno", "result", "UUID");
+	if (ret < 0)
+		goto out;
+
+	cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+	if (cmd_info->state != UNUSED) {
+		/* Buffer has wrapped around, dump end (oldest records) */
+		for (i = l_ctx.last_cmds_index; i < LAST_CMDS_SIZE; i++) {
+			ret = show_log_entry(buf, cmd_info++);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+	/* Dump first records */
+	cmd_info = &l_ctx.last_cmds[0];
+	for (i = 0; i < l_ctx.last_cmds_index; i++) {
+		ret = show_log_entry(buf, cmd_info++);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	return ret;
+}
+
+static ssize_t debug_last_cmds_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos, debug_last_cmds);
+}
+
+static const struct file_operations debug_last_cmds_ops = {
+	.read = debug_last_cmds_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+int mcp_init(void)
+{
+	l_ctx.buffer = nq_get_mcp_buffer();
+	mutex_init(&l_ctx.buffer_mutex);
+	init_completion(&l_ctx.complete);
+	/* Set up the MCP pseudo-session */
+	mcp_session_init(&l_ctx.mcp_session);
+	l_ctx.mcp_session.sid = SID_MCP;
+	mutex_init(&l_ctx.unexp_notif_mutex);
+	INIT_LIST_HEAD(&l_ctx.sessions);
+	mutex_init(&l_ctx.sessions_lock);
+	mutex_init(&l_ctx.last_cmds_mutex);
+
+	l_ctx.timeout_period = MCP_TIMEOUT;
+
+	nq_register_notif_handler(mcp_notif_handler, false);
+	l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+	nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+
+	return 0;
+}
+
+void mcp_exit(void)
+{
+	mark_mcp_dead();
+	nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+}
+
+int mcp_start(void)
+{
+	/* Create debugfs sessions and last commands entries */
+	debugfs_create_file("sessions", 0400, g_ctx.debug_dir, NULL,
+			    &debug_sessions_ops);
+	debugfs_create_file("last_mcp_commands", 0400, g_ctx.debug_dir, NULL,
+			    &debug_last_cmds_ops);
+	debugfs_create_u32("mcp_timeout", 0600, g_ctx.debug_dir,
+			   &l_ctx.timeout_period);
+	return 0;
+}
+
+void mcp_stop(void)
+{
+	mcp_close();
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.h
new file mode 100644
index 0000000..cf4c6a7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mcp.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_MCP_H_
+#define _MC_MCP_H_
+
+#include "mci/mcloadformat.h"		/* struct identity */
+#include "nq.h"
+
+struct tee_mmu;
+
+/* Structure to hold the TA/driver information at open */
+struct mcp_open_info {
+	enum {
+		TEE_MC_UUID,
+		TEE_MC_TA,
+		TEE_MC_DRIVER,
+		TEE_MC_DRIVER_UUID,
+	}	type;
+	/* TA/driver */
+	const struct mc_uuid_t	*uuid;
+	u32			spid;
+	uintptr_t		va;
+	size_t			len;
+	/* TCI */
+	uintptr_t		tci_va;
+	size_t			tci_len;
+	struct tee_mmu		*tci_mmu;
+	/* Origin */
+	bool			user;
+};
+
+/* Structure to hold the TA/driver descriptor to pass to MCP */
+struct tee_object {
+	u32	length;		/* Total length */
+	u32	header_length;	/* Length of header before payload */
+	u8	data[];		/* Header followed by payload */
+};
+
+/* Structure to hold all mapped buffer data to pass to MCP */
+struct mcp_buffer_map {
+	u64		addr;		/** Page-aligned PA, or VA */
+	unsigned long	nr_pages;	/** Total number of pages mapped */
+	u32		secure_va;	/** SWd virtual address */
+	u32		offset;		/** Data offset inside the first page */
+	u32		length;		/** Length of the data */
+	u32		type;		/** Type of MMU */
+	u32		flags;		/** Flags (typically read/write) */
+	struct tee_mmu	*mmu;		/** MMU from which the map was made */
+};
+
+struct mcp_session {
+	/* Notification queue session */
+	struct nq_session	nq_session;
+	/* Session ID */
+	u32			sid;
+	/* Sessions list (protected by mcp sessions_lock) */
+	struct list_head	list;
+	/* Notification waiter lock */
+	struct mutex		notif_wait_lock;	/* Only one at a time */
+	/* Notification received */
+	struct completion	completion;
+	/* Exit code lock */
+	struct mutex		exit_code_lock;
+	/* Last exit code reported by the TA */
+	s32			exit_code;
+	/* Session state (protected by mcp sessions_lock) */
+	enum mcp_session_state {
+		MCP_SESSION_RUNNING,
+		MCP_SESSION_CLOSE_FAILED,
+		MCP_SESSION_CLOSED,
+	}			state;
+	/* Notification counter */
+	u32			notif_count;
+};
+
+/* Init for the mcp_session structure */
+void mcp_session_init(struct mcp_session *session);
+
+/* Commands */
+int mcp_get_version(struct mc_version_info *version_info);
+int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *buffer_map);
+int mcp_load_check(const struct tee_object *obj,
+		   const struct mcp_buffer_map *buffer_map);
+int mcp_load_key_so(uintptr_t data, const struct mcp_buffer_map *buffer_map);
+int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
+		     bool *tci_in_use);
+int mcp_close_session(struct mcp_session *session);
+void mcp_cleanup_session(struct mcp_session *session);
+int mcp_map(u32 session_id, struct tee_mmu *mmu, u32 *sva);
+int mcp_unmap(u32 session_id, const struct mcp_buffer_map *map);
+int mcp_notify(struct mcp_session *mcp_session);
+int mcp_wait(struct mcp_session *session, s32 timeout, bool silent_expiry);
+int mcp_get_err(struct mcp_session *session, s32 *err);
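+
+/*
+ * Illustrative call sequence (not prescribed by this header): a client
+ * typically calls mcp_open_session(), shares extra buffers through
+ * mcp_map()/mcp_unmap(), exchanges notifications via mcp_notify()/mcp_wait(),
+ * and finally calls mcp_close_session(); mcp_cleanup_session() is also
+ * available for session teardown paths.
+ */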
+
+/* Initialisation/cleanup */
+int mcp_init(void);
+void mcp_exit(void);
+int mcp_start(void);
+void mcp_stop(void);
+
+#endif /* _MC_MCP_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.c
new file mode 100644
index 0000000..dce1e43
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <asm/pgtable.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <ion.h>
+#endif
+
+#ifdef CONFIG_XEN
+/* To get the MFN */
+#include <linux/pfn.h>
+#include <xen/page.h>
+#endif
+
+#include "public/mc_user.h"
+
+#include "mci/mcimcp.h"
+
+#include "main.h"
+#include "mcp.h"	/* mcp_buffer_map */
+#include "mmu.h"
+
+#define PHYS_48BIT_MASK (BIT(48) - 1)
+
+/* Common */
+#define MMU_BUFFERABLE		BIT(2)		/* AttrIndx[0] */
+#define MMU_CACHEABLE		BIT(3)		/* AttrIndx[1] */
+#define MMU_EXT_NG		BIT(11)		/* ARMv6 and higher */
+
+/* LPAE */
+#define MMU_TYPE_PAGE		(3 << 0)
+#define MMU_NS			BIT(5)
+#define MMU_AP_RW_ALL		BIT(6) /* AP[2:1], RW, at any privilege level */
+#define	MMU_AP2_RO		BIT(7)
+#define MMU_EXT_SHARED_64	(3 << 8)	/* SH[1:0], inner shareable */
+#define MMU_EXT_AF		BIT(10)		/* Access Flag */
+#define MMU_EXT_XN		(((u64)1) << 54) /* XN */
+
+/* Non-LPAE */
+#define MMU_TYPE_EXT		(3 << 0)	/* v5 */
+#define MMU_TYPE_SMALL		(2 << 0)
+#define MMU_EXT_AP0		BIT(4)
+#define MMU_EXT_AP1		(2 << 4)
+#define MMU_EXT_AP2		BIT(9)
+#define MMU_EXT_TEX(x)		((x) << 6)	/* v5 */
+#define MMU_EXT_SHARED_32	BIT(10)		/* ARMv6 and higher */
+
+/* ION */
+/* Trustonic Specific flag to detect ION mem */
+#define MMU_ION_BUF		BIT(24)
+
+static inline long gup_local(struct mm_struct *mm, uintptr_t start,
+			     unsigned long nr_pages, int write,
+			     struct page **pages)
+{
+	unsigned int gup_flags = 0;
+
+	if (write)
+		gup_flags |= FOLL_WRITE;
+
+	return get_user_pages_remote(NULL, mm, start, nr_pages, gup_flags,
+				    pages, NULL, NULL);
+}
+
+static inline long gup_local_repeat(struct mm_struct *mm, uintptr_t start,
+				    unsigned long nr_pages, int write,
+				    struct page **pages)
+{
+	int retries = 10;
+	long ret = 0;
+
+	while (retries--) {
+		ret = gup_local(mm, start, nr_pages, write, pages);
+
+		if (ret != -EBUSY)
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * A table that could be either a pmd or pte
+ */
+union mmu_table {
+	u64		*entries;	/* Array of PTEs */
+	/* Array of pages */
+	struct page	**pages;
+	/* Array of VAs */
+	uintptr_t	*vas;
+	/* Address of table */
+	void		*addr;
+	/* Page for table */
+	unsigned long	page;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into the SWd, an MMU table is allocated.
+ * In addition, an area of at most 1 MB of virtual address space is mapped
+ * into the MMU table, and a handle to this table is returned to the user.
+ */
+struct tee_mmu {
+	struct kref			kref;
+	/* Array of pages that hold the buffer PTEs */
+	union mmu_table			pte_tables[PMD_ENTRIES_MAX];
+	/* Actual number of PTE tables */
+	size_t				nr_pmd_entries;
+	/* Contains the physical addresses of the PTE tables */
+	union mmu_table			pmd_table;
+	struct tee_deleter		*deleter;	/* Xen map to free */
+	unsigned long			nr_pages;
+	int				pages_created;	/* Leak check */
+	int				pages_locked;	/* Leak check */
+	u32				offset;
+	u32				length;
+	u32				flags;
+	/* Pages are from user space */
+	bool				user;
+	bool				use_pages_and_vas;
+	/* ION case only */
+	struct dma_buf			*dma_buf;
+	struct dma_buf_attachment	*attach;
+	struct sg_table			*sgt;
+};
+
+static void tee_mmu_delete(struct tee_mmu *mmu)
+{
+	unsigned long chunk, nr_pages_left = mmu->nr_pages;
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	if (mmu->dma_buf) {
+		dma_buf_unmap_attachment(mmu->attach, mmu->sgt,
+					 DMA_BIDIRECTIONAL);
+		dma_buf_detach(mmu->dma_buf, mmu->attach);
+		dma_buf_put(mmu->dma_buf);
+	}
+#endif
+
+	/* Release all locked user space pages */
+	for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+		union mmu_table *pte_table = &mmu->pte_tables[chunk];
+		unsigned long nr_pages = nr_pages_left;
+
+		if (nr_pages > PTE_ENTRIES_MAX)
+			nr_pages = PTE_ENTRIES_MAX;
+
+		nr_pages_left -= nr_pages;
+
+		if (!pte_table->page)
+			break;
+
+		if (mmu->user && mmu->use_pages_and_vas) {
+			struct page **page = pte_table->pages;
+			int i;
+
+			for (i = 0; i < nr_pages; i++, page++)
+				put_page(*page);
+
+			mmu->pages_locked -= nr_pages;
+		} else if (mmu->user) {
+			u64 *pte64 = pte_table->entries;
+			pte_t pte;
+			int i;
+
+			for (i = 0; i < nr_pages; i++) {
+#if defined(CONFIG_ARM)
+				{
+					pte = *pte64++;
+					/* Unused entries are 0 */
+					if (!pte)
+						break;
+				}
+#else
+				{
+					pte.pte = *pte64++;
+					/* Unused entries are 0 */
+					if (!pte.pte)
+						break;
+				}
+#endif
+
+				/* pte_page() cannot return NULL */
+				put_page(pte_page(pte));
+			}
+
+			mmu->pages_locked -= nr_pages;
+		}
+
+		free_page(pte_table->page);
+		mmu->pages_created--;
+	}
+
+	if (mmu->pmd_table.page) {
+		free_page(mmu->pmd_table.page);
+		mmu->pages_created--;
+	}
+
+	if (mmu->pages_created || mmu->pages_locked)
+		mc_dev_err(-EUCLEAN,
+			   "leak detected: still in use %d, still locked %d",
+			   mmu->pages_created, mmu->pages_locked);
+
+	if (mmu->deleter)
+		mmu->deleter->delete(mmu->deleter->object);
+
+	kfree(mmu);
+
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_mmus);
+}
+
+static struct tee_mmu *tee_mmu_create_common(const struct mcp_buffer_map *b_map)
+{
+	struct tee_mmu *mmu;
+	int ret = -ENOMEM;
+
+	if (b_map->nr_pages > (PMD_ENTRIES_MAX * PTE_ENTRIES_MAX)) {
+		ret = -EINVAL;
+		mc_dev_err(ret, "data mapping exceeds %d pages: %lu",
+			   PMD_ENTRIES_MAX * PTE_ENTRIES_MAX, b_map->nr_pages);
+		return ERR_PTR(ret);
+	}
+
+	/* Allocate the struct */
+	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+	if (!mmu)
+		return ERR_PTR(-ENOMEM);
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_mmus);
+	kref_init(&mmu->kref);
+
+	/* The Xen front-end does not use PTEs */
+	if (is_xen_domu())
+		mmu->use_pages_and_vas = true;
+
+	/* Buffer info */
+	mmu->offset = b_map->offset;
+	mmu->length = b_map->length;
+	mmu->flags = b_map->flags;
+
+	/* Pages info */
+	mmu->nr_pages = b_map->nr_pages;
+	mmu->nr_pmd_entries = (mmu->nr_pages + PTE_ENTRIES_MAX - 1) /
+			    PTE_ENTRIES_MAX;
+	mc_dev_devel("mmu->nr_pages %lu num_ptes_pages %zu",
+		     mmu->nr_pages, mmu->nr_pmd_entries);
+
+	/* Allocate a page for the L1 table, always used for DomU */
+	mmu->pmd_table.page = get_zeroed_page(GFP_KERNEL);
+	if (!mmu->pmd_table.page)
+		goto end;
+
+	mmu->pages_created++;
+
+	return mmu;
+
+end:
+	tee_mmu_delete(mmu);
+	return ERR_PTR(ret);
+}
+
+static bool mmu_get_dma_buffer(struct tee_mmu *mmu, int va)
+{
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	struct dma_buf *buf;
+
+	buf = dma_buf_get(va);
+	if (IS_ERR(buf))
+		return false;
+
+	mmu->dma_buf = buf;
+	mmu->attach = dma_buf_attach(mmu->dma_buf, g_ctx.mcd);
+	if (IS_ERR(mmu->attach))
+		goto err_attach;
+
+	mmu->sgt = dma_buf_map_attachment(mmu->attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(mmu->sgt))
+		goto err_map;
+
+	return true;
+
+err_map:
+	dma_buf_detach(mmu->dma_buf, mmu->attach);
+
+err_attach:
+	dma_buf_put(mmu->dma_buf);
+#endif
+	return false;
+}
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct tee_mmu *tee_mmu_create(struct mm_struct *mm,
+			       const struct mc_ioctl_buffer *buf)
+{
+	struct tee_mmu	*mmu;
+	const void	*data = (const void *)(uintptr_t)buf->va;
+	const void	*reader = (const void *)((uintptr_t)data & PAGE_MASK);
+	struct page	**pages;	/* Same as below, conveniently typed */
+	unsigned long	pages_page = 0;	/* Page to contain the page pointers */
+	unsigned long	chunk;
+	struct mcp_buffer_map b_map = {
+		.offset = (u32)(buf->va & ~PAGE_MASK),
+		.length = buf->len,
+		.flags = buf->flags,
+	};
+	bool		writeable = buf->flags & MC_IO_MAP_OUTPUT;
+	int		ret = 0;
+
+#ifndef CONFIG_DMA_SHARED_BUFFER
+	if (buf->flags & MMU_ION_BUF) {
+		mc_dev_err(-EINVAL, "ION buffers not supported by kernel");
+		return ERR_PTR(-EINVAL);
+	}
+#endif
+
+	/* Check input arguments */
+	if (!(buf->flags & MMU_ION_BUF) && !buf->va)
+		return ERR_PTR(-EINVAL);
+
+	if (buf->flags & MMU_ION_BUF)
+		/* buf->va is not a valid address. ION buffers are aligned */
+		b_map.offset = 0;
+
+	/* Allocate the struct */
+	b_map.nr_pages = PAGE_ALIGN(b_map.offset + b_map.length) / PAGE_SIZE;
+	/* Allow registered shared memory with a valid pointer and zero size */
+	if (!b_map.nr_pages)
+		b_map.nr_pages = 1;
+
+	mmu = tee_mmu_create_common(&b_map);
+	if (IS_ERR(mmu))
+		return mmu;
+
+	if (buf->flags & MMU_ION_BUF) {
+		mc_dev_devel("Buffer is ION");
+		/* Buffer is ION -
+		 * va is the client's dma_buf fd, which should be converted
+		 * to a struct sg_table * directly.
+		 */
+		if (!mmu_get_dma_buffer(mmu, buf->va)) {
+			ret = -EINVAL;
+			mc_dev_err(ret, "mmu_get_dma_buffer failed");
+			goto end;
+		}
+	}
+	/* Get a page to store page pointers */
+	pages_page = get_zeroed_page(GFP_KERNEL);
+	if (!pages_page) {
+		ret = -ENOMEM;
+		goto end;
+	}
+	mmu->pages_created++;
+
+	pages = (struct page **)pages_page;
+	for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+		unsigned long nr_pages;
+		int i;
+
+		/* Size to map for this chunk */
+		if (chunk == (mmu->nr_pmd_entries - 1))
+			nr_pages = ((mmu->nr_pages - 1) % PTE_ENTRIES_MAX) + 1;
+		else
+			nr_pages = PTE_ENTRIES_MAX;
+
+		/* Allocate a page to hold ptes that describe buffer pages */
+		mmu->pte_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
+		if (!mmu->pte_tables[chunk].page) {
+			ret = -ENOMEM;
+			goto end;
+		}
+		mmu->pages_created++;
+
+		/* Add page address to pmd table if needed */
+		if (mmu->use_pages_and_vas)
+			mmu->pmd_table.vas[chunk] =
+				mmu->pte_tables[chunk].page;
+		else
+			mmu->pmd_table.entries[chunk] =
+			       virt_to_phys(mmu->pte_tables[chunk].addr);
+
+		/* Get pages */
+		if (mmu->dma_buf) {
+			/* Buffer is ION */
+			struct sg_mapping_iter miter;
+			struct page **page_ptr;
+
+			page_ptr = &pages[0];
+			sg_miter_start(&miter, mmu->sgt->sgl,
+				       mmu->sgt->nents,
+				       SG_MITER_FROM_SG);
+			while (sg_miter_next(&miter))
+				*page_ptr++ = miter.page;
+
+			sg_miter_stop(&miter);
+		} else if (mm) {
+			long gup_ret;
+
+			/* Buffer was allocated in user space */
+			down_read(&mm->mmap_sem);
+			/*
+			 * Always try to map read/write from a Linux PoV, so
+			 * Linux creates (page faults) the underlying pages if
+			 * missing.
+			 */
+			gup_ret = gup_local_repeat(mm, (uintptr_t)reader,
+						   nr_pages, 1, pages);
+			if ((gup_ret == -EFAULT) && !writeable) {
+				/*
+				 * If mapping read/write fails, and the buffer
+				 * is to be shared as input only, try to map
+				 * again read-only.
+				 */
+				gup_ret = gup_local_repeat(mm,
+							   (uintptr_t)reader,
+							   nr_pages, 0, pages);
+			}
+			up_read(&mm->mmap_sem);
+			if (gup_ret < 0) {
+				ret = gup_ret;
+				mc_dev_err(ret, "failed to get user pages @%p",
+					   reader);
+				goto end;
+			}
+
+			/* check if we could lock all pages. */
+			if (gup_ret != nr_pages) {
+				mc_dev_err((int)gup_ret,
+					   "failed to get user pages");
+				release_pages(pages, gup_ret);
+				ret = -EINVAL;
+				goto end;
+			}
+
+			reader += nr_pages * PAGE_SIZE;
+			mmu->user = true;
+			mmu->pages_locked += nr_pages;
+		} else if (is_vmalloc_addr(data)) {
+			/* Buffer vmalloc'ed in kernel space */
+			for (i = 0; i < nr_pages; i++) {
+				struct page *page = vmalloc_to_page(reader);
+
+				if (!page) {
+					ret = -EINVAL;
+					mc_dev_err(ret,
+						   "failed to map address");
+					goto end;
+				}
+
+				pages[i] = page;
+				reader += PAGE_SIZE;
+			}
+		} else {
+			/* Buffer kmalloc'ed in kernel space */
+			struct page *page = virt_to_page(reader);
+
+			reader += nr_pages * PAGE_SIZE;
+			for (i = 0; i < nr_pages; i++)
+				pages[i] = page++;
+		}
+
+		/* Create table of physical addresses */
+		if (mmu->use_pages_and_vas) {
+			memcpy(mmu->pte_tables[chunk].pages, pages,
+			       nr_pages * sizeof(*pages));
+		} else {
+			for (i = 0; i < nr_pages; i++) {
+				mmu->pte_tables[chunk].entries[i] =
+						page_to_phys(pages[i]);
+			}
+		}
+	}
+
+end:
+	if (pages_page) {
+		free_page(pages_page);
+		mmu->pages_created--;
+	}
+
+	if (ret) {
+		tee_mmu_delete(mmu);
+		return ERR_PTR(ret);
+	}
+
+	mc_dev_devel(
+		"created mmu %p: %s va %llx len %u off %u flg %x pmd table %lx",
+		mmu, mmu->user ? "user" : "kernel", buf->va, mmu->length,
+		mmu->offset, mmu->flags, mmu->pmd_table.page);
+	return mmu;
+}
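+
+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this file),
+ * assuming a user-space buffer described by an mc_ioctl_buffer:
+ *
+ *	struct mc_ioctl_buffer buf = {
+ *		.va    = (uintptr_t)user_va,
+ *		.len   = user_len,
+ *		.flags = MC_IO_MAP_OUTPUT,	(or other map flags as needed)
+ *	};
+ *	struct tee_mmu *mmu = tee_mmu_create(current->mm, &buf);
+ *	struct mcp_buffer_map map;
+ *
+ *	if (!IS_ERR(mmu)) {
+ *		tee_mmu_buffer(mmu, &map);
+ *		... hand 'mmu'/'map' to the MCP layer (e.g. mcp_map()) ...
+ *		tee_mmu_put(mmu);	(drop the reference when done)
+ *	}
+ */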
+
+struct tee_mmu *tee_mmu_wrap(struct tee_deleter *deleter, struct page **pages,
+			     const struct mcp_buffer_map *b_map)
+{
+	int ret = -EINVAL;
+#ifdef CONFIG_XEN
+	struct tee_mmu *mmu;
+	unsigned long chunk, nr_pages_left;
+
+	/* Allocate the struct */
+	mmu = tee_mmu_create_common(b_map);
+	if (IS_ERR(mmu))
+		return mmu;
+
+	nr_pages_left = mmu->nr_pages;
+	for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+		unsigned long nr_pages = nr_pages_left;
+		u64 *pte;
+		int i;
+
+		if (nr_pages > PTE_ENTRIES_MAX)
+			nr_pages = PTE_ENTRIES_MAX;
+
+		nr_pages_left -= nr_pages;
+
+		/* Allocate a page to hold ptes that describe buffer pages */
+		mmu->pte_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
+		if (!mmu->pte_tables[chunk].page) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		mmu->pages_created++;
+
+		/* Add page address to pmd table if needed */
+		mmu->pmd_table.entries[chunk] =
+			virt_to_phys(mmu->pte_tables[chunk].addr);
+
+		/* Convert to PTEs */
+		pte = &mmu->pte_tables[chunk].entries[0];
+
+		for (i = 0; i < nr_pages; i++, pages++, pte++) {
+			unsigned long phys;
+			unsigned long pfn;
+
+			phys = page_to_phys(*pages);
+#if defined CONFIG_ARM64
+			phys &= PHYS_48BIT_MASK;
+#endif
+			pfn = PFN_DOWN(phys);
+			*pte = __pfn_to_mfn(pfn) << PAGE_SHIFT;
+		}
+	}
+
+	mmu->deleter = deleter;
+	mc_dev_devel("wrapped mmu %p: len %u off %u flg %x pmd table %lx",
+		     mmu, mmu->length, mmu->offset, mmu->flags,
+		     mmu->pmd_table.page);
+	return mmu;
+
+err:
+	tee_mmu_delete(mmu);
+#endif
+	return ERR_PTR(ret);
+}
+
+void tee_mmu_set_deleter(struct tee_mmu *mmu, struct tee_deleter *deleter)
+{
+	mmu->deleter = deleter;
+}
+
+static void tee_mmu_release(struct kref *kref)
+{
+	struct tee_mmu *mmu = container_of(kref, struct tee_mmu, kref);
+
+	mc_dev_devel("free mmu %p: %s len %u off %u pmd table %lx",
+		     mmu, mmu->user ? "user" : "kernel", mmu->length,
+		     mmu->offset, mmu->pmd_table.page);
+	tee_mmu_delete(mmu);
+}
+
+void tee_mmu_get(struct tee_mmu *mmu)
+{
+	kref_get(&mmu->kref);
+}
+
+void tee_mmu_put(struct tee_mmu *mmu)
+{
+	kref_put(&mmu->kref, tee_mmu_release);
+}
+
+void tee_mmu_buffer(struct tee_mmu *mmu, struct mcp_buffer_map *map)
+{
+	if (mmu->use_pages_and_vas)
+		map->addr = mmu->pmd_table.page;
+	else
+		map->addr = virt_to_phys(mmu->pmd_table.addr);
+
+	map->secure_va = 0;
+	map->offset = mmu->offset;
+	map->length = mmu->length;
+	map->nr_pages = mmu->nr_pages;
+	map->flags = mmu->flags;
+	map->type = WSM_L1;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	if (mmu->dma_buf) {
+		/* ION */
+		if (!(((struct ion_buffer *)mmu->dma_buf->priv)->flags
+		   & ION_FLAG_CACHED)) {
+			map->type |= WSM_UNCACHED;
+			mc_dev_devel("ION buffer Non cacheable");
+		} else {
+			mc_dev_devel("ION buffer cacheable");
+		}
+	}
+#endif
+	map->mmu = mmu;
+}
+
+int tee_mmu_debug_structs(struct kasnprintf_buf *buf, const struct tee_mmu *mmu)
+{
+	return kasnprintf(buf,
+			  "\t\t\tmmu %pK: %s len %u off %u table %pK\n",
+			  mmu, mmu->user ? "user" : "kernel", mmu->length,
+			  mmu->offset, (void *)mmu->pmd_table.page);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.h
new file mode 100644
index 0000000..a5b001f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/mmu.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _TBASE_MEM_H_
+#define _TBASE_MEM_H_
+
+/*
+ * Maximum number of entries in a Page Table Entries array, each entry of
+ * which maps one 4 KiB page. An entry is a 64-bit physical address with
+ * some optional flags. With 512 entries it is possible to map a 2 MiB
+ * memory block.
+ */
+#define PTE_ENTRIES_MAX	512
+
+/*
+ * Maximum number of entries in a Page Middle Directory. Each entry is a
+ * 64-bit physical address that points to a PTE table (one 4 KiB page).
+ * With 512 entries it is possible to map a 1 GiB memory block.
+ */
+#define PMD_ENTRIES_MAX	512
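+
+/*
+ * Example (illustrative): one PTE table maps up to 512 * 4 KiB = 2 MiB, so
+ * a hypothetical 10 MiB buffer needs 2560 PTEs, i.e. 5 PTE tables and
+ * 5 PMD entries; the full PMD covers up to 512 * 2 MiB = 1 GiB.
+ */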
+
+struct tee_mmu;
+struct mcp_buffer_map;
+
+struct tee_deleter {
+	void *object;
+	void (*delete)(void *object);
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct tee_mmu *tee_mmu_create(struct mm_struct *mm,
+			       const struct mc_ioctl_buffer *buf);
+
+/*
+ * Allocate MMU table and map pages into it.
+ * This is for Xen Dom0 to re-create a buffer with existing pages.
+ */
+struct tee_mmu *tee_mmu_wrap(struct tee_deleter *deleter, struct page **pages,
+			     const struct mcp_buffer_map *buf);
+
+/*
+ * Give the MMU an object to release when the MMU itself is released.
+ */
+void tee_mmu_set_deleter(struct tee_mmu *mmu, struct tee_deleter *deleter);
+
+/*
+ * Gets a reference on an MMU table.
+ */
+void tee_mmu_get(struct tee_mmu *mmu);
+
+/*
+ * Puts a reference on an MMU table.
+ */
+void tee_mmu_put(struct tee_mmu *mmu);
+
+/*
+ * Fill in buffer info for MMU table.
+ */
+void tee_mmu_buffer(struct tee_mmu *mmu, struct mcp_buffer_map *map);
+
+/*
+ * Add info to debug buffer.
+ */
+int tee_mmu_debug_structs(struct kasnprintf_buf *buf,
+			  const struct tee_mmu *mmu);
+
+#endif /* _TBASE_MEM_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.c
new file mode 100644
index 0000000..0dc493d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>	/* local_clock */
+
+#include "platform.h"			/* CPU-related information */
+
+#include "public/mc_user.h"
+
+#include "mci/mcifc.h"
+#include "mci/mciiwp.h"
+#include "mci/mcimcp.h"
+#include "mci/mcinq.h"
+#include "mci/mcitime.h"		/* struct mcp_time */
+
+#include "main.h"
+#include "clock.h"
+#include "fastcall.h"
+#include "logging.h"
+#include "nq.h"
+
+#define NQ_NUM_ELEMS		64
+#define SCHEDULING_FREQ		5	/**< N-SIQ every n-th time */
+#define DEFAULT_TIMEOUT_MS	20000	/* We do nothing on timeout anyway */
+
+static struct {
+	struct mutex buffer_mutex;	/* Lock on SWd communication buffer */
+	struct mcp_buffer *mcp_buffer;
+	struct interworld_session *iwp_buffer;
+	struct task_struct *irq_bh_thread;
+	struct completion irq_bh_complete;
+	bool irq_bh_thread_run;
+	int irq;
+	struct blocking_notifier_head tee_stop_notifiers;
+	void (*mcp_notif_handler)(u32 id, u32 payload);
+	void (*iwp_notif_handler)(u32 id, u32 payload);
+	/* MobiCore MCI information */
+	unsigned int order;
+	union {
+		void		*mci;
+		struct {
+			struct notification_queue *tx;
+			struct notification_queue *rx;
+		} nq;
+	};
+	/*
+	 * This list queues notifications when the notification queue
+	 * overflows, so that no session loses its notification, especially
+	 * the MCP.
+	 */
+	struct mutex		notifications_mutex;
+	struct list_head	notifications;
+	/* Dump buffer */
+	char			*tee_version;
+	struct kasnprintf_buf	dump;
+	/* Time */
+	struct mcp_time		*time;
+
+	/* Scheduler */
+	struct task_struct	*tee_scheduler_thread;
+	bool			tee_scheduler_run;
+	bool			tee_hung;
+	int			boot_ret;
+	struct completion	boot_complete;	/* Signal end of boot */
+	struct completion	idle_complete;	/* Unblock scheduler thread */
+	struct completion	sleep_complete;	/* Wait for sleep status */
+	struct mutex		sleep_mutex;	/* Protect sleep request */
+	struct mutex		request_mutex;	/* Protect all below */
+	/* The order of this enum matters */
+	enum sched_command {
+		NONE,		/* No specific request */
+		YIELD,		/* Run the SWd */
+		NSIQ,		/* Schedule the SWd */
+		SUSPEND,	/* Suspend the SWd */
+		RESUME,		/* Resume the SWd */
+	}			request;
+	bool			suspended;
+
+	/* Logging */
+	phys_addr_t		log_buffer;
+	u32			log_buffer_size;
+	bool			log_buffer_busy;
+} l_ctx;
+
+static inline bool is_iwp_id(u32 id)
+{
+	return (id & SID_IWP_NOTIFICATION) != 0;
+}
+
+static inline void session_state_update_internal(struct nq_session *session,
+						 enum nq_notif_state state)
+{
+	mutex_lock(&session->mutex);
+	session->state = state;
+	session->cpu_clk = local_clock();
+	mutex_unlock(&session->mutex);
+}
+
+/*
+ * Notification Queue overflow management:
+ * - once the SWd NQ is full, sessions get added to the overflow queue:
+ *   'l_ctx.notifications'
+ * - as long as this queue is not empty, new notifications are added there
+ *   first (if not already present), and then the queue is flushed
+ * - the queue is also flushed by the scheduler once the SWd has run
+ */
+static inline bool notif_queue_full(void)
+{
+	struct notification_queue *tx = l_ctx.nq.tx;
+
+	return (tx->hdr.write_cnt - tx->hdr.read_cnt) == tx->hdr.queue_size;
+}
+
+static inline void notif_queue_push(u32 session_id, u32 payload)
+{
+	struct notification_queue_header *hdr = &l_ctx.nq.tx->hdr;
+	u32 i = hdr->write_cnt % hdr->queue_size;
+
+	l_ctx.nq.tx->notification[i].session_id = session_id;
+	l_ctx.nq.tx->notification[i].payload = payload;
+	/*
+	 * Ensure notification[] is written before we update the counter
+	 * We want an ARM dmb() / ARM64 dmb(sy) here
+	 */
+	smp_mb();
+
+	hdr->write_cnt++;
+	/*
+	 * Ensure write_cnt is written before new notification
+	 * We want an ARM dsb() / ARM64 dsb(sy) here
+	 */
+	rmb();
+}
+
+static void retrieve_last_session_payload(u32 *session_id, u32 *payload)
+{
+	struct notification_queue_header *hdr = &l_ctx.nq.tx->hdr;
+	u32 i = (hdr->write_cnt - 1) % hdr->queue_size;
+
+	*session_id = l_ctx.nq.tx->notification[i].session_id;
+	*payload = l_ctx.nq.tx->notification[i].payload;
+}
+
+/* Must be called with l_ctx.notifications_mutex taken */
+static inline bool nq_notifications_flush(void)
+{
+	bool flushed = false;
+
+	while (!list_empty(&l_ctx.notifications) && !notif_queue_full()) {
+		struct nq_session *session;
+
+		session = list_first_entry(&l_ctx.notifications,
+					   struct nq_session, list);
+		mc_dev_devel("pop %x", session->id);
+		notif_queue_push(session->id, session->payload);
+		session_state_update_internal(session, NQ_NOTIF_SENT);
+		list_del_init(&session->list);
+		flushed = true;
+	}
+
+	return flushed;
+}
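+
+/*
+ * Example (illustrative): with a queue of NQ_NUM_ELEMS (64) entries, the
+ * 65th outstanding notification is parked on l_ctx.notifications; the next
+ * nq_notifications_flush() call (from nq_session_notify() or the scheduler
+ * loop) moves it into the SWd queue as soon as a slot frees up.
+ */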
+
+static int nq_scheduler_command(enum sched_command command)
+{
+	if (IS_ERR_OR_NULL(l_ctx.tee_scheduler_thread))
+		return -EFAULT;
+
+	mutex_lock(&l_ctx.request_mutex);
+	if (l_ctx.request < command) {
+		l_ctx.request = command;
+		complete(&l_ctx.idle_complete);
+	}
+
+	mutex_unlock(&l_ctx.request_mutex);
+	return 0;
+}
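+
+/*
+ * Note: sched_command values are ordered (NONE < YIELD < NSIQ < SUSPEND <
+ * RESUME), so a pending request can only be upgraded by the check above:
+ * e.g. a queued YIELD is promoted to NSIQ, but an NSIQ is never downgraded
+ * back to YIELD.
+ */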
+
+static inline void nq_update_time(void)
+{
+	struct timespec tm;
+
+	getnstimeofday(&tm);
+	l_ctx.time->wall_clock_seconds = tm.tv_sec;
+	l_ctx.time->wall_clock_nsec = tm.tv_nsec;
+	getrawmonotonic(&tm);
+	l_ctx.time->monotonic_seconds = tm.tv_sec;
+	l_ctx.time->monotonic_nsec = tm.tv_nsec;
+}
+
+static inline void nq_notif_handler(u32 id, u32 payload)
+{
+	mc_dev_devel("NQ notif for id %x payload %x", id, payload);
+	if (is_iwp_id(id))
+		l_ctx.iwp_notif_handler(id, payload);
+	else
+		l_ctx.mcp_notif_handler(id, payload);
+}
+
+static int irq_bh_worker(void *arg)
+{
+	struct notification_queue *rx = l_ctx.nq.rx;
+
+	while (1) {
+		wait_for_completion_killable(&l_ctx.irq_bh_complete);
+
+		/* This thread can only be stopped with nq_stop */
+		if (!l_ctx.irq_bh_thread_run)
+			break;
+
+		/* Deal with all pending notifications in one go */
+		while ((rx->hdr.write_cnt - rx->hdr.read_cnt) > 0) {
+			struct notification nf;
+
+			nf = rx->notification[
+				rx->hdr.read_cnt % rx->hdr.queue_size];
+
+			/*
+			 * Ensure read_cnt writing happens after buffer read
+			 * We want an ARM dmb() / ARM64 dmb(sy) here
+			 */
+			smp_mb();
+			rx->hdr.read_cnt++;
+			/*
+			 * Ensure read_cnt writing finishes before reader
+			 * We want an ARM dsb() / ARM64 dsb(sy) here
+			 */
+			rmb();
+			nq_notif_handler(nf.session_id, nf.payload);
+		}
+
+		/*
+		 * Finished processing notifications. It does not matter
+		 * whether there actually were any notifications or not.
+		 * S-SIQs can also be triggered by an SWd driver which was
+		 * waiting for a FIQ. In this case the S-SIQ tells the NWd
+		 * that the SWd is no longer idle and will need scheduling
+		 * again.
+		 */
+		nq_scheduler_command(NSIQ);
+	}
+	return 0;
+}
+
+static irqreturn_t irq_handler(int intr, void *arg)
+{
+	/* wake up thread to continue handling this interrupt */
+	complete(&l_ctx.irq_bh_complete);
+	return IRQ_HANDLED;
+}
+
+void nq_session_init(struct nq_session *session, bool is_gp)
+{
+	session->id = SID_INVALID;
+	session->payload = 0;
+	INIT_LIST_HEAD(&session->list);
+	mutex_init(&session->mutex);
+	session->state = NQ_NOTIF_IDLE;
+	session->cpu_clk = 0;
+	session->is_gp = is_gp;
+}
+
+void nq_session_exit(struct nq_session *session)
+{
+	mutex_lock(&l_ctx.notifications_mutex);
+	if (!list_empty(&session->list))
+		list_del(&session->list);
+	mutex_unlock(&l_ctx.notifications_mutex);
+}
+
+void nq_session_state_update(struct nq_session *session,
+			     enum nq_notif_state state)
+{
+	if (state < NQ_NOTIF_RECEIVED)
+		return;
+
+	session_state_update_internal(session, state);
+}
+
+int nq_session_notify(struct nq_session *session, u32 id, u32 payload)
+{
+	int ret = 0;
+
+	mutex_lock(&l_ctx.notifications_mutex);
+	session->id = id;
+	session->payload = payload;
+	if (!list_empty(&l_ctx.notifications) || notif_queue_full()) {
+		if (!list_empty(&session->list)) {
+			ret = -EAGAIN;
+			if (payload != session->payload) {
+				mc_dev_err(ret,
+					   "skip %x payload change %x -> %x",
+					   session->id, session->payload,
+					   payload);
+			} else {
+				mc_dev_devel("skip %x payload %x",
+					     session->id, payload);
+			}
+		} else {
+			mc_dev_devel("push %x payload %x", session->id,
+				     payload);
+			/* session->payload = payload; */
+			list_add_tail(&session->list, &l_ctx.notifications);
+			session_state_update_internal(session, NQ_NOTIF_QUEUED);
+		}
+
+		nq_notifications_flush();
+
+		if (nq_scheduler_command(YIELD))
+			ret = -EPROTO;
+	} else {
+		mc_dev_devel("send %x payload %x", session->id, payload);
+		notif_queue_push(session->id, payload);
+		session_state_update_internal(session, NQ_NOTIF_SENT);
+		if (nq_scheduler_command(NSIQ))
+			ret = -EPROTO;
+	}
+
+	mutex_unlock(&l_ctx.notifications_mutex);
+	return ret;
+}
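+
+/*
+ * Summary of nq_session_notify() outcomes (illustrative):
+ * - a backlog exists or the SWd queue is full, and the session is already
+ *   queued: return -EAGAIN (the pending entry is kept)
+ * - a backlog exists or the SWd queue is full, and the session is not yet
+ *   queued: park it on the overflow list and request a YIELD
+ * - otherwise: push the notification straight into the SWd queue and
+ *   request an NSIQ
+ */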
+
+const char *nq_session_state(const struct nq_session *session, u64 *cpu_clk)
+{
+	if (cpu_clk)
+		*cpu_clk = session->cpu_clk;
+
+	switch (session->state) {
+	case NQ_NOTIF_IDLE:
+		return "idle";
+	case NQ_NOTIF_QUEUED:
+		return "queued";
+	case NQ_NOTIF_SENT:
+		return "sent";
+	case NQ_NOTIF_RECEIVED:
+		return "received";
+	case NQ_NOTIF_CONSUMED:
+		return "consumed";
+	case NQ_NOTIF_DEAD:
+		return "dead";
+	}
+	return "error";
+}
+
+static ssize_t debug_crashdump_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	if (l_ctx.dump.off)
+		return simple_read_from_buffer(user_buf, count, ppos,
+					       l_ctx.dump.buf, l_ctx.dump.off);
+
+	return 0;
+}
+
+static const struct file_operations debug_crashdump_ops = {
+	.read = debug_crashdump_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t debug_smclog_read(struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos,
+				  mc_fastcall_debug_smclog);
+}
+
+static const struct file_operations debug_smclog_ops = {
+	.read = debug_smclog_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+static void nq_dump_status(void)
+{
+	static const struct {
+		unsigned int index;
+		const char *msg;
+	} status_map[] = {
+		/**< MobiCore control flags */
+		{ MC_EXT_INFO_ID_FLAGS, "flags"},
+		/**< MobiCore halt condition code */
+		{ MC_EXT_INFO_ID_HALT_CODE, "haltCode"},
+		/**< MobiCore halt condition instruction pointer */
+		{ MC_EXT_INFO_ID_HALT_IP, "haltIp"},
+		/**< MobiCore fault counter */
+		{ MC_EXT_INFO_ID_FAULT_CNT, "faultRec.cnt"},
+		/**< MobiCore last fault cause */
+		{ MC_EXT_INFO_ID_FAULT_CAUSE, "faultRec.cause"},
+		/**< MobiCore last fault meta */
+		{ MC_EXT_INFO_ID_FAULT_META, "faultRec.meta"},
+		/**< MobiCore last fault threadid */
+		{ MC_EXT_INFO_ID_FAULT_THREAD, "faultRec.thread"},
+		/**< MobiCore last fault instruction pointer */
+		{ MC_EXT_INFO_ID_FAULT_IP, "faultRec.ip"},
+		/**< MobiCore last fault stack pointer */
+		{ MC_EXT_INFO_ID_FAULT_SP, "faultRec.sp"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_DFSR, "faultRec.arch.dfsr"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_ADFSR, "faultRec.arch.adfsr"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_DFAR, "faultRec.arch.dfar"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_IFSR, "faultRec.arch.ifsr"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_AIFSR, "faultRec.arch.aifsr"},
+		/**< MobiCore last fault ARM arch information */
+		{ MC_EXT_INFO_ID_FAULT_ARCH_IFAR, "faultRec.arch.ifar"},
+		/**< MobiCore configured by Daemon via fc_init flag */
+		{ MC_EXT_INFO_ID_MC_CONFIGURED, "mcData.flags"},
+		/**< MobiCore exception handler last partner */
+		{ MC_EXT_INFO_ID_MC_EXC_PARTNER, "mcExcep.partner"},
+		/**< MobiCore exception handler last peer */
+		{ MC_EXT_INFO_ID_MC_EXC_IPCPEER, "mcExcep.peer"},
+		/**< MobiCore exception handler last IPC message */
+		{ MC_EXT_INFO_ID_MC_EXC_IPCMSG, "mcExcep.cause"},
+		/**< MobiCore exception handler last IPC data */
+		{MC_EXT_INFO_ID_MC_EXC_IPCDATA, "mcExcep.meta"},
+		/**< MobiCore last crashing task offset */
+		{MC_EXT_INFO_ID_TASK_OFFSET,
+		"faultRec.offset.task"},
+		/**< MobiCore last crashing task's mcLib offset */
+		{MC_EXT_INFO_ID_MCLIB_OFFSET,
+		"faultRec.offset.mclib"},
+	};
+
+	char uuid_str[33];
+	int ret = 0;
+	size_t i;
+
+	if (l_ctx.dump.off)
+		ret = -EBUSY;
+
+	mc_dev_info("TEE HALTED");
+	if (l_ctx.tee_version) {
+		mc_dev_info("TEE version: %s", l_ctx.tee_version);
+		if (ret >= 0)
+			ret = kasnprintf(&l_ctx.dump, "TEE version: %s\n",
+					 l_ctx.tee_version);
+	}
+
+	mc_dev_info("Status dump:");
+	for (i = 0; i < (size_t)ARRAY_SIZE(status_map); i++) {
+		u32 info;
+
+		if (fc_info(status_map[i].index, NULL, &info))
+			return;
+
+		mc_dev_info("  %-22s= 0x%08x", status_map[i].msg, info);
+		if (ret >= 0)
+			ret = kasnprintf(&l_ctx.dump, "%-22s= 0x%08x\n",
+					 status_map[i].msg, info);
+	}
+
+	/* construct UUID string */
+	for (i = 0; i < 4; i++) {
+		u32 info;
+		size_t j;
+
+		if (fc_info(MC_EXT_INFO_ID_MC_EXC_UUID + i, NULL, &info))
+			return;
+
+		for (j = 0; j < sizeof(info); j++) {
+			snprintf(&uuid_str[(i * sizeof(info) + j) * 2], 3,
+				 "%02x", (info >> (j * 8)) & 0xff);
+		}
+	}
+
+	mc_dev_info("  %-22s= 0x%s", "mcExcep.uuid", uuid_str);
+	if (ret >= 0)
+		ret = kasnprintf(&l_ctx.dump, "%-22s= 0x%s\n", "mcExcep.uuid",
+				 uuid_str);
+
+	if (ret < 0) {
+		kfree(l_ctx.dump.buf);
+		l_ctx.dump.off = 0;
+		return;
+	}
+
+	debugfs_create_file("crashdump", 0400, g_ctx.debug_dir, NULL,
+			    &debug_crashdump_ops);
+	debugfs_create_file("last_smc_commands", 0400, g_ctx.debug_dir, NULL,
+			    &debug_smclog_ops);
+}
+
+static void nq_handle_tee_crash(void)
+{
+	/*
+	 * Do not change the call order: the debugfs nq status file needs
+	 * to be created before requesting the Daemon to read it.
+	 */
+	nq_dump_status();
+	blocking_notifier_call_chain(&l_ctx.tee_stop_notifiers, 0, NULL);
+}
+
+static inline void set_sleep_mode_rq(u16 sleep_req)
+{
+	mutex_lock(&l_ctx.buffer_mutex);
+	l_ctx.mcp_buffer->flags.sleep_mode.sleep_req = sleep_req;
+	mutex_unlock(&l_ctx.buffer_mutex);
+}
+
+static inline bool nq_suspended(void)
+{
+	struct mcp_flags *flags = &l_ctx.mcp_buffer->flags;
+	bool ret;
+
+	mutex_lock(&l_ctx.buffer_mutex);
+	ret = flags->sleep_mode.ready_to_sleep & MC_STATE_READY_TO_SLEEP;
+	if (!ret) {
+		mc_dev_devel("IDLE=%d", flags->schedule);
+		mc_dev_devel("Request Sleep=%d", flags->sleep_mode.sleep_req);
+		mc_dev_devel("Sleep Ready=%d",
+			     flags->sleep_mode.ready_to_sleep);
+	}
+
+	mutex_unlock(&l_ctx.buffer_mutex);
+	return ret;
+}
+
+/*
+ * Get the requested SWd sleep timeout value (ms)
+ * - if the timeout is -1, wait indefinitely
+ * - if the timeout is 0, re-schedule immediately (timeouts in µs in the SWd)
+ * - otherwise sleep for the required time
+ * returns true if sleep is required, false otherwise
+ */
+static inline bool nq_get_idle_timeout(s32 *timeout)
+{
+	u32 schedule;
+	bool ret;
+
+	mutex_lock(&l_ctx.buffer_mutex);
+	schedule = l_ctx.mcp_buffer->flags.schedule;
+	if (schedule == MC_FLAG_SCHEDULE_IDLE) {
+		*timeout = l_ctx.mcp_buffer->flags.timeout_ms;
+		ret = true;
+	} else {
+		ret = false;
+	}
+
+	mutex_unlock(&l_ctx.buffer_mutex);
+	return ret;
+}
+
+union mcp_message *nq_get_mcp_buffer(void)
+{
+	return &l_ctx.mcp_buffer->message;
+}
+
+struct interworld_session *nq_get_iwp_buffer(void)
+{
+	return l_ctx.iwp_buffer;
+}
+
+void nq_set_version_ptr(char *version)
+{
+	l_ctx.tee_version = version;
+}
+
+void nq_register_notif_handler(void (*handler)(u32 id, u32 payload), bool iwp)
+{
+	if (iwp)
+		l_ctx.iwp_notif_handler = handler;
+	else
+		l_ctx.mcp_notif_handler = handler;
+}
+
+int nq_register_tee_stop_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&l_ctx.tee_stop_notifiers, nb);
+}
+
+int nq_unregister_tee_stop_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&l_ctx.tee_stop_notifiers,
+						  nb);
+}
+
+ssize_t nq_get_stop_message(char __user *buffer, size_t size)
+{
+	size_t max_len = l_ctx.dump.size - l_ctx.dump.off;
+	char *buf = l_ctx.dump.buf;
+	int ret;
+
+	if (!l_ctx.dump.off || !max_len)
+		return 0;
+
+	if (size > max_len)
+		size = max_len;
+
+	ret = copy_to_user(buffer, buf, size);
+	if (ret)
+		return -EFAULT;
+
+	return size;
+}
+
+void nq_signal_tee_hung(void)
+{
+	mc_dev_devel("force stop the notification queue");
+	/* Stop the tee_scheduler thread */
+	l_ctx.tee_hung = true;
+	l_ctx.tee_scheduler_run = false;
+	complete(&l_ctx.idle_complete);
+	nq_scheduler_command(NONE);
+}
+
+static int nq_scheduler_pm_command(enum sched_command command)
+{
+	int ret = -EPERM;
+
+	if (IS_ERR_OR_NULL(l_ctx.tee_scheduler_thread))
+		return -EFAULT;
+
+	mutex_lock(&l_ctx.sleep_mutex);
+
+	/* Send request */
+	nq_scheduler_command(command);
+
+	/* Wait for scheduler to reply */
+	wait_for_completion(&l_ctx.sleep_complete);
+	mutex_lock(&l_ctx.request_mutex);
+	if (command == SUSPEND) {
+		if (l_ctx.suspended)
+			ret = 0;
+	} else {
+		if (!l_ctx.suspended)
+			ret = 0;
+	}
+
+	mutex_unlock(&l_ctx.request_mutex);
+	mutex_unlock(&l_ctx.sleep_mutex);
+	return ret;
+}
+
+static int nq_boot_tee(void)
+{
+	size_t q_len = ALIGN(2 * (sizeof(struct notification_queue_header) +
+		NQ_NUM_ELEMS * sizeof(struct notification)), 4);
+	struct irq_data *irq_d = irq_get_irq_data(l_ctx.irq);
+	int ret;
+
+	/* Call the INIT fastcall to setup shared buffers */
+	ret = fc_init(virt_to_phys(l_ctx.mci),
+		      (uintptr_t)l_ctx.mcp_buffer - (uintptr_t)l_ctx.mci, q_len,
+		      sizeof(*l_ctx.mcp_buffer));
+	logging_run();
+	if (ret)
+		return ret;
+
+	/* Set initialization values */
+#if defined(MC_INTR_SSIQ_SWD)
+	l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_IRQ;
+	l_ctx.mcp_buffer->message.init_values.irq = MC_INTR_SSIQ_SWD;
+#endif
+	l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_TIME;
+	if (irq_d) {
+#ifdef CONFIG_ARCH_MEDIATEK
+		if (irq_d->parent_data) {
+			l_ctx.mcp_buffer->message.init_values.flags |=
+				MC_IV_FLAG_IRQ;
+			l_ctx.mcp_buffer->message.init_values.irq =
+				irq_d->parent_data->hwirq;
+			mc_dev_info("irq_d->parent_data->hwirq is 0x%lx\n",
+				irq_d->parent_data->hwirq);
+		}
+#else
+		l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_IRQ;
+		l_ctx.mcp_buffer->message.init_values.irq = irq_d->hwirq;
+		mc_dev_info("irq_d->hwirq is 0x%lx\n", irq_d->hwirq);
+#endif
+	}
+	l_ctx.mcp_buffer->message.init_values.time_ofs =
+		(u32)((uintptr_t)l_ctx.time - (uintptr_t)l_ctx.mci);
+	l_ctx.mcp_buffer->message.init_values.time_len =
+			sizeof(*l_ctx.time);
+
+	l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_IWP;
+	l_ctx.mcp_buffer->message.init_values.iws_buf_ofs =
+		(u64)((uintptr_t)l_ctx.iwp_buffer - (uintptr_t)l_ctx.mci);
+	l_ctx.mcp_buffer->message.init_values.iws_buf_size =
+		MAX_IW_SESSION * sizeof(struct interworld_session);
+
+	/* First empty N-SIQ to set up the MCI structure */
+	ret = fc_nsiq(0, 0);
+	logging_run();
+	if (ret)
+		return ret;
+
+	/*
+	 * Wait until the TEE state switches to MC_STATUS_INITIALIZED
+	 * It is assumed that it always switches state at some point
+	 */
+	do {
+		u32 status = 0;
+		u32 timeslice;
+
+		ret = fc_info(MC_EXT_INFO_ID_MCI_VERSION, &status, NULL);
+		logging_run();
+		if (ret)
+			return ret;
+
+		switch (status) {
+		case MC_STATUS_NOT_INITIALIZED:
+			/* Switch to the TEE to give it more CPU time. */
+			ret = EAGAIN;
+			for (timeslice = 0; timeslice < 10; timeslice++) {
+				int tmp_ret = fc_yield(timeslice);
+
+				logging_run();
+				if (tmp_ret)
+					return tmp_ret;
+			}
+
+			/* No need to loop like mad */
+			if (ret == EAGAIN)
+				usleep_range(100, 500);
+
+			break;
+		case MC_STATUS_HALT:
+			ret = -ENODEV;
+			nq_handle_tee_crash();
+			mc_dev_err(ret, "halt during init, state 0x%x", status);
+			return ret;
+		case MC_STATUS_INITIALIZED:
+			mc_dev_devel("ready");
+			break;
+		default:
+			/* MC_STATUS_BAD_INIT or anything else */
+			ret = -EIO;
+			mc_dev_err(ret, "MCI init failed, state 0x%x", status);
+			return ret;
+		}
+	} while (ret == EAGAIN);
+
+	return ret;
+}
+
+static inline bool tee_sleep(s32 timeout_ms)
+{
+	bool infinite_timeout = timeout_ms < 0;
+
+	/* TEE is going to sleep */
+	mc_clock_disable();
+	do {
+		s32 local_timeout_ms;
+		unsigned long jiffies;
+
+		if (infinite_timeout) {
+			local_timeout_ms = DEFAULT_TIMEOUT_MS;
+		} else {
+			local_timeout_ms = timeout_ms;
+			if (local_timeout_ms > DEFAULT_TIMEOUT_MS)
+				local_timeout_ms = DEFAULT_TIMEOUT_MS;
+		}
+
+		jiffies = msecs_to_jiffies(local_timeout_ms);
+		if (wait_for_completion_timeout(&l_ctx.idle_complete, jiffies))
+			break;
+
+		if (!infinite_timeout)
+			timeout_ms -= local_timeout_ms;
+	} while (timeout_ms);
+
+	/* TEE is getting back to work */
+	mc_clock_enable();
+	return timeout_ms == 0;
+}
+
+/*
+ * This thread, and only this thread, schedules the SWd. Hence, reading the idle
+ * status and its associated timeout is safe from race conditions.
+ */
+static int tee_scheduler(void *arg)
+{
+	bool swd_notify = false;
+	int ret = 0;
+
+	/* Enable TEE clock */
+	mc_clock_enable();
+
+	/* Logging */
+	if (l_ctx.log_buffer_size) {
+		ret = fc_trace_init(l_ctx.log_buffer, l_ctx.log_buffer_size);
+		if (!ret) {
+			logging_run();
+			l_ctx.log_buffer_busy = true;
+			mc_dev_info("registered log buffer of size %d",
+				    l_ctx.log_buffer_size);
+		} else {
+			mc_dev_err(ret, "failed to register log buffer");
+			/* Ignore error */
+			ret = 0;
+		}
+	} else {
+		mc_dev_info("no log buffer to register");
+	}
+
+	/* Bootup */
+	l_ctx.boot_ret = nq_boot_tee();
+	complete(&l_ctx.boot_complete);
+	if (l_ctx.boot_ret) {
+		mc_clock_disable();
+		return l_ctx.boot_ret;
+	}
+
+	/* Run */
+	while (1) {
+		s32 timeout_ms = -1;
+		bool pm_request = false;
+		u8 tee_flags;
+
+		if (l_ctx.suspended || nq_get_idle_timeout(&timeout_ms)) {
+			/* If timeout is 0 we keep scheduling the SWd */
+			if (!timeout_ms)
+				nq_scheduler_command(NSIQ);
+			else if (tee_sleep(timeout_ms))
+				/* Timed out, force SWd schedule */
+				nq_scheduler_command(NSIQ);
+		}
+
+		/*
+		 * Potential exit causes:
+		 * 1) nq_stop is called: just stop the thread (no crash dump)
+		 * 2) nq_signal_tee_hung: breaks the loop and handles the hang
+		 *    as a crash
+		 * 3) The thread detects a TEE crash and breaks the loop
+		 */
+		if (!l_ctx.tee_scheduler_run)
+			break;
+
+		/* Get requested command if any */
+		mutex_lock(&l_ctx.request_mutex);
+		switch (l_ctx.request) {
+		case NONE:
+			break;
+		case YIELD:
+			swd_notify = false;
+			break;
+		case NSIQ:
+			swd_notify = true;
+			break;
+		case SUSPEND:
+			/* Force N_SIQ */
+			swd_notify = true;
+			set_sleep_mode_rq(MC_FLAG_REQ_TO_SLEEP);
+			pm_request = true;
+			break;
+		case RESUME:
+			/* Force N_SIQ */
+			swd_notify = true;
+			set_sleep_mode_rq(MC_FLAG_NO_SLEEP_REQ);
+			pm_request = true;
+			break;
+		}
+
+		l_ctx.request = NONE;
+		nq_update_time();
+		mutex_unlock(&l_ctx.request_mutex);
+
+		/* Reset timeout so we don't loop if SWd halted */
+		mutex_lock(&l_ctx.buffer_mutex);
+		l_ctx.mcp_buffer->flags.timeout_ms = -1;
+		mutex_unlock(&l_ctx.buffer_mutex);
+
+		if (swd_notify) {
+			u32 session_id = 0;
+			u32 payload = 0;
+
+			retrieve_last_session_payload(&session_id, &payload);
+			swd_notify = false;
+
+			/* Call SWd scheduler */
+			fc_nsiq(session_id, payload);
+		} else {
+			/* Resume SWd from where it was */
+			fc_yield(0);
+		}
+
+		/* Always flush log buffer after the SWd has run */
+		logging_run();
+
+		/* Check crash */
+		mutex_lock(&l_ctx.buffer_mutex);
+		tee_flags = l_ctx.mcp_buffer->flags.tee_flags;
+		mutex_unlock(&l_ctx.buffer_mutex);
+		if (tee_flags & MC_STATE_FLAG_TEE_HALT_MASK) {
+			ret = -EHOSTUNREACH;
+			mc_dev_err(ret, "TEE halted, exiting");
+			break;
+		}
+
+		/* Should have suspended by now if requested */
+		mutex_lock(&l_ctx.request_mutex);
+		if (pm_request) {
+			l_ctx.suspended = nq_suspended();
+			complete(&l_ctx.sleep_complete);
+		}
+
+		mutex_unlock(&l_ctx.request_mutex);
+
+		/* Flush pending notifications if possible */
+		mutex_lock(&l_ctx.notifications_mutex);
+		if (nq_notifications_flush())
+			complete(&l_ctx.idle_complete);
+
+		mutex_unlock(&l_ctx.notifications_mutex);
+	}
+
+	mc_dev_devel("loop exit, ret is %d", ret);
+	if (ret || l_ctx.tee_hung) {
+		/* There is an error, the TEE must have crashed */
+		nq_handle_tee_crash();
+	}
+
+	/* Logging */
+	ret = fc_trace_deinit();
+	if (!ret)
+		l_ctx.log_buffer_busy = false;
+	else
+		mc_dev_err(ret, "failed to unregister log buffer");
+
+	mc_clock_disable();
+	return ret;
+}
+
+int nq_suspend(void)
+{
+	return nq_scheduler_pm_command(SUSPEND);
+}
+
+int nq_resume(void)
+{
+	return nq_scheduler_pm_command(RESUME);
+}
+
+int nq_start(void)
+{
+	int ret;
+#if defined(CPU_IDS)
+	struct cpumask new_mask;
+	unsigned int cpu_id[] = CPU_IDS;
+	int i;
+#endif
+	/* Make sure we have the interrupt before going on */
+#if defined(CONFIG_OF)
+	l_ctx.irq = irq_of_parse_and_map(g_ctx.mcd->of_node, 0);
+	mc_dev_info("SSIQ from dts is 0x%08x", l_ctx.irq);
+#endif
+#if defined(MC_INTR_SSIQ)
+	if (l_ctx.irq <= 0)
+		l_ctx.irq = MC_INTR_SSIQ;
+#endif
+
+	if (l_ctx.irq <= 0) {
+		ret = -EINVAL;
+		mc_dev_err(ret, "No IRQ number, aborting");
+		return ret;
+	}
+
+	ret = request_irq(l_ctx.irq, irq_handler, IRQF_TRIGGER_RISING,
+			  "trustonic", NULL);
+	if (ret)
+		return ret;
+
+	/*
+	 * Initialize the time structure for the SWd.
+	 * At this stage we don't know whether the SWd needs the REE time,
+	 * so we set it anyway.
+	 */
+	nq_update_time();
+
+	/* Setup S-SIQ interrupt handler and its bottom-half */
+	l_ctx.irq_bh_thread_run = true;
+	l_ctx.irq_bh_thread = kthread_run(irq_bh_worker, NULL, "tee_irq_bh");
+	if (IS_ERR(l_ctx.irq_bh_thread)) {
+		ret = PTR_ERR(l_ctx.irq_bh_thread);
+		mc_dev_err(ret, "irq_bh_worker thread creation failed");
+		return ret;
+	}
+
+	/* Scheduler */
+	l_ctx.tee_scheduler_run = true;
+	l_ctx.tee_scheduler_thread = kthread_create(tee_scheduler, NULL,
+						    "tee_scheduler");
+	if (IS_ERR(l_ctx.tee_scheduler_thread)) {
+		ret = PTR_ERR(l_ctx.tee_scheduler_thread);
+		mc_dev_err(ret, "tee_scheduler thread creation failed");
+		return ret;
+	}
+#if defined(CPU_IDS)
+	cpumask_clear(&new_mask);
+	for (i = 0; i < NB_CPU; i++)
+		cpumask_set_cpu(cpu_id[i], &new_mask);
+	set_cpus_allowed_ptr(l_ctx.tee_scheduler_thread, &new_mask);
+	mc_dev_info("tee_scheduler running only on %d CPU", NB_CPU);
+#endif
+
+	wake_up_process(l_ctx.tee_scheduler_thread);
+
+	wait_for_completion(&l_ctx.boot_complete);
+	if (l_ctx.boot_ret)
+		return l_ctx.boot_ret;
+
+	complete(&l_ctx.idle_complete);
+	return 0;
+}
+
+void nq_stop(void)
+{
+	/* Scheduler */
+	l_ctx.tee_scheduler_run = false;
+	complete(&l_ctx.idle_complete);
+	kthread_stop(l_ctx.tee_scheduler_thread);
+
+	/* NQ */
+	l_ctx.irq_bh_thread_run = false;
+	complete(&l_ctx.irq_bh_complete);
+	kthread_stop(l_ctx.irq_bh_thread);
+	free_irq(l_ctx.irq, NULL);
+}
+
+int nq_init(void)
+{
+	size_t q_len, mci_len;
+	unsigned long mci;
+	int ret;
+
+	ret = mc_clock_init();
+	if (ret)
+		goto err_clock;
+
+	ret = logging_init(&l_ctx.log_buffer, &l_ctx.log_buffer_size);
+	if (ret)
+		goto err_logging;
+
+	/* Setup crash handler function list */
+	BLOCKING_INIT_NOTIFIER_HEAD(&l_ctx.tee_stop_notifiers);
+
+	mutex_init(&l_ctx.buffer_mutex);
+	init_completion(&l_ctx.irq_bh_complete);
+	/* Setup notification queue mutex */
+	mutex_init(&l_ctx.notifications_mutex);
+	INIT_LIST_HEAD(&l_ctx.notifications);
+
+	/* NQ_NUM_ELEMS must be a power of 2 */
+	q_len = ALIGN(2 * (sizeof(struct notification_queue_header) +
+			   NQ_NUM_ELEMS * sizeof(struct notification)), 4);
+
+	mci_len = q_len +
+		sizeof(*l_ctx.time) +
+		sizeof(*l_ctx.mcp_buffer) +
+		MAX_IW_SESSION * sizeof(struct interworld_session);
+
+	l_ctx.order = get_order(mci_len);
+
+	mci = __get_free_pages(GFP_USER | __GFP_ZERO, l_ctx.order);
+	if (!mci)
+		goto err_mci;
+
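+	/*
+	 * Carve the MCI buffer up as follows (illustrative layout):
+	 * [ tx NQ | rx NQ | mcp_buffer | iwp_buffer | time ]
+	 * with mcp_buffer, iwp_buffer and time aligned to 8 bytes.
+	 */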
+	l_ctx.nq.tx = (struct notification_queue *)mci;
+	l_ctx.nq.tx->hdr.queue_size = NQ_NUM_ELEMS;
+	mci += sizeof(struct notification_queue_header) +
+	    l_ctx.nq.tx->hdr.queue_size * sizeof(struct notification);
+
+	l_ctx.nq.rx = (struct notification_queue *)mci;
+	l_ctx.nq.rx->hdr.queue_size = NQ_NUM_ELEMS;
+	mci += sizeof(struct notification_queue_header) +
+	    l_ctx.nq.rx->hdr.queue_size * sizeof(struct notification);
+
+	l_ctx.mcp_buffer = (void *)ALIGN(mci, 8);
+	mci += sizeof(struct mcp_buffer);
+
+	/* The interworld buffer contains:
+	 *   MAX_IW_SESSION sessions, and for each session S(i) there may be
+	 *   D(i) bytes of extra data; note that D(i) may differ from D(j)
+	 *
+	 * v0: D(i) = 0
+	 */
+	/* mci should be already 8 bytes aligned */
+	l_ctx.iwp_buffer = (void *)ALIGN(mci, 8);
+	mci += MAX_IW_SESSION * sizeof(struct interworld_session);
+
+	l_ctx.time = (void *)ALIGN(mci, 8);
+
+	/* Scheduler */
+	init_completion(&l_ctx.boot_complete);
+	init_completion(&l_ctx.idle_complete);
+	init_completion(&l_ctx.sleep_complete);
+	mutex_init(&l_ctx.sleep_mutex);
+	mutex_init(&l_ctx.request_mutex);
+	return 0;
+
+err_mci:
+	logging_exit(l_ctx.log_buffer_busy);
+err_logging:
+	mc_clock_exit();
+err_clock:
+	return ret;
+}
+
+void nq_exit(void)
+{
+	if (l_ctx.dump.off)
+		kfree(l_ctx.dump.buf);
+
+	free_pages((unsigned long)l_ctx.mci, l_ctx.order);
+	logging_exit(l_ctx.log_buffer_busy);
+	mc_clock_exit();
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.h
new file mode 100644
index 0000000..9d2e6c9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/nq.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_NQ_H_
+#define _MC_NQ_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+/** Max number of interworld sessions allocated in the MCI buffer */
+#define MAX_IW_SESSION 256
+
+enum nq_notif_state {
+	NQ_NOTIF_IDLE,		/* Nothing happened yet */
+	NQ_NOTIF_QUEUED,	/* Notification in overflow queue */
+	NQ_NOTIF_SENT,		/* Notification in send queue */
+	NQ_NOTIF_RECEIVED,	/* Notification received */
+	NQ_NOTIF_CONSUMED,	/* Notification reported to CA */
+	NQ_NOTIF_DEAD,		/* Error reported to CA */
+};
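+
+/*
+ * Typical state flow (illustrative): IDLE -> SENT -> RECEIVED -> CONSUMED,
+ * with QUEUED inserted before SENT when the SWd queue overflows, and DEAD
+ * used when an error is reported to the CA instead.
+ */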
+
+/* FIXME to be renamed */
+struct nq_session {
+	/* Notification id */
+	u32			id;
+	/* Notification payload */
+	u32			payload;
+	/* Notifications list */
+	struct list_head	list;
+	/* Notification debug mutex */
+	struct mutex		mutex;
+	/* Current notification/session state */
+	enum nq_notif_state	state;
+	/* Time at notification state change */
+	u64			cpu_clk;
+	/* This TA is of Global Platform type, set by upper layer */
+	int			is_gp;
+};
+
+/* Notification queue channel */
+void nq_session_init(struct nq_session *session, bool is_gp);
+void nq_session_exit(struct nq_session *session);
+void nq_session_state_update(struct nq_session *session,
+			     enum nq_notif_state state);
+int nq_session_notify(struct nq_session *session, u32 id, u32 payload);
+const char *nq_session_state(const struct nq_session *session, u64 *cpu_clk);
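+
+/*
+ * Illustrative lifecycle sketch (not part of the driver; the MCP/IWP layers
+ * are assumed to be the callers of these helpers):
+ *
+ *	struct nq_session nq;
+ *
+ *	nq_session_init(&nq, false);		// MC (non-GP) session
+ *	nq_session_notify(&nq, session_id, 0);	// queue or send a notification
+ *	...					// state updated via nq_session_state_update()
+ *	nq_session_exit(&nq);
+ */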
+
+/* Services */
+union mcp_message *nq_get_mcp_buffer(void);
+struct interworld_session *nq_get_iwp_buffer(void);
+void nq_set_version_ptr(char *version);
+void nq_register_notif_handler(void (*handler)(u32 id, u32 payload), bool iwp);
+int nq_register_tee_stop_notifier(struct notifier_block *nb);
+int nq_unregister_tee_stop_notifier(struct notifier_block *nb);
+ssize_t nq_get_stop_message(char __user *buffer, size_t size);
+void nq_signal_tee_hung(void);
+
+/* SWd suspend/resume */
+int nq_suspend(void);
+int nq_resume(void);
+
+/* Start/stop TEE */
+int nq_start(void);
+void nq_stop(void);
+
+/* Initialisation/cleanup */
+int nq_init(void);
+void nq_exit(void);
+
+#endif /* _MC_NQ_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/platform.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/platform.h
new file mode 100644
index 0000000..a93a156
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/platform.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_DRV_PLATFORM_H_
+#define _MC_DRV_PLATFORM_H_
+
+/* Ensure consistency for Fastcall ID between NWd and TEE */
+#define MC_AARCH32_FC
+
+/* Enable Fastcall worker thread */
+#define MC_FASTCALL_WORKER_THREAD
+
+#if defined(CONFIG_ARM64)
+/* Enable LPAE on 64-bit platforms */
+#ifndef CONFIG_TRUSTONIC_TEE_LPAE
+#define CONFIG_TRUSTONIC_TEE_LPAE
+#endif
+#endif
+
+/* For retrieving SSIQ from dts */
+#define MC_DEVICE_PROPNAME	"trustonic,mobicore"
+
+#endif /* _MC_DRV_PLATFORM_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api.h
new file mode 100644
index 0000000..2c7a4c6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef __TEE_CLIENT_API_H__
+#define __TEE_CLIENT_API_H__
+
+#include "tee_client_types.h"
+#include "tee_client_error.h"
+
+#include "tee_client_api_imp.h"
+
+/* Include GP spec naming (TEEC_*) data type and functions */
+#include "tee_client_api_cust.h"
+
+#define TEEC_EXPORT
+
+/*
+ * The header tee_client_api_imp.h must define implementation-dependent types,
+ * constants and macros.
+ *
+ * The implementation-dependent types are:
+ *   - teec_context_imp
+ *   - teec_session_imp
+ *   - teec_shared_memory_imp
+ *   - teec_operation_imp
+ *
+ * The implementation-dependent constants are:
+ *   - TEEC_CONFIG_SHAREDMEM_MAX_SIZE
+ * The implementation-dependent macros are:
+ *   - TEEC_PARAM_TYPES
+ */
+
+struct teec_value {
+	u32 a;
+	u32 b;
+};
+
+/* Type definitions */
+struct teec_context {
+	struct teec_context_imp imp;
+};
+
+struct teec_session {
+	struct teec_session_imp imp;
+};
+
+struct teec_shared_memory {
+	union {
+		void		      *buffer;
+		int		      fd;
+	};
+	size_t			      size;
+	u32			      flags;
+	struct teec_shared_memory_imp imp;
+};
+
+struct teec_temp_memory_reference {
+	void   *buffer;
+	size_t size;
+};
+
+struct teec_registered_memory_reference {
+	struct teec_shared_memory *parent;
+	size_t			  size;
+	size_t			  offset;
+};
+
+union teec_parameter {
+	struct teec_temp_memory_reference	tmpref;
+	struct teec_registered_memory_reference	memref;
+	struct teec_value			value;
+};
+
+struct teec_operation {
+	u32			  started;
+	union {
+		u32		  param_types;
+		u32		  paramTypes;
+	};
+	union teec_parameter	  params[4];
+	struct teec_operation_imp imp;
+};
+
+#define TEEC_ORIGIN_API                     0x00000001
+#define TEEC_ORIGIN_COMMS                   0x00000002
+#define TEEC_ORIGIN_TEE                     0x00000003
+#define TEEC_ORIGIN_TRUSTED_APP             0x00000004
+
+#define TEEC_MEM_INPUT                      0x00000001
+#define TEEC_MEM_OUTPUT                     0x00000002
+#define TEEC_MEM_ION                        0x01000000
+
+#define TEEC_NONE                           0x0
+#define TEEC_VALUE_INPUT                    0x1
+#define TEEC_VALUE_OUTPUT                   0x2
+#define TEEC_VALUE_INOUT                    0x3
+#define TEEC_MEMREF_TEMP_INPUT              0x5
+#define TEEC_MEMREF_TEMP_OUTPUT             0x6
+#define TEEC_MEMREF_TEMP_INOUT              0x7
+#define TEEC_MEMREF_WHOLE                   0xC
+#define TEEC_MEMREF_PARTIAL_INPUT           0xD
+#define TEEC_MEMREF_PARTIAL_OUTPUT          0xE
+#define TEEC_MEMREF_PARTIAL_INOUT           0xF
+
+#define TEEC_LOGIN_PUBLIC                   0x00000000
+#define TEEC_LOGIN_USER                     0x00000001
+#define TEEC_LOGIN_GROUP                    0x00000002
+#define TEEC_LOGIN_APPLICATION              0x00000004
+#define TEEC_LOGIN_USER_APPLICATION         0x00000005
+#define TEEC_LOGIN_GROUP_APPLICATION        0x00000006
+
+#define TEEC_TIMEOUT_INFINITE               0xFFFFFFFF
+
+#pragma GCC visibility push(default)
+
+TEEC_EXPORT u32
+teec_initialize_context(const char *name, struct teec_context *context);
+
+TEEC_EXPORT void
+teec_finalize_context(struct teec_context *context);
+
+TEEC_EXPORT u32
+teec_register_shared_memory(struct teec_context *context,
+			    struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT u32
+teec_allocate_shared_memory(struct teec_context *context,
+			    struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT void
+teec_release_shared_memory(struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT u32
+teec_open_session(struct teec_context *context,
+		  struct teec_session *session,
+		  const struct teec_uuid *destination,
+		  u32 connection_method, /* Should be 0 */
+		  const void *connection_data,
+		  struct teec_operation *operation,
+		  u32 *return_origin);
+
+TEEC_EXPORT void
+teec_close_session(struct teec_session *session);
+
+TEEC_EXPORT u32
+teec_invoke_command(struct teec_session *session,
+		    u32 command_id,
+		    struct teec_operation *operation,
+		    u32 *return_origin);
+
+TEEC_EXPORT void
+teec_request_cancellation(struct teec_operation *operation);
+
+#pragma GCC visibility pop
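+
+/*
+ * Minimal call-flow sketch (illustrative only; error handling is omitted and
+ * the zeroed UUID below is a placeholder for a real Trusted Application UUID):
+ *
+ *	struct teec_context ctx;
+ *	struct teec_session session;
+ *	struct teec_operation op = { 0 };
+ *	struct teec_uuid uuid = { 0 };
+ *	u32 origin;
+ *
+ *	teec_initialize_context(NULL, &ctx);
+ *	teec_open_session(&ctx, &session, &uuid, TEEC_LOGIN_PUBLIC, NULL,
+ *			  NULL, &origin);
+ *	op.param_types = TEEC_PARAM_TYPES(TEEC_VALUE_INOUT, TEEC_NONE,
+ *					  TEEC_NONE, TEEC_NONE);
+ *	op.params[0].value.a = 42;
+ *	teec_invoke_command(&session, 1, &op, &origin);
+ *	teec_close_session(&session);
+ *	teec_finalize_context(&ctx);
+ */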
+
+#endif /* __TEE_CLIENT_API_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_cust.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_cust.h
new file mode 100644
index 0000000..61f8633
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_cust.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+
+/*
+ * This header file corresponds to V1.0 of the GlobalPlatform
+ * TEE Client API Specification
+ */
+#ifndef __TEE_CLIENT_API_CUST_H__
+#define __TEE_CLIENT_API_CUST_H__
+
+/*
+ * DATA TYPES
+ */
+#define TEEC_UUID teec_uuid
+#define TEEC_Context teec_context
+#define TEEC_Session teec_session
+#define TEEC_SharedMemory teec_shared_memory
+#define TEEC_TempMemoryReference teec_temp_memory_reference
+#define TEEC_RegisteredMemoryReference teec_registered_memory_reference
+#define TEEC_Value teec_value
+#define TEEC_Parameter teec_parameter
+#define TEEC_Operation teec_operation
+
+/*
+ * FUNCTIONS
+ */
+#define TEEC_InitializeContext teec_initialize_context
+#define TEEC_FinalizeContext teec_finalize_context
+#define TEEC_RegisterSharedMemory teec_register_shared_memory
+#define TEEC_AllocateSharedMemory teec_allocate_shared_memory
+#define TEEC_ReleaseSharedMemory teec_release_shared_memory
+#define TEEC_OpenSession teec_open_session
+#define TEEC_CloseSession teec_close_session
+#define TEEC_InvokeCommand teec_invoke_command
+#define TEEC_RequestCancellation teec_request_cancellation
+
+#endif /* __TEE_CLIENT_API_CUST_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_imp.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_imp.h
new file mode 100644
index 0000000..51afe92
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_api_imp.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef __TEE_CLIENT_API_IMP_H__
+#define __TEE_CLIENT_API_IMP_H__
+
+#define TEEC_MEM_INOUT (TEEC_MEM_INPUT | TEEC_MEM_OUTPUT)
+#define TEEC_MEM_FLAGS_MASK (TEEC_MEM_ION | TEEC_MEM_INPUT | TEEC_MEM_OUTPUT)
+
+struct tee_client;
+
+struct teec_context_imp {
+	struct tee_client *client;
+};
+
+struct teec_session_imp {
+	u32			session_id;
+	struct teec_context_imp context;
+	int			active;
+};
+
+struct teec_shared_memory_imp {
+	struct tee_client	*client;
+	int			implementation_allocated;
+};
+
+struct teec_operation_imp {
+	struct teec_session_imp *session;
+};
+
+/*
+ * There is no natural, compile-time limit on the shared memory, but a specific
+ * implementation may introduce a limit (in particular on TrustZone)
+ */
+#define TEEC_CONFIG_SHAREDMEM_MAX_SIZE ((size_t)0xFFFFFFFF)
+
+#define TEEC_PARAM_TYPES(entry0_type, entry1_type, entry2_type, entry3_type) \
+	((entry0_type) | ((entry1_type) << 4) | \
+	((entry2_type) << 8) | ((entry3_type) << 12))
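+
+/*
+ * Example (illustrative): an operation with a value in/out as parameter 0 and
+ * a temporary input memref as parameter 1 would use
+ *	TEEC_PARAM_TYPES(TEEC_VALUE_INOUT, TEEC_MEMREF_TEMP_INPUT,
+ *			 TEEC_NONE, TEEC_NONE)
+ * which packs to 0x0053 (4 bits per entry, entry 0 in the lowest nibble).
+ */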
+
+#endif /* __TEE_CLIENT_API_IMP_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_error.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_error.h
new file mode 100644
index 0000000..e77583d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_error.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef __TEE_CLIENT_ERROR_H__
+#define __TEE_CLIENT_ERROR_H__
+
+#define TEEC_SUCCESS                      ((uint32_t)0x00000000)
+
+/**
+ * Generic error code : Generic error
+ **/
+#define TEEC_ERROR_GENERIC                ((uint32_t)0xFFFF0000)
+
+/**
+ * Generic error code : The underlying security system denies the access to the
+ * object
+ **/
+#define TEEC_ERROR_ACCESS_DENIED          ((uint32_t)0xFFFF0001)
+
+/**
+ * Generic error code : The pending operation is cancelled.
+ **/
+#define TEEC_ERROR_CANCEL                 ((uint32_t)0xFFFF0002)
+
+/**
+ * Generic error code : The underlying system detects a conflict
+ **/
+#define TEEC_ERROR_ACCESS_CONFLICT        ((uint32_t)0xFFFF0003)
+
+/**
+ * Generic error code : Too much data for the operation or some data remain
+ * unprocessed by the operation.
+ **/
+#define TEEC_ERROR_EXCESS_DATA            ((uint32_t)0xFFFF0004)
+
+/**
+ * Generic error code : Error of data format
+ **/
+#define TEEC_ERROR_BAD_FORMAT             ((uint32_t)0xFFFF0005)
+
+/**
+ * Generic error code : The specified parameters are invalid
+ **/
+#define TEEC_ERROR_BAD_PARAMETERS         ((uint32_t)0xFFFF0006)
+
+/**
+ * Generic error code : Illegal state for the operation.
+ **/
+#define TEEC_ERROR_BAD_STATE              ((uint32_t)0xFFFF0007)
+
+/**
+ * Generic error code : The item is not found
+ **/
+#define TEEC_ERROR_ITEM_NOT_FOUND         ((uint32_t)0xFFFF0008)
+
+/**
+ * Generic error code : The specified operation is not implemented
+ **/
+#define TEEC_ERROR_NOT_IMPLEMENTED        ((uint32_t)0xFFFF0009)
+
+/**
+ * Generic error code : The specified operation is not supported
+ **/
+#define TEEC_ERROR_NOT_SUPPORTED          ((uint32_t)0xFFFF000A)
+
+/**
+ * Generic error code : Insufficient data is available for the operation.
+ **/
+#define TEEC_ERROR_NO_DATA                ((uint32_t)0xFFFF000B)
+
+/**
+ * Generic error code : Not enough memory to perform the operation
+ **/
+#define TEEC_ERROR_OUT_OF_MEMORY          ((uint32_t)0xFFFF000C)
+
+/**
+ * Generic error code : The service is currently unable to handle the request;
+ * try later
+ **/
+#define TEEC_ERROR_BUSY                   ((uint32_t)0xFFFF000D)
+
+/**
+ * Generic communication error
+ **/
+#define TEEC_ERROR_COMMUNICATION          ((uint32_t)0xFFFF000E)
+
+/**
+ * Generic error code : security violation
+ **/
+#define TEEC_ERROR_SECURITY               ((uint32_t)0xFFFF000F)
+
+/**
+ * Generic error code : the buffer is too short
+ **/
+#define TEEC_ERROR_SHORT_BUFFER           ((uint32_t)0xFFFF0010)
+
+/**
+ * Error of communication: The target of the connection is dead
+ **/
+#define TEEC_ERROR_TARGET_DEAD            ((uint32_t)0xFFFF3024)
+
+/**
+ * File system error code: not enough space to complete the operation.
+ **/
+#define TEEC_ERROR_STORAGE_NO_SPACE       ((uint32_t)0xFFFF3041)
+
+#endif /* __TEE_CLIENT_ERROR_H__ */
+
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_types.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_types.h
new file mode 100644
index 0000000..bf8e9ad
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/GP/tee_client_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef __TEE_CLIENT_TYPES_H__
+#define __TEE_CLIENT_TYPES_H__
+
+/* Definition of a UUID (from RFC 4122 http://www.ietf.org/rfc/rfc4122.txt) */
+struct teec_uuid {
+	u32 time_low;
+	u16 time_mid;
+	u16 time_hi_and_version;
+	u8  clock_seq_and_node[8];
+};
+
+/* Type definition for a TEE Identity */
+struct tee_identity {
+	u32 login;
+	struct teec_uuid uuid;
+};
+
+#endif /* __TEE_CLIENT_TYPES_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_admin.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_admin.h
new file mode 100644
index 0000000..eee82e0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_admin.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef __MC_ADMIN_IOCTL_H__
+#define __MC_ADMIN_IOCTL_H__
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MC_ADMIN_DEVNODE "mobicore"
+
+/* Driver/daemon commands */
+enum {
+	/* Command 0 is reserved */
+	MC_DRV_GET_ROOT_CONTAINER = 1,
+	MC_DRV_GET_SP_CONTAINER = 2,
+	MC_DRV_GET_TRUSTLET_CONTAINER = 3,
+	MC_DRV_GET_TRUSTLET = 4,
+	MC_DRV_SIGNAL_CRASH = 5,
+};
+
+/* MobiCore IOCTL magic number */
+#define MC_IOC_MAGIC    'M'
+
+struct mc_admin_request {
+	__u32		 request_id;	/* Unique request identifier */
+	__u32		 command;	/* Command to daemon */
+	struct mc_uuid_t uuid;		/* UUID of trustlet, if relevant */
+	__u32		 is_gp;		/* Whether trustlet is GP */
+	__u32		 spid;		/* SPID of trustlet, if relevant */
+};
+
+struct mc_admin_response {
+	__u32		request_id;	/* Unique request identifier */
+	__u32		error_no;	/* Errno from daemon */
+	__u32		spid;		/* SPID of trustlet, if relevant */
+	__u32		service_type;	/* Type of trustlet being returned */
+	__u32		length;		/* Length of data to get */
+	/* Any data follows */
+};
+
+struct mc_admin_driver_info {
+	/* Driver version and initial command ID */
+	__u32		drv_version;
+	__u32		initial_cmd_id;
+};
+
+struct mc_admin_load_info {
+	__u32		 spid;		/* SPID of trustlet, if relevant */
+	__u64		 address;	/* Address of the data */
+	__u32		 length;	/* Length of data to get */
+	struct mc_uuid_t uuid;		/* UUID of trustlet, if relevant */
+};
+
+#define MC_ADMIN_IO_GET_DRIVER_REQUEST \
+	_IOR(MC_IOC_MAGIC, 0, struct mc_admin_request)
+#define MC_ADMIN_IO_GET_INFO \
+	_IOR(MC_IOC_MAGIC, 1, struct mc_admin_driver_info)
+#define MC_ADMIN_IO_LOAD_DRIVER \
+	_IOW(MC_IOC_MAGIC, 2, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_TOKEN \
+	_IOW(MC_IOC_MAGIC, 3, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_CHECK \
+	_IOW(MC_IOC_MAGIC, 4, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_KEY_SO \
+	_IOW(MC_IOC_MAGIC, 5, struct mc_admin_load_info)
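+
+/*
+ * Illustrative daemon-side sketch (user space; assumes the admin node is
+ * exposed as /dev/mobicore and that error handling is omitted):
+ *
+ *	struct mc_admin_request req;
+ *	int fd = open("/dev/" MC_ADMIN_DEVNODE, O_RDWR);
+ *
+ *	ioctl(fd, MC_ADMIN_IO_GET_DRIVER_REQUEST, &req);
+ *	// req.command tells the daemon what the driver needs, e.g.
+ *	// MC_DRV_GET_TRUSTLET for the trustlet identified by req.uuid
+ */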
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MC_ADMIN_IOCTL_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_user.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_user.h
new file mode 100644
index 0000000..4e66b81
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mc_user.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_USER_H_
+#define _MC_USER_H_
+
+#define MCDRVMODULEAPI_VERSION_MAJOR 7
+#define MCDRVMODULEAPI_VERSION_MINOR 0
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+#define BIT(n)				(1 << (n))
+#endif /* __KERNEL__ */
+
+#define MC_USER_DEVNODE			"mobicore-user"
+
+/** Maximum length of MobiCore product ID string. */
+#define MC_PRODUCT_ID_LEN		64
+
+/** Number of buffers that can be mapped at once */
+#define MC_MAP_MAX			4
+
+/* Max length for buffers */
+#define MC_MAX_TCI_LEN			0x100000
+#define BUFFER_LENGTH_MAX		0x40000000
+
+/* Max length for objects */
+#define OBJECT_LENGTH_MAX		0x8000000
+
+/* Flags for buffers to map (aligned on GP) */
+#define MC_IO_MAP_INPUT			BIT(0)
+#define MC_IO_MAP_OUTPUT		BIT(1)
+#define MC_IO_MAP_INPUT_OUTPUT		(MC_IO_MAP_INPUT | MC_IO_MAP_OUTPUT)
+
+/*
+ * Universally Unique Identifier (UUID) according to ISO/IEC 11578.
+ */
+struct mc_uuid_t {
+	__u8		value[16];	/* Value of the UUID */
+};
+
+/*
+ * GP TA login types.
+ */
+enum mc_login_type {
+	LOGIN_PUBLIC = 0,
+	LOGIN_USER,
+	LOGIN_GROUP,
+	LOGIN_APPLICATION = 4,
+	LOGIN_USER_APPLICATION,
+	LOGIN_GROUP_APPLICATION,
+};
+
+/*
+ * GP TA identity structure.
+ */
+struct mc_identity {
+	enum mc_login_type	login_type;
+	union {
+		__u8		login_data[16];
+		gid_t		gid;		/* Requested group id */
+		struct {
+			uid_t	euid;
+			uid_t	ruid;
+		} uid;
+	};
+};
+
+/*
+ * Data exchange structure of the MC_IO_OPEN_SESSION ioctl command.
+ */
+struct mc_ioctl_open_session {
+	struct mc_uuid_t uuid;		/* trustlet uuid */
+	__u32		is_gp_uuid;	/* uuid is for GP TA */
+	__u32		sid;            /* session id (out) */
+	__u64		tci;		/* tci buffer pointer */
+	__u32		tcilen;		/* tci length */
+	struct mc_identity identity;	/* GP TA identity */
+};
+
+/*
+ * Data exchange structure of the MC_IO_OPEN_TRUSTLET ioctl command.
+ */
+struct mc_ioctl_open_trustlet {
+	__u32		sid;		/* session id (out) */
+	__u32		spid;		/* trustlet spid */
+	__u64		buffer;		/* trustlet binary pointer */
+	__u32		tlen;		/* binary length  */
+	__u64		tci;		/* tci buffer pointer */
+	__u32		tcilen;		/* tci length */
+};
+
+/*
+ * Data exchange structure of the MC_IO_WAIT ioctl command.
+ */
+struct mc_ioctl_wait {
+	__u32		sid;		/* session id (in) */
+	__s32		timeout;	/* notification timeout */
+	__u32		partial;	/* for proxy server to retry silently */
+};
+
+/*
+ * Data exchange structure of the MC_IO_ALLOC ioctl command.
+ */
+struct mc_ioctl_alloc {
+	__u32		len;		/* buffer length  */
+	__u32		handle;		/* user handle for the buffer (out) */
+};
+
+/*
+ * Buffer mapping incoming and outgoing information.
+ */
+struct mc_ioctl_buffer {
+	__u64		va;		/* user space address of buffer */
+	__u32		len;		/* buffer length  */
+	__u64		sva;		/* SWd virt address of buffer (out) */
+	__u32		flags;		/* buffer flags  */
+};
+
+/*
+ * Data exchange structure of the MC_IO_MAP and MC_IO_UNMAP ioctl commands.
+ */
+struct mc_ioctl_map {
+	__u32		sid;		/* session id */
+	struct mc_ioctl_buffer buf;	/* buffers info */
+};
+
+/*
+ * Data exchange structure of the MC_IO_ERR ioctl command.
+ */
+struct mc_ioctl_geterr {
+	__u32		sid;		/* session id */
+	__s32		value;		/* error value (out) */
+};
+
+/*
+ * Global MobiCore Version Information.
+ */
+struct mc_version_info {
+	char product_id[MC_PRODUCT_ID_LEN]; /* Product ID string */
+	__u32	version_mci;		/* Mobicore Control Interface */
+	__u32	version_so;		/* Secure Objects */
+	__u32	version_mclf;		/* MobiCore Load Format */
+	__u32	version_container;	/* MobiCore Container Format */
+	__u32	version_mc_config;	/* MobiCore Config. Block Format */
+	__u32	version_tl_api;		/* MobiCore Trustlet API */
+	__u32	version_dr_api;		/* MobiCore Driver API */
+	__u32	version_nwd;		/* This Driver */
+};
+
+/*
+ * GP TA operation structure.
+ */
+struct gp_value {
+	__u32			a;
+	__u32			b;
+};
+
+struct gp_temp_memref {
+	__u64			buffer;
+	__u64			size;
+};
+
+struct gp_shared_memory {
+	__u64			buffer;
+	__u64			size;
+	__u32			flags;
+};
+
+struct gp_regd_memref {
+	struct gp_shared_memory	parent;
+	__u64			size;
+	__u64			offset;
+};
+
+union gp_param {
+	struct gp_temp_memref	tmpref;
+	struct gp_regd_memref	memref;
+	struct gp_value		value;
+};
+
+struct gp_operation {
+	__u32			started;
+	__u32			param_types;
+	union gp_param		params[4];
+};
+
+struct gp_return {
+	__u32			origin;
+	__u32			value;
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_INITIALIZE_CONTEXT ioctl command.
+ */
+struct mc_ioctl_gp_initialize_context {
+	struct gp_return	ret;		/* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_REGISTER_SHARED_MEM ioctl command.
+ */
+struct mc_ioctl_gp_register_shared_mem {
+	struct gp_shared_memory	memref;
+	struct gp_return	ret;		/* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_RELEASE_SHARED_MEM ioctl command.
+ */
+struct mc_ioctl_gp_release_shared_mem {
+	struct gp_shared_memory	memref;
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_OPEN_SESSION ioctl command.
+ */
+struct mc_ioctl_gp_open_session {
+	struct mc_uuid_t	uuid;		/* trustlet uuid */
+	struct mc_identity	identity;	/* GP TA identity */
+	struct gp_operation	operation;	/* set of parameters */
+	struct gp_return	ret;		/* return origin/value (out) */
+	__u32			session_id;	/* session id (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_CLOSE_SESSION ioctl command.
+ */
+struct mc_ioctl_gp_close_session {
+	__u32			session_id;	/* session id */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_INVOKE_COMMAND ioctl command.
+ */
+struct mc_ioctl_gp_invoke_command {
+	struct gp_operation	operation;	/* set of parameters */
+	__u32			session_id;	/* session id */
+	__u32			command_id;	/* ID of the command */
+	struct gp_return	ret;		/* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_CANCEL ioctl command.
+ */
+struct mc_ioctl_gp_request_cancellation {
+	struct gp_operation	operation;	/* set of parameters */
+};
+
+/*
+ * Defines for the ioctl calls into the MobiCore driver module from user space.
+ */
+/* MobiCore IOCTL magic number */
+#define MC_IOC_MAGIC	'M'
+
+/*
+ * ioctl commands implementing the corresponding user-space API functions
+ */
+#define MC_IO_OPEN_SESSION \
+	_IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_open_session)
+#define MC_IO_OPEN_TRUSTLET \
+	_IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_open_trustlet)
+#define MC_IO_CLOSE_SESSION \
+	_IO(MC_IOC_MAGIC, 2)
+#define MC_IO_NOTIFY \
+	_IO(MC_IOC_MAGIC, 3)
+#define MC_IO_WAIT \
+	_IOW(MC_IOC_MAGIC, 4, struct mc_ioctl_wait)
+#define MC_IO_MAP \
+	_IOWR(MC_IOC_MAGIC, 5, struct mc_ioctl_map)
+#define MC_IO_UNMAP \
+	_IOW(MC_IOC_MAGIC, 6, struct mc_ioctl_map)
+#define MC_IO_ERR \
+	_IOWR(MC_IOC_MAGIC, 7, struct mc_ioctl_geterr)
+#define MC_IO_HAS_SESSIONS \
+	_IO(MC_IOC_MAGIC, 8)
+#define MC_IO_VERSION \
+	_IOR(MC_IOC_MAGIC, 9, struct mc_version_info)
+#define MC_IO_GP_INITIALIZE_CONTEXT \
+	_IOW(MC_IOC_MAGIC, 20, struct mc_ioctl_gp_initialize_context)
+#define MC_IO_GP_REGISTER_SHARED_MEM \
+	_IOWR(MC_IOC_MAGIC, 21, struct mc_ioctl_gp_register_shared_mem)
+#define MC_IO_GP_RELEASE_SHARED_MEM \
+	_IOW(MC_IOC_MAGIC, 23, struct mc_ioctl_gp_release_shared_mem)
+#define MC_IO_GP_OPEN_SESSION \
+	_IOWR(MC_IOC_MAGIC, 24, struct mc_ioctl_gp_open_session)
+#define MC_IO_GP_CLOSE_SESSION \
+	_IOW(MC_IOC_MAGIC, 25, struct mc_ioctl_gp_close_session)
+#define MC_IO_GP_INVOKE_COMMAND \
+	_IOWR(MC_IOC_MAGIC, 26, struct mc_ioctl_gp_invoke_command)
+#define MC_IO_GP_REQUEST_CANCELLATION \
+	_IOW(MC_IOC_MAGIC, 27, struct mc_ioctl_gp_request_cancellation)
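+
+/*
+ * Illustrative user-space sketch (assumptions: ta_uuid, tci_buffer and
+ * tci_len are provided by the caller, the session id is passed as the
+ * argument of MC_IO_NOTIFY, and error handling is omitted):
+ *
+ *	struct mc_ioctl_open_session sess = { 0 };
+ *	int fd = open("/dev/" MC_USER_DEVNODE, O_RDWR);
+ *
+ *	sess.uuid = ta_uuid;
+ *	sess.tci = (__u64)(uintptr_t)tci_buffer;
+ *	sess.tcilen = tci_len;			// at most MC_MAX_TCI_LEN
+ *	ioctl(fd, MC_IO_OPEN_SESSION, &sess);	// sess.sid set on success
+ *	ioctl(fd, MC_IO_NOTIFY, sess.sid);
+ */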
+
+#endif /* _MC_USER_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mobicore_driver_api.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mobicore_driver_api.h
new file mode 100644
index 0000000..be1d1e0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/public/mobicore_driver_api.h
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef _MOBICORE_DRIVER_API_H_
+#define _MOBICORE_DRIVER_API_H_
+
+#include "mc_user.h"
+
+#define __MC_CLIENT_LIB_API
+
+/*
+ * Return values of MobiCore driver functions.
+ */
+enum mc_result {
+	/* Function call succeeded. */
+	MC_DRV_OK				= 0,
+	/* No notification available. */
+	MC_DRV_NO_NOTIFICATION			= 1,
+	/* Error during notification on communication level. */
+	MC_DRV_ERR_NOTIFICATION			= 2,
+	/* Function not implemented. */
+	MC_DRV_ERR_NOT_IMPLEMENTED		= 3,
+	/* No more resources available. */
+	MC_DRV_ERR_OUT_OF_RESOURCES		= 4,
+	/* Driver initialization failed. */
+	MC_DRV_ERR_INIT				= 5,
+	/* Unknown error. */
+	MC_DRV_ERR_UNKNOWN			= 6,
+	/* The specified device is unknown. */
+	MC_DRV_ERR_UNKNOWN_DEVICE		= 7,
+	/* The specified session is unknown.*/
+	MC_DRV_ERR_UNKNOWN_SESSION		= 8,
+	/* The specified operation is not allowed. */
+	MC_DRV_ERR_INVALID_OPERATION		= 9,
+	/* The response header from the MC is invalid. */
+	MC_DRV_ERR_INVALID_RESPONSE		= 10,
+	/* Function call timed out. */
+	MC_DRV_ERR_TIMEOUT			= 11,
+	/* Can not allocate additional memory. */
+	MC_DRV_ERR_NO_FREE_MEMORY		= 12,
+	/* Free memory failed. */
+	MC_DRV_ERR_FREE_MEMORY_FAILED		= 13,
+	/* Still some open sessions pending. */
+	MC_DRV_ERR_SESSION_PENDING		= 14,
+	/* MC daemon not reachable */
+	MC_DRV_ERR_DAEMON_UNREACHABLE		= 15,
+	/* The device file of the kernel module could not be opened. */
+	MC_DRV_ERR_INVALID_DEVICE_FILE		= 16,
+	/* Invalid parameter. */
+	MC_DRV_ERR_INVALID_PARAMETER		= 17,
+	/* Unspecified error from Kernel Module*/
+	MC_DRV_ERR_KERNEL_MODULE		= 18,
+	/* Error during mapping of additional bulk memory to session. */
+	MC_DRV_ERR_BULK_MAPPING			= 19,
+	/* Error during unmapping of additional bulk memory to session. */
+	MC_DRV_ERR_BULK_UNMAPPING		= 20,
+	/* Notification received, exit code available. */
+	MC_DRV_INFO_NOTIFICATION		= 21,
+	/* Set up of NWd connection failed. */
+	MC_DRV_ERR_NQ_FAILED			= 22,
+	/* Wrong daemon version. */
+	MC_DRV_ERR_DAEMON_VERSION		= 23,
+	/* Wrong container version. */
+	MC_DRV_ERR_CONTAINER_VERSION		= 24,
+	/* System Trustlet public key is wrong. */
+	MC_DRV_ERR_WRONG_PUBLIC_KEY		= 25,
+	/* Wrong container type(s). */
+	MC_DRV_ERR_CONTAINER_TYPE_MISMATCH	= 26,
+	/* Container is locked (or not activated). */
+	MC_DRV_ERR_CONTAINER_LOCKED		= 27,
+	/* SPID is not registered with root container. */
+	MC_DRV_ERR_SP_NO_CHILD			= 28,
+	/* UUID is not registered with sp container. */
+	MC_DRV_ERR_TL_NO_CHILD			= 29,
+	/* Unwrapping of root container failed. */
+	MC_DRV_ERR_UNWRAP_ROOT_FAILED		= 30,
+	/* Unwrapping of service provider container failed. */
+	MC_DRV_ERR_UNWRAP_SP_FAILED		= 31,
+	/* Unwrapping of Trustlet container failed. */
+	MC_DRV_ERR_UNWRAP_TRUSTLET_FAILED	= 32,
+	/* No device associated with connection. */
+	MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN	= 33,
+	/* TA blob attestation is incorrect. */
+	MC_DRV_ERR_TA_ATTESTATION_ERROR		= 34,
+	/* Interrupted system call. */
+	MC_DRV_ERR_INTERRUPTED_BY_SIGNAL	= 35,
+	/* Service is blocked and opensession is thus not allowed. */
+	MC_DRV_ERR_SERVICE_BLOCKED		= 36,
+	/* Service is locked and opensession is thus not allowed. */
+	MC_DRV_ERR_SERVICE_LOCKED		= 37,
+	/* Service was killed by the TEE (due to an administrative command). */
+	MC_DRV_ERR_SERVICE_KILLED		= 38,
+	/* All permitted instances to the service are used */
+	MC_DRV_ERR_NO_FREE_INSTANCES		= 39,
+	/* TA blob header is incorrect. */
+	MC_DRV_ERR_TA_HEADER_ERROR		= 40,
+};
+
+/*
+ * Structure of Session Handle, includes the Session ID and the Device ID the
+ * Session belongs to.
+ * The session handle will be used for session-based MobiCore communication.
+ * It will be passed to calls which address a communication end point in the
+ * MobiCore environment.
+ */
+struct mc_session_handle {
+	u32	session_id;		/* MobiCore session ID */
+	u32	device_id;		/* Device ID the session belongs to */
+};
+
+/*
+ * Information structure about additional mapped Bulk buffer between the
+ * Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
+ * initialized from a Trustlet Connector by calling mc_map().
+ * In order to use the memory within a Trustlet the Trustlet Connector has to
+ * inform the Trustlet with the content of this structure via the TCI.
+ */
+struct mc_bulk_map {
+	/*
+	 * The virtual address of the Bulk buffer regarding the address space
+	 * of the Trustlet, already includes a possible offset!
+	 */
+	u32	secure_virt_addr;
+	u32	secure_virt_len;	/* Length of the mapped Bulk buffer */
+};
+
+/* The default device ID */
+#define MC_DEVICE_ID_DEFAULT	0
+/* Wait infinite for a response of the MC. */
+#define MC_INFINITE_TIMEOUT	((s32)(-1))
+/* Do not wait for a response of the MC. */
+#define MC_NO_TIMEOUT		0
+/* TCI/DCI must not exceed 1MiB */
+#define MC_MAX_TCI_LEN		0x100000
+
+/**
+ * mc_open_device() - Open a new connection to a MobiCore device.
+ * @device_id:		Identifier for the MobiCore device to be used.
+ *			MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Initializes all device specific resources required to communicate with a
+ * MobiCore instance located on the specified device in the system. If the
+ * device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_ERR_INVALID_OPERATION:	device already opened
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device_id unknown
+ *	MC_DRV_ERR_INVALID_DEVICE_FILE:	kernel module under /dev/mobicore
+ *					cannot be opened
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_device(
+	u32				device_id);
+
+/**
+ * mc_close_device() - Close the connection to a MobiCore device.
+ * @device_id:		Identifier for the MobiCore device.
+ *
+ * When closing a device, active sessions have to be closed beforehand.
+ * Resources associated with the device will be released.
+ * The device may be opened again after it has been closed.
+ *
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id is invalid
+ *	MC_DRV_ERR_SESSION_PENDING:	a session is still open
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon occur
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_device(
+	u32				device_id);
+
+/**
+ * mc_open_session() - Open a new session to a Trustlet.
+ * @session:		On success, the session data will be returned
+ * @uuid:		UUID of the Trustlet to be opened
+ * @tci:		TCI buffer for communicating with the Trustlet
+ * @tci_len:		Length of the TCI buffer. Maximum allowed value
+ *			is MC_MAX_TCI_LEN
+ *
+ * The Trustlet with the given UUID has to be available in the flash filesystem.
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ *
+ * Waits until MobiCore responds with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * Note that session.device_id has to be the device id of an opened device.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	session parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id is invalid
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon socket occur
+ *	MC_DRV_ERR_NQ_FAILED:		daemon returns an error
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_session(
+	struct mc_session_handle	*session,
+	const struct mc_uuid_t		*uuid,
+	u8				*tci,
+	u32				tci_len);
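+
+/*
+ * Illustrative in-kernel usage sketch (the zeroed UUID is a placeholder for a
+ * real Trustlet UUID and error handling is omitted):
+ *
+ *	struct mc_session_handle session = { .device_id = MC_DEVICE_ID_DEFAULT };
+ *	struct mc_uuid_t uuid = { .value = { 0 } };
+ *	u8 *tci;
+ *
+ *	mc_open_device(MC_DEVICE_ID_DEFAULT);
+ *	mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, PAGE_SIZE, &tci, 0);
+ *	mc_open_session(&session, &uuid, tci, PAGE_SIZE);
+ */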
+
+/**
+ * mc_open_trustlet() - Open a new session to the provided Trustlet.
+ * @session:		On success, the session data will be returned
+ * @spid:		Service Provider ID (for SP trustlets otherwise ignored)
+ * @trustlet:		Memory buffer containing the Trusted Application binary
+ * @trustlet_len:	Trusted Application length
+ * @tci:		TCI buffer for communicating with the Trustlet
+ * @tci_len:		Length of the TCI buffer. Maximum allowed value
+ *			is MC_MAX_TCI_LEN
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ *
+ * Waits until MobiCore responds with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * Note that session.device_id has to be the device id of an opened device.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	session parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id is invalid
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon socket occur
+ *	MC_DRV_ERR_NQ_FAILED:		daemon returns an error
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_trustlet(
+	struct mc_session_handle	*session,
+	u32				spid,
+	u8				*trustlet,
+	u32				trustlet_len,
+	u8				*tci,
+	u32				tci_len);
+
+/**
+ * mc_close_session() - Close a Trustlet session.
+ * @session:		Session to be closed.
+ *
+ * Closes the specified MobiCore session. The call will block until the
+ * session has been closed.
+ *
+ * Device device_id has to be opened in advance.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	session parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon occur
+ *	MC_DRV_ERR_INVALID_DEVICE_FILE:	daemon cannot open Trustlet file
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_session(
+	struct mc_session_handle	*session);
+
+/**
+ * mc_notify() - Notify a session.
+ * @session:		The session to be notified.
+ *
+ * Notifies the session end point about available message data.
+ * If the session parameter is correct, notify will always succeed.
+ * Corresponding errors can only be received by mc_wait_notification().
+ *
+ * A session has to be opened in advance.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	session parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_notify(
+	struct mc_session_handle	*session);
+
+/**
+ * mc_wait_notification() - Wait for a notification.
+ * @session:		The session the notification should correspond to.
+ * @timeout:		Time in milliseconds to wait
+ *			(MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
+ *			 MC_INFINITE_TIMEOUT : wait infinitely)
+ *
+ * Wait for a notification issued by the MobiCore for a specific session.
+ * The timeout parameter specifies the number of milliseconds the call will wait
+ * for a notification.
+ *
+ * If the caller passes 0 as the timeout value the call returns immediately.
+ * If the timeout value is below 0 the call blocks until a notification for
+ * the session has been received.
+ *
+ * The caller has to trust the other side to send a notification to wake it up
+ * again.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_ERR_TIMEOUT:		no notification arrived in time
+ *	MC_DRV_INFO_NOTIFICATION:	a problem with the session was
+ *					encountered. Get more details with
+ *					mc_get_session_error_code()
+ *	MC_DRV_ERR_NOTIFICATION:	a problem with the socket occurred
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
+	struct mc_session_handle	*session,
+	s32				timeout);
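+
+/*
+ * Illustrative wait loop (sketch only; a real caller would also handle
+ * MC_DRV_INFO_NOTIFICATION via mc_get_session_error_code()):
+ *
+ *	mc_notify(&session);
+ *	for (;;) {
+ *		enum mc_result ret = mc_wait_notification(&session, 2000);
+ *
+ *		if (ret != MC_DRV_ERR_TIMEOUT)
+ *			break;
+ *	}
+ */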
+
+/**
+ * mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
+ * @device_id:		The ID of an opened device to retrieve the WSM from.
+ * @align:		The alignment (number of pages) of the memory block
+ *			(e.g. 0x00000001 for 4kb).
+ * @len:		Length of the block in bytes.
+ * @wsm:		Virtual address of the world shared memory block.
+ * @wsm_flags:		Platform specific flags describing the memory to
+ *			be allocated.
+ *
+ * The MC driver allocates a contiguous block of memory which can be used as
+ * WSM.
+ * This implicates that the allocated memory is aligned according to the
+ * alignment parameter.
+ *
+ * Always returns a buffer of size WSM_SIZE aligned to 4K.
+ *
+ * Align and wsm_flags are currently ignored
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id is invalid
+ *	MC_DRV_ERR_NO_FREE_MEMORY:	no more contiguous memory is
+ *					available in this size or for this
+ *					process
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
+	u32				device_id,
+	u32				align,
+	u32				len,
+	u8				**wsm,
+	u32				wsm_flags);
+
+/**
+ * mc_free_wsm() - Free a block of world shared memory (WSM).
+ * @device_id:		The ID to which the given address belongs
+ * @wsm:		Address of WSM block to be freed
+ *
+ * The MC driver will free a block of world shared memory (WSM) previously
+ * allocated with mc_malloc_wsm(). The caller has to assure that the address
+ * handed over to the driver is a valid WSM address.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	when device id is invalid
+ *	MC_DRV_ERR_FREE_MEMORY_FAILED:	on failure
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(
+	u32				device_id,
+	u8				*wsm);
+
+/**
+ * mc_map() -	Map additional bulk buffer between a Trustlet Connector (TLC)
+ *		and the Trustlet (TL) for a session
+ * @session:		Session handle with information of the device_id and
+ *			the session_id. The given buffer is mapped to the
+ *			session specified in the sessionHandle
+ * @buf:		Virtual address of a memory portion (relative to TLC)
+ *			to be shared with the Trustlet, already includes a
+ *			possible offset!
+ * @len:		length of buffer block in bytes.
+ * @map_info:		Information structure about the mapped Bulk buffer
+ *			between the TLC (NWd) and the TL (SWd).
+ *
+ * Memory allocated in user space of the TLC can be mapped as additional
+ * communication channel (besides TCI) to the Trustlet. Limitations of the
+ * Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
+ * chunk size of 1 MiB each.
+ *
+ * It is up to the application layer (TLC) to inform the Trustlet
+ * about the additional mapped bulk memory.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon occur
+ *	MC_DRV_ERR_BULK_MAPPING:	buf is already used as a bulk buffer
+ *					or registering the buffer failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_map(
+	struct mc_session_handle	*session,
+	void				*buf,
+	u32				len,
+	struct mc_bulk_map		*map_info);
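+
+/*
+ * Illustrative mapping sketch (assumptions: buf/buf_len come from the caller
+ * and the map_info content is forwarded to the Trustlet through the TCI by
+ * code not shown here):
+ *
+ *	struct mc_bulk_map map_info;
+ *
+ *	mc_map(&session, buf, buf_len, &map_info);
+ *	// pass map_info.secure_virt_addr/secure_virt_len to the TL via TCI
+ *	mc_unmap(&session, buf, &map_info);
+ */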
+
+/**
+ * mc_unmap() -	Remove additional mapped bulk buffer between Trustlet Connector
+ *		(TLC) and the Trustlet (TL) for a session
+ * @session:		Session handle with information of the device_id and
+ *			the session_id. The given buffer is unmapped from the
+ *			session specified in the sessionHandle.
+ * @buf:		Virtual address of a memory portion (relative to TLC)
+ *			shared with the TL, already includes a possible offset!
+ * @map_info:		Information structure about the mapped Bulk buffer
+ *			between the TLC (NWd) and the TL (SWd)
+ *
+ * The bulk buffer will immediately be unmapped from the session context.
+ *
+ * The application layer (TLC) must inform the TL about unmapping of the
+ * additional bulk memory before calling mc_unmap!
+ *
+ * The clientlib currently ignores the len field in map_info.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ *	MC_DRV_ERR_DAEMON_UNREACHABLE:	problems with daemon occur
+ *	MC_DRV_ERR_BULK_UNMAPPING:	buf was not registered earlier
+ *					or unregistering failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_unmap(
+	struct mc_session_handle	*session,
+	void				*buf,
+	struct mc_bulk_map		*map_info);
+
+/**
+ * mc_get_session_error_code() - Get additional error information of the last
+ *				 error that occurred on a session.
+ * @session:		Session handle with information of the device_id and
+ *			the session_id
+ * @exit_code:		>0 Trustlet has terminated itself with this value,
+ *			<0 Trustlet is dead because of an error within the
+ *			MobiCore (e.g. Kernel exception). See also MCI
+ *			definition.
+ *
+ * After the request the stored error code will be deleted.
+ *
+ * Return codes:
+ *	MC_DRV_OK:			operation completed successfully
+ *	MC_DRV_INVALID_PARAMETER:	a parameter is invalid
+ *	MC_DRV_ERR_UNKNOWN_SESSION:	session id is invalid
+ *	MC_DRV_ERR_UNKNOWN_DEVICE:	device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
+	struct mc_session_handle	*session,
+	s32				*exit_code);
+
+/* Return true if MobiCore is ready */
+__MC_CLIENT_LIB_API bool is_mobicore_ready(void);
+
+#endif /* _MOBICORE_DRIVER_API_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.c
new file mode 100644
index 0000000..2a8af1e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <crypto/hash.h>
+#include <linux/scatterlist.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <net/sock.h>		/* sockfd_lookup */
+#include <linux/version.h>
+#include <linux/sched/clock.h>	/* local_clock */
+#include <linux/sched/task.h>	/* put_task_struct */
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include <linux/uidgid.h>
+#include "main.h"
+#include "mmu.h"		/* tee_mmu_buffer, tee_mmu_debug_structs */
+#include "iwp.h"
+#include "mcp.h"
+#include "client.h"		/* *cbuf* */
+#include "session.h"
+#include "mci/mcimcp.h"		/* WSM_INVALID */
+
+#define SHA1_HASH_SIZE       20
+
+static int wsm_create(struct tee_session *session, struct tee_wsm *wsm,
+		      const struct mc_ioctl_buffer *buf)
+{
+	if (wsm->in_use) {
+		mc_dev_err(-EINVAL, "wsm already in use");
+		return -EINVAL;
+	}
+
+	if (buf->len > BUFFER_LENGTH_MAX) {
+		mc_dev_err(-EINVAL, "buffer size %u too big", buf->len);
+		return -EINVAL;
+	}
+
+	wsm->mmu = client_mmu_create(session->client, buf, &wsm->cbuf);
+	if (IS_ERR(wsm->mmu))
+		return PTR_ERR(wsm->mmu);
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_wsms);
+	wsm->va = buf->va;
+	wsm->len = buf->len;
+	wsm->flags = buf->flags;
+	wsm->in_use = true;
+	return 0;
+}
+
+static int wsm_wrap(struct tee_session *session, struct tee_wsm *wsm,
+		    struct tee_mmu *mmu)
+{
+	struct mcp_buffer_map map;
+
+	if (wsm->in_use) {
+		mc_dev_err(-EINVAL, "wsm already in use");
+		return -EINVAL;
+	}
+
+	wsm->mmu = mmu;
+	tee_mmu_get(wsm->mmu);
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_wsms);
+	tee_mmu_buffer(wsm->mmu, &map);
+	wsm->va = 0;
+	wsm->len = map.length;
+	wsm->flags = map.flags;
+	wsm->in_use = true;
+	return 0;
+}
+
+/*
+ * Free a WSM object, must be called under the session's wsms_lock
+ */
+static void wsm_free(struct tee_session *session, struct tee_wsm *wsm)
+{
+	if (!wsm->in_use) {
+		mc_dev_err(-EINVAL, "wsm not in use");
+		return;
+	}
+
+	mc_dev_devel("free wsm %p: mmu %p cbuf %p va %lx len %u sva %x",
+		     wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->sva);
+	/* Free MMU table */
+	tee_mmu_put(wsm->mmu);
+	if (wsm->cbuf)
+		tee_cbuf_put(wsm->cbuf);
+
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_wsms);
+	wsm->in_use = false;
+}
+
+static int hash_path_and_data(struct task_struct *task, u8 *hash,
+			      const void *data, unsigned int data_len)
+{
+	struct file *exe_file;
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+	size_t desc_size;
+	char *buf;
+	char *path;
+	unsigned int path_len;
+	int ret = 0;
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	exe_file = get_task_exe_file(task);
+	if (!exe_file) {
+		ret = -ENOENT;
+		goto end;
+	}
+
+	path = d_path(&exe_file->f_path, buf, PAGE_SIZE);
+	if (IS_ERR(path)) {
+		ret = PTR_ERR(path);
+		goto end;
+	}
+
+	mc_dev_devel("process path =");
+	{
+		char *c;
+
+		for (c = path; *c; c++)
+			mc_dev_devel("%c %d", *c, *c);
+	}
+
+	path_len = (unsigned int)strnlen(path, PAGE_SIZE);
+	mc_dev_devel("path_len = %u", path_len);
+	/* Compute hash of path */
+	tfm = crypto_alloc_shash("sha1", 0, 0);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		mc_dev_err(ret, "cannot allocate shash");
+		goto end;
+	}
+
+	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+	desc = kzalloc(desc_size, GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err_desc;
+	}
+
+	desc->tfm = tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	crypto_shash_init(desc);
+	crypto_shash_update(desc, (u8 *)path, path_len);
+	if (data) {
+		mc_dev_devel("hashing additional data");
+		crypto_shash_update(desc, data, data_len);
+	}
+
+	crypto_shash_final(desc, hash);
+	shash_desc_zero(desc);
+	kfree(desc);
+err_desc:
+	crypto_free_shash(tfm);
+end:
+	free_page((unsigned long)buf);
+
+	return ret;
+}
+
+#define GROUP_AT(gi, i) ((gi)->gid[i])
+
+/*
+ * groups_search() is not exported, so this is a copy of the simple binary
+ * search from kernel/groups.c
+ */
+static int has_group(const struct cred *cred, gid_t id_gid)
+{
+	const struct group_info *group_info = cred->group_info;
+	unsigned int left, right;
+	kgid_t gid = KGIDT_INIT(id_gid);
+
+	if (gid_eq(gid, cred->fsgid) || gid_eq(gid, cred->egid))
+		return 1;
+
+	if (!group_info)
+		return 0;
+
+	left = 0;
+	right = group_info->ngroups;
+	while (left < right) {
+		unsigned int mid = (left + right) / 2;
+
+		if (gid_gt(gid, GROUP_AT(group_info, mid)))
+			left = mid + 1;
+		else if (gid_lt(gid, GROUP_AT(group_info, mid)))
+			right = mid;
+		else
+			return 1;
+	}
+	return 0;
+}
+
+static int check_prepare_identity(const struct mc_identity *identity,
+				  struct identity *mcp_identity,
+				  struct task_struct *task)
+{
+	struct mc_identity *mcp_id = (struct mc_identity *)mcp_identity;
+	u8 hash[SHA1_HASH_SIZE] = { 0 };
+	bool application = false;
+	bool supplied_ca_identity = false;
+	const void *data;
+	unsigned int data_len;
+	static const u8 zero_buffer[sizeof(identity->login_data)] = { 0 };
+
+	/* Copy login type */
+	mcp_identity->login_type = identity->login_type;
+
+	if (identity->login_type == LOGIN_PUBLIC ||
+	    identity->login_type == TEEC_TT_LOGIN_KERNEL)
+		return 0;
+
+	/* Fill in uid field */
+	if (identity->login_type == LOGIN_USER ||
+	    identity->login_type == LOGIN_USER_APPLICATION) {
+		/* Set euid and ruid of the process. */
+		mcp_id->uid.euid = __kuid_val(task_euid(task));
+		mcp_id->uid.ruid = __kuid_val(task_uid(task));
+	}
+
+	/* Check gid field */
+	if (identity->login_type == LOGIN_GROUP ||
+	    identity->login_type == LOGIN_GROUP_APPLICATION) {
+		const struct cred *cred = __task_cred(task);
+
+		/*
+		 * Check if gid is one of: egid of the process, its rgid or one
+		 * of its supplementary groups
+		 */
+		if (!has_group(cred, identity->gid)) {
+			mc_dev_err(-EACCES, "group %d not allowed",
+				   identity->gid);
+			return -EACCES;
+		}
+
+		mc_dev_devel("group %d found", identity->gid);
+		mcp_id->gid = identity->gid;
+	}
+
+	switch (identity->login_type) {
+	case LOGIN_PUBLIC:
+	case LOGIN_GROUP:
+		break;
+	case LOGIN_USER:
+		data = NULL;
+		data_len = 0;
+		break;
+	case LOGIN_APPLICATION:
+		application = true;
+		supplied_ca_identity = true;
+		data = NULL;
+		data_len = 0;
+		break;
+	case LOGIN_USER_APPLICATION:
+		application = true;
+		supplied_ca_identity = true;
+		data = &mcp_id->uid;
+		data_len = sizeof(mcp_id->uid);
+		break;
+	case LOGIN_GROUP_APPLICATION:
+		application = true;
+		data = &identity->gid;
+		data_len = sizeof(identity->gid);
+		break;
+	default:
+		/* Any other login_type value is invalid. */
+		mc_dev_err(-EINVAL, "Invalid login type %d",
+			   identity->login_type);
+		return -EINVAL;
+	}
+
+	/* Let the supplied login_data pass through if the login type is
+	 * LOGIN_APPLICATION or LOGIN_USER_APPLICATION and the buffer is not
+	 * zero-filled. That buffer is expected to contain an NWd-computed
+	 * hash of the CA identity.
+	 */
+	if (supplied_ca_identity &&
+	    memcmp(identity->login_data, zero_buffer,
+		   sizeof(identity->login_data)) != 0) {
+		memcpy(&mcp_id->login_data, identity->login_data,
+		       sizeof(mcp_id->login_data));
+	} else if (application) {
+		int ret = hash_path_and_data(task, hash, data, data_len);
+
+		if (ret) {
+			mc_dev_devel("hash calculation returned %d", ret);
+			return ret;
+		}
+
+		memcpy(&mcp_id->login_data, hash, sizeof(mcp_id->login_data));
+	}
+
+	return 0;
+}
+
+/*
+ * Create a session object.
+ * Note: object is not attached to client yet.
+ */
+struct tee_session *session_create(struct tee_client *client,
+				   const struct mc_identity *identity)
+{
+	struct tee_session *session;
+	struct identity mcp_identity;
+
+	if (!IS_ERR_OR_NULL(identity)) {
+		/* Check identity method and data. */
+		int ret;
+
+		ret = check_prepare_identity(identity, &mcp_identity, current);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	/* Allocate session object */
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return ERR_PTR(-ENOMEM);
+
+	/* Increment debug counter */
+	atomic_inc(&g_ctx.c_sessions);
+	/* Initialise object members */
+	if (identity) {
+		session->is_gp = true;
+		iwp_session_init(&session->iwp_session, &mcp_identity);
+	} else {
+		session->is_gp = false;
+		mcp_session_init(&session->mcp_session);
+	}
+
+	client_get(client);
+	session->client = client;
+	kref_init(&session->kref);
+	INIT_LIST_HEAD(&session->list);
+	mutex_init(&session->wsms_lock);
+	mc_dev_devel("created session %p: client %p",
+		     session, session->client);
+	return session;
+}
+
+/*
+ * Free session object and all objects it contains (wsm).
+ */
+static void session_release(struct kref *kref)
+{
+	struct tee_session *session;
+	int i;
+
+	/* Remove remaining shared buffers (unmapped in SWd by mcp_close) */
+	session = container_of(kref, struct tee_session, kref);
+	for (i = 0; i < MC_MAP_MAX; i++) {
+		if (!session->wsms[i].in_use)
+			continue;
+
+		mc_dev_devel("session %p: free wsm #%d", session, i);
+		wsm_free(session, &session->wsms[i]);
+		/* Buffer unmapped by SWd */
+		atomic_dec(&g_ctx.c_maps);
+	}
+
+	if (session->tci.in_use) {
+		mc_dev_devel("session %p: free tci", session);
+		wsm_free(session, &session->tci);
+	}
+
+	if (session->is_gp)
+		mc_dev_devel("freed GP session %p: client %p id %x", session,
+			     session->client, session->iwp_session.sid);
+	else
+		mc_dev_devel("freed MC session %p: client %p id %x", session,
+			     session->client, session->mcp_session.sid);
+
+	client_put(session->client);
+	kfree(session);
+	/* Decrement debug counter */
+	atomic_dec(&g_ctx.c_sessions);
+}
+
+/*
+ * Unreference session.
+ * Free session object if reference reaches 0.
+ */
+int session_put(struct tee_session *session)
+{
+	return kref_put(&session->kref, session_release);
+}
+
+static int wsm_debug_structs(struct kasnprintf_buf *buf, struct tee_wsm *wsm,
+			     int no)
+{
+	ssize_t ret;
+
+	if (!wsm->in_use)
+		return 0;
+
+	ret = kasnprintf(buf, "\t\t");
+	if (no < 0)
+		ret = kasnprintf(buf, "tci %pK: cbuf %pK va %pK len %u\n",
+				 wsm, wsm->cbuf, (void *)wsm->va, wsm->len);
+	else if (wsm->in_use)
+		ret = kasnprintf(buf,
+				 "wsm #%d: cbuf %pK va %pK len %u sva %x\n",
+				 no, wsm->cbuf, (void *)wsm->va, wsm->len,
+				 wsm->sva);
+
+	if (ret < 0)
+		return ret;
+
+	if (wsm->mmu) {
+		ret = tee_mmu_debug_structs(buf, wsm->mmu);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+int session_mc_open_session(struct tee_session *session,
+			    struct mcp_open_info *info)
+{
+	struct tee_wsm *wsm = &session->tci;
+	bool tci_in_use = false;
+	int ret;
+
+	/* Check that tci and its length make sense */
+	if (info->tci_len > MC_MAX_TCI_LEN)
+		return -EINVAL;
+
+	if (!info->tci_va != !info->tci_len) {
+		mc_dev_devel("TCI pointer and length are inconsistent");
+		return -EINVAL;
+	}
+
+	/* Add existing TCI map */
+	if (info->tci_mmu) {
+		ret = wsm_wrap(session, wsm, info->tci_mmu);
+		if (ret)
+			return ret;
+
+		tci_in_use = true;
+		mc_dev_devel("wrapped tci: mmu %p len %u flags %x",
+			     wsm->mmu, wsm->len, wsm->flags);
+	}
+
+	/* Create mapping for TCI */
+	if (info->tci_va) {
+		struct mc_ioctl_buffer buf = {
+			.va = info->tci_va,
+			.len = info->tci_len,
+			.flags = MC_IO_MAP_INPUT_OUTPUT,
+		};
+
+		ret = wsm_create(session, wsm, &buf);
+		if (ret)
+			return ret;
+
+		tci_in_use = true;
+		info->tci_mmu = wsm->mmu;
+		mc_dev_devel(
+			"created tci: mmu %p cbuf %p va %lx len %u flags %x",
+			wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->flags);
+	}
+
+	ret = mcp_open_session(&session->mcp_session, info, &tci_in_use);
+	if (info->tci_va && (ret || !tci_in_use))
+		wsm_free(session, &session->tci);
+
+	return ret;
+}
+
+/*
+ * Close session and unreference session object.
+ * Session object is assumed to have been removed from main list, which means
+ * that session_close cannot be called anymore.
+ */
+int session_close(struct tee_session *session)
+{
+	int ret;
+
+	if (session->is_gp) {
+		ret = iwp_close_session(&session->iwp_session);
+		if (!ret)
+			mc_dev_devel("closed GP session %x",
+				     session->iwp_session.sid);
+	} else {
+		ret = mcp_close_session(&session->mcp_session);
+		if (!ret)
+			mc_dev_devel("closed MC session %x",
+				     session->mcp_session.sid);
+	}
+	return ret;
+}
+
+/*
+ * Session is to be removed from NWd records as SWd is dead
+ */
+int session_mc_cleanup_session(struct tee_session *session)
+{
+	mcp_cleanup_session(&session->mcp_session);
+	return session_put(session);
+}
+
+/*
+ * Send a notification to TA
+ */
+int session_mc_notify(struct tee_session *session)
+{
+	if (!session) {
+		mc_dev_devel("Session pointer is null");
+		return -EINVAL;
+	}
+
+	return mcp_notify(&session->mcp_session);
+}
+
+/*
+ * Sleep until next notification from SWd.
+ */
+int session_mc_wait(struct tee_session *session, s32 timeout,
+		    bool silent_expiry)
+{
+	return mcp_wait(&session->mcp_session, timeout, silent_expiry);
+}
+
+/*
+ * Share buffers with SWd and add corresponding WSM objects to session.
+ * This may involve some re-use or cleanup of inactive mappings.
+ */
+int session_mc_map(struct tee_session *session, struct tee_mmu *mmu,
+		   struct mc_ioctl_buffer *buf)
+{
+	struct tee_wsm *wsm;
+	u32 sva;
+	int i, ret;
+
+	mutex_lock(&session->wsms_lock);
+	/* Look for an available slot in the session WSMs array */
+	for (i = 0; i < MC_MAP_MAX; i++)
+		if (!session->wsms[i].in_use)
+			break;
+
+	if (i == MC_MAP_MAX) {
+		ret = -EPERM;
+		mc_dev_devel("no available WSM slot in session %x",
+			     session->mcp_session.sid);
+		goto out;
+	}
+
+	wsm = &session->wsms[i];
+	if (!mmu)
+		ret = wsm_create(session, wsm, buf);
+	else
+		ret = wsm_wrap(session, wsm, mmu);
+
+	if (ret) {
+		mc_dev_devel("maps[%d] va=%llx create failed: %d",
+			     i, buf->va, ret);
+		goto out;
+	}
+
+	mc_dev_devel("created wsm #%d: mmu %p cbuf %p va %lx len %u flags %x",
+		     i, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->flags);
+	ret = mcp_map(session->mcp_session.sid, wsm->mmu, &sva);
+	if (ret) {
+		wsm_free(session, wsm);
+	} else {
+		buf->sva = sva;
+		wsm->sva = sva;
+	}
+
+out:
+	mutex_unlock(&session->wsms_lock);
+	mc_dev_devel("ret=%d", ret);
+	return ret;
+}
+
+/*
+ * In theory, stop sharing buffers with the SWd. In fact, mark them inactive.
+ */
+int session_mc_unmap(struct tee_session *session,
+		     const struct mc_ioctl_buffer *buf)
+{
+	struct tee_wsm *wsm;
+	struct mcp_buffer_map map;
+	int i, ret = -EINVAL;
+
+	mutex_lock(&session->wsms_lock);
+	/* Look for buffer in the session WSMs array */
+	for (i = 0; i < MC_MAP_MAX; i++)
+		if (session->wsms[i].in_use &&
+		    buf->va == session->wsms[i].va &&
+		    buf->len == session->wsms[i].len &&
+		    buf->sva == session->wsms[i].sva)
+			break;
+
+	if (i == MC_MAP_MAX) {
+		ret = -EINVAL;
+		mc_dev_devel("buffer va=%llx sva=%llx not found",
+			     buf->va, buf->sva);
+		goto out;
+	}
+
+	wsm = &session->wsms[i];
+	tee_mmu_buffer(wsm->mmu, &map);
+	map.secure_va = wsm->sva;
+
+	ret = mcp_unmap(session->mcp_session.sid, &map);
+	if (!ret)
+		wsm_free(session, wsm);
+
+out:
+	mutex_unlock(&session->wsms_lock);
+	return ret;
+}
+
+/*
+ * Read and clear last notification received from TA
+ */
+int session_mc_get_err(struct tee_session *session, s32 *err)
+{
+	return mcp_get_err(&session->mcp_session, err);
+}
+
+static void unmap_gp_bufs(struct tee_session *session,
+			  struct iwp_buffer_map *maps)
+{
+	int i;
+
+	/* Free WSMs and release registered shared memory references */
+	mutex_lock(&session->wsms_lock);
+	for (i = 0; i < MC_MAP_MAX; i++) {
+		if (session->wsms[i].in_use)
+			wsm_free(session, &session->wsms[i]);
+
+		if (maps[i].sva)
+			client_put_cwsm_sva(session->client, maps[i].sva);
+	}
+	mutex_unlock(&session->wsms_lock);
+}
+
+static int map_gp_bufs(struct tee_session *session,
+		       const struct mc_ioctl_buffer *bufs,
+		       struct gp_shared_memory **parents,
+		       struct iwp_buffer_map *maps)
+{
+	int i, ret = 0;
+
+	/* Create WSMs from bufs */
+	mutex_lock(&session->wsms_lock);
+	for (i = 0; i < MC_MAP_MAX; i++) {
+		/* Reset reference for temporary memory */
+		maps[i].map.addr = 0;
+		/* Reset reference for registered memory */
+		maps[i].sva = 0;
+		if (bufs[i].va) {
+			/* Temporary memory, needs mapping */
+			ret = wsm_create(session, &session->wsms[i], &bufs[i]);
+			if (ret) {
+				mc_dev_devel(
+					"maps[%d] va=%llx create failed: %d",
+					i, bufs[i].va, ret);
+				break;
+			}
+
+			tee_mmu_buffer(session->wsms[i].mmu, &maps[i].map);
+		} else if (parents[i]) {
+			/* Registered memory, already mapped */
+			maps[i].sva = client_get_cwsm_sva(session->client,
+							  parents[i]);
+			if (!maps[i].sva) {
+				ret = -EINVAL;
+				mc_dev_devel("couldn't find shared mem");
+				break;
+			}
+
+			mc_dev_devel("param[%d] has sva %x", i, maps[i].sva);
+		}
+	}
+	mutex_unlock(&session->wsms_lock);
+
+	/* Failed above */
+	if (i < MC_MAP_MAX)
+		unmap_gp_bufs(session, maps);
+
+	return ret;
+}
+
+int session_gp_open_session(struct tee_session *session,
+			    const struct mc_uuid_t *uuid,
+			    struct gp_operation *operation,
+			    struct gp_return *gp_ret)
+{
+	/* TEEC_MEMREF_TEMP_* buffers to map */
+	struct mc_ioctl_buffer bufs[MC_MAP_MAX];
+	struct iwp_buffer_map maps[MC_MAP_MAX];
+	struct gp_shared_memory *parents[MC_MAP_MAX] = { NULL };
+	struct client_gp_operation client_operation;
+	int ret = 0;
+
+	ret = iwp_open_session_prepare(&session->iwp_session, operation, bufs,
+				       parents, gp_ret);
+	if (ret)
+		return ret;
+
+	/* Create WSMs from bufs */
+	ret = map_gp_bufs(session, bufs, parents, maps);
+	if (ret) {
+		iwp_open_session_abort(&session->iwp_session);
+		return iwp_set_ret(ret, gp_ret);
+	}
+
+	/* Tell client about operation */
+	client_operation.started = operation->started;
+	client_operation.slot = iwp_session_slot(&session->iwp_session);
+	client_operation.cancelled = false;
+	if (!client_gp_operation_add(session->client, &client_operation)) {
+		iwp_open_session_abort(&session->iwp_session);
+		return iwp_set_ret(-ECANCELED, gp_ret);
+	}
+
+	/* Open/call TA */
+	ret = iwp_open_session(&session->iwp_session, uuid, operation, maps,
+			       NULL, NULL, gp_ret);
+	/* Cleanup */
+	client_gp_operation_remove(session->client, &client_operation);
+	unmap_gp_bufs(session, maps);
+	return ret;
+}
+
+int session_gp_open_session_domu(struct tee_session *session,
+				 const struct mc_uuid_t *uuid, u64 started,
+				 struct interworld_session *iws,
+				 struct tee_mmu **mmus,
+				 struct gp_return *gp_ret)
+{
+	/* TEEC_MEMREF_TEMP_* buffers to map */
+	struct client_gp_operation client_operation;
+	int ret = 0;
+
+	ret = iwp_open_session_prepare(&session->iwp_session, NULL, NULL, NULL,
+				       gp_ret);
+	if (ret)
+		return ret;
+
+	/* Tell client about operation */
+	client_operation.started = started;
+	client_operation.slot = iwp_session_slot(&session->iwp_session);
+	client_operation.cancelled = false;
+	if (!client_gp_operation_add(session->client, &client_operation)) {
+		iwp_open_session_abort(&session->iwp_session);
+		return iwp_set_ret(-ECANCELED, gp_ret);
+	}
+
+	/* Open/call TA */
+	ret = iwp_open_session(&session->iwp_session, uuid, NULL, NULL, iws,
+			       mmus, gp_ret);
+	/* Cleanup */
+	client_gp_operation_remove(session->client, &client_operation);
+	return ret;
+}
+
+int session_gp_invoke_command(struct tee_session *session, u32 command_id,
+			      struct gp_operation *operation,
+			      struct gp_return *gp_ret)
+{
+	/* TEEC_MEMREF_TEMP_* buffers to map */
+	struct mc_ioctl_buffer bufs[MC_MAP_MAX];
+	struct iwp_buffer_map maps[MC_MAP_MAX];
+	struct gp_shared_memory *parents[MC_MAP_MAX] = { NULL };
+	struct client_gp_operation client_operation;
+	int ret = 0;
+
+	ret = iwp_invoke_command_prepare(&session->iwp_session, command_id,
+					 operation, bufs, parents, gp_ret);
+	if (ret)
+		return ret;
+
+	/* Create WSMs from bufs */
+	ret = map_gp_bufs(session, bufs, parents, maps);
+	if (ret) {
+		iwp_invoke_command_abort(&session->iwp_session);
+		return iwp_set_ret(ret, gp_ret);
+	}
+
+	/* Tell client about operation */
+	client_operation.started = operation->started;
+	client_operation.slot = iwp_session_slot(&session->iwp_session);
+	client_operation.cancelled = false;
+	if (!client_gp_operation_add(session->client, &client_operation)) {
+		iwp_invoke_command_abort(&session->iwp_session);
+		return iwp_set_ret(-ECANCELED, gp_ret);
+	}
+
+	/* Call TA */
+	ret = iwp_invoke_command(&session->iwp_session, operation, maps, NULL,
+				 NULL, gp_ret);
+	/* Cleanup */
+	client_gp_operation_remove(session->client, &client_operation);
+	unmap_gp_bufs(session, maps);
+	return ret;
+}
+
+int session_gp_invoke_command_domu(struct tee_session *session,
+				   u64 started, struct interworld_session *iws,
+				   struct tee_mmu **mmus,
+				   struct gp_return *gp_ret)
+{
+	struct client_gp_operation client_operation;
+	int ret = 0;
+
+	ret = iwp_invoke_command_prepare(&session->iwp_session, 0, NULL, NULL,
+					 NULL, gp_ret);
+	if (ret)
+		return ret;
+
+	/* Tell client about operation */
+	client_operation.started = started;
+	client_operation.slot = iwp_session_slot(&session->iwp_session);
+	client_operation.cancelled = false;
+	if (!client_gp_operation_add(session->client, &client_operation)) {
+		iwp_invoke_command_abort(&session->iwp_session);
+		return iwp_set_ret(-ECANCELED, gp_ret);
+	}
+
+	/* Call TA */
+	ret = iwp_invoke_command(&session->iwp_session, NULL, NULL, iws, mmus,
+				 gp_ret);
+	/* Cleanup */
+	client_gp_operation_remove(session->client, &client_operation);
+	return ret;
+}
+
+int session_gp_request_cancellation(u64 slot)
+{
+	return iwp_request_cancellation(slot);
+}
+
+int session_debug_structs(struct kasnprintf_buf *buf,
+			  struct tee_session *session, bool is_closing)
+{
+	const char *type;
+	u32 session_id;
+	s32 err;
+	int i, ret;
+
+	if (session->is_gp) {
+		session_id = session->iwp_session.sid;
+		err = 0;
+		type = "GP";
+	} else {
+		session_id = session->mcp_session.sid;
+		session_mc_get_err(session, &err);
+		type = "MC";
+	}
+
+	ret = kasnprintf(buf, "\tsession %pK [%d]: %4x %s ec %d%s\n",
+			 session, kref_read(&session->kref), session_id, type,
+			 err, is_closing ? " <closing>" : "");
+	if (ret < 0)
+		return ret;
+
+	/* TCI */
+	if (session->tci.in_use) {
+		ret = wsm_debug_structs(buf, &session->tci, -1);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* WSMs */
+	mutex_lock(&session->wsms_lock);
+	for (i = 0; i < MC_MAP_MAX; i++) {
+		ret = wsm_debug_structs(buf, &session->wsms[i], i);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&session->wsms_lock);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.h
new file mode 100644
index 0000000..214d044
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/session.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _SESSION_H_
+#define _SESSION_H_
+
+#include <linux/list.h>
+
+#include "mcp.h"
+#include "iwp.h"
+
+struct tee_object;
+struct tee_mmu;
+struct mc_ioctl_buffer;
+
+struct tee_wsm {
+	/* Buffer NWd address (uva or kva, used only for lookup) */
+	uintptr_t		va;
+	/* Buffer length */
+	u32			len;
+	/* Buffer flags */
+	u32			flags;
+	/* Buffer SWd address */
+	u32			sva;
+	union {
+		/* MMU table */
+		struct tee_mmu		*mmu;
+		/* Index of re-used buffer (temporary) */
+		int			index;
+	};
+	/* Pointer to associated cbuf, if relevant */
+	struct cbuf		*cbuf;
+	/* State of this WSM */
+	int			in_use;
+};
+
+struct tee_session {
+	/* Session descriptor */
+	union {
+		struct mcp_session	mcp_session;
+		struct iwp_session	iwp_session;
+	};
+	/* Owner */
+	struct tee_client	*client;
+	/* Number of references kept to this object */
+	struct kref		kref;
+	/* WSM for the TCI */
+	struct tee_wsm		tci;
+	/* The list entry to attach to session list of owner */
+	struct list_head	list;
+	/* Session WSMs lock */
+	struct mutex		wsms_lock;
+	/* WSMs for a session */
+	struct tee_wsm		wsms[MC_MAP_MAX];
+	/* This TA is of Global Platform type */
+	bool			is_gp;
+};
+
+struct tee_session *session_create(struct tee_client *client,
+				   const struct mc_identity *identity);
+static inline void session_get(struct tee_session *session)
+{
+	kref_get(&session->kref);
+}
+
+int session_put(struct tee_session *session);
+int session_close(struct tee_session *session);
+
+int session_mc_open_session(struct tee_session *session,
+			    struct mcp_open_info *info);
+int session_mc_cleanup_session(struct tee_session *session);
+int session_mc_notify(struct tee_session *session);
+int session_mc_wait(struct tee_session *session, s32 timeout,
+		    bool silent_expiry);
+int session_mc_map(struct tee_session *session, struct tee_mmu *mmu,
+		   struct mc_ioctl_buffer *bufs);
+int session_mc_unmap(struct tee_session *session,
+		     const struct mc_ioctl_buffer *bufs);
+int session_mc_get_err(struct tee_session *session, s32 *err);
+
+int session_gp_open_session(struct tee_session *session,
+			    const struct mc_uuid_t *uuid,
+			    struct gp_operation *operation,
+			    struct gp_return *gp_ret);
+int session_gp_open_session_domu(struct tee_session *session,
+				 const struct mc_uuid_t *uuid, u64 started,
+				 struct interworld_session *iws,
+				 struct tee_mmu **mmus,
+				 struct gp_return *gp_ret);
+int session_gp_invoke_command(struct tee_session *session, u32 command_id,
+			      struct gp_operation *operation,
+			      struct gp_return *gp_ret);
+int session_gp_invoke_command_domu(struct tee_session *session,
+				   u64 started, struct interworld_session *iws,
+				   struct tee_mmu **mmus,
+				   struct gp_return *gp_ret);
+int session_gp_request_cancellation(u64 slot);
+
+int session_debug_structs(struct kasnprintf_buf *buf,
+			  struct tee_session *session, bool is_closing);
+
+#endif /* _SESSION_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/teeclientapi.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/teeclientapi.c
new file mode 100644
index 0000000..466efcf
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/teeclientapi.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "public/GP/tee_client_api.h"
+#include "public/mc_user.h"
+
+#include "main.h"
+#include "mci/mcinq.h"	/* TA termination codes */
+#include "client.h"
+
+/* Macros */
+#define _TEEC_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
+
+/* Parameter number */
+#define _TEEC_PARAMETER_NUMBER		4
+
+/*
+ * These error codes are still to be decided by GP. As we do not wish to
+ * expose any part of the GP TAF yet, they live here until we decide what
+ * to do about them.
+ */
+#define TEEC_ERROR_TA_LOCKED		0xFFFF0257
+#define TEEC_ERROR_SD_BLOCKED		0xFFFF0258
+#define TEEC_ERROR_TARGET_KILLED	0xFFFF0259
+
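+/* Used by teec_request_cancellation() to wait until an operation has started */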
+static DECLARE_WAIT_QUEUE_HEAD(operations_wq);
+
+static void _lib_uuid_to_array(const struct teec_uuid *uuid, u8 *uuid_array)
+{
+	const u8 *identifier_cursor = (const u8 *)uuid;
+	/* Packed signed nibble offsets used to reorder the uuid bytes */
+#ifdef S_BIG_ENDIAN
+	u32 offsets = 0;
+#else
+	u32 offsets = 0xF1F1DF13;
+#endif
+	u32 i;
+
+	for (i = 0; i < sizeof(struct teec_uuid); i++) {
+		/* Two-digit hex number */
+		s32 offset = ((s32)((offsets & 0xF) << 28)) >> 28;
+		u8 number = identifier_cursor[offset];
+
+		offsets >>= 4;
+		identifier_cursor++;
+
+		uuid_array[i] = number;
+	}
+}
+
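+/* Convert a Client API operation into the driver's GP operation layout */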
+static u32 _teec_to_gp_operation(struct teec_operation *teec_op,
+				 struct gp_operation *gp_op)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+		switch (_TEEC_GET_PARAM_TYPE(teec_op->param_types, i)) {
+		case TEEC_VALUE_INPUT:
+		case TEEC_VALUE_INOUT:
+			gp_op->params[i].value.a = teec_op->params[i].value.a;
+			gp_op->params[i].value.b = teec_op->params[i].value.b;
+			break;
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			gp_op->params[i].tmpref.buffer =
+				(uintptr_t)teec_op->params[i].tmpref.buffer;
+			gp_op->params[i].tmpref.size =
+				teec_op->params[i].tmpref.size;
+			break;
+		case TEEC_MEMREF_WHOLE:
+		case TEEC_MEMREF_PARTIAL_INPUT:
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+			gp_op->params[i].memref.offset =
+				teec_op->params[i].memref.offset;
+			gp_op->params[i].memref.size =
+				teec_op->params[i].memref.size;
+			gp_op->params[i].memref.parent.buffer =
+			 (uintptr_t)teec_op->params[i].memref.parent->buffer;
+			gp_op->params[i].memref.parent.size =
+				teec_op->params[i].memref.parent->size;
+			gp_op->params[i].memref.parent.flags =
+				teec_op->params[i].memref.parent->flags;
+			break;
+		case TEEC_NONE:
+		case TEEC_VALUE_OUTPUT:
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	gp_op->param_types = teec_op->param_types;
+	return ret;
+}
+
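+/* Copy output values and updated sizes back to the Client API operation */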
+static void _teec_from_gp_operation(struct gp_operation *gp_op,
+				    struct teec_operation *teec_op)
+{
+	int i;
+
+	for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+		switch (_TEEC_GET_PARAM_TYPE(gp_op->param_types, i)) {
+		case TEEC_VALUE_OUTPUT:
+		case TEEC_VALUE_INOUT:
+			teec_op->params[i].value.a = gp_op->params[i].value.a;
+			teec_op->params[i].value.b = gp_op->params[i].value.b;
+			break;
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			teec_op->params[i].tmpref.size =
+				gp_op->params[i].tmpref.size;
+			break;
+		case TEEC_MEMREF_WHOLE:
+			break;
+		case TEEC_MEMREF_PARTIAL_INPUT:
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+			teec_op->params[i].memref.size =
+				gp_op->params[i].memref.size;
+			break;
+		case TEEC_NONE:
+		case TEEC_VALUE_INPUT:
+			break;
+		default:
+			break;
+		}
+	}
+}
+
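+/* Map a positive errno value to a TEEC_ERROR_* return code */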
+static u32 _teec_convert_error(int errno)
+{
+	switch (errno) {
+	case ENOENT:
+		return TEEC_ERROR_ITEM_NOT_FOUND;
+	case EACCES:
+		return TEEC_ERROR_ACCESS_DENIED;
+	case EINVAL:
+		return TEEC_ERROR_BAD_PARAMETERS;
+	case ENOSPC:
+		return TEEC_ERROR_OUT_OF_MEMORY;
+	case ECONNREFUSED:
+		return TEEC_ERROR_SD_BLOCKED;
+	case ECONNABORTED:
+		return TEEC_ERROR_TA_LOCKED;
+	case ECONNRESET:
+		return TEEC_ERROR_TARGET_KILLED;
+	case EBUSY:
+		return TEEC_ERROR_BUSY;
+	case EKEYREJECTED:
+		return TEEC_ERROR_SECURITY;
+	case ETIME:
+		return TEEC_ERROR_TARGET_DEAD;
+	default:
+		return TEEC_ERROR_GENERIC;
+	}
+}
+
+/* teec_initialize_context: TEEC_SUCCESS, Another error code from Table 4-2 */
+u32 teec_initialize_context(const char *name, struct teec_context *context)
+{
+	struct tee_client *client;
+	int ret;
+	(void)name;
+
+	mc_dev_devel("== %s() ==============", __func__);
+
+	if (!context) {
+		mc_dev_devel("context is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	/* Make sure TEE was started */
+	ret = mc_wait_tee_start();
+	if (ret) {
+		mc_dev_err(ret, "TEE failed to start, now or in the past");
+		return TEEC_ERROR_BAD_STATE;
+	}
+
+	/* Create client */
+	client = client_create(true);
+	if (!client)
+		return TEEC_ERROR_OUT_OF_MEMORY;
+
+	/* Store client in context */
+	context->imp.client = client;
+
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_initialize_context);
+
+/*
+ * The implementation of this function MUST NOT be able to fail: after this
+ * function returns the Client Application must be able to consider that the
+ * Context has been closed
+ */
+void teec_finalize_context(struct teec_context *context)
+{
+	mc_dev_devel("== %s() ==============", __func__);
+
+	/* The parameter context MUST point to an initialized TEE Context */
+	if (!context) {
+		mc_dev_devel("context is NULL");
+		return;
+	}
+
+	client_close(context->imp.client);
+	context->imp.client = NULL;
+}
+EXPORT_SYMBOL(teec_finalize_context);
+
+/*
+ * If the return_origin is different from TEEC_ORIGIN_TRUSTED_APP, an error code
+ * from Table 4-2. If the return_origin is equal to TEEC_ORIGIN_TRUSTED_APP, a
+ * return code defined by the protocol between the Client Application and the
+ * Trusted Application
+ */
+u32 teec_open_session(struct teec_context *context,
+		      struct teec_session *session,
+		      const struct teec_uuid *destination,
+		      u32 connection_method,
+		      const void *connection_data,
+		      struct teec_operation *operation,
+		      u32 *return_origin)
+{
+	struct mc_uuid_t uuid;
+	struct mc_identity identity = {0};
+	struct tee_client *client = NULL;
+	struct gp_operation gp_op;
+	struct gp_return gp_ret;
+	int ret = 0, timeout;
+
+	mc_dev_devel("== %s() ==============", __func__);
+	gp_ret.value = TEEC_SUCCESS;
+	if (return_origin)
+		*return_origin = TEEC_ORIGIN_API;
+
+	/* The parameter context MUST point to an initialized TEE Context */
+	if (!context) {
+		mc_dev_devel("context is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	if (!context->imp.client) {
+		mc_dev_devel("context not initialized");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	client = context->imp.client;
+
+	if (!session) {
+		mc_dev_devel("session is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
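+	/* Kernel clients always use the kernel login type */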
+	connection_method = TEEC_TT_LOGIN_KERNEL;
+	session->imp.active = false;
+
+	_lib_uuid_to_array(destination, uuid.value);
+
+	memset(&gp_op, 0, sizeof(gp_op));
+	if (operation) {
+		operation->imp.session = &session->imp;
+		ret = _teec_to_gp_operation(operation, &gp_op);
+		if (ret)
+			return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	identity.login_type = (enum mc_login_type)connection_method;
+
+	/* Wait for GP loading to be possible, maximum 30s */
+	timeout = 30;
+	do {
+		ret = client_gp_open_session(client, &uuid, &gp_op, &identity,
+					     &gp_ret, &session->imp.session_id);
+		if (!ret || ret != -EAGAIN)
+			break;
+
+		msleep(1000);
+	} while (--timeout);
+
+	if (ret || gp_ret.value != TEEC_SUCCESS) {
+		mc_dev_devel("client_gp_open_session failed(%08x) %08x", ret,
+			     gp_ret.value);
+		if (ret)
+			gp_ret.value = _teec_convert_error(-ret);
+		else if (return_origin)
+			/* Update origin as it's not the API */
+			*return_origin = gp_ret.origin;
+	} else {
+		mc_dev_devel(" created session ID %x", session->imp.session_id);
+		session->imp.context = context->imp;
+		session->imp.active = true;
+		if (operation)
+			_teec_from_gp_operation(&gp_op, operation);
+	}
+
+	mc_dev_devel(" %s() = 0x%x", __func__, gp_ret.value);
+	return gp_ret.value;
+}
+EXPORT_SYMBOL(teec_open_session);
+
+u32 teec_invoke_command(struct teec_session *session,
+			u32 command_id,
+			struct teec_operation *operation,
+			u32 *return_origin)
+{
+	struct tee_client *client = NULL;
+	struct gp_operation gp_op = {0};
+	struct gp_return gp_ret = {0};
+	int ret = 0;
+
+	mc_dev_devel("== %s() ==============", __func__);
+
+	gp_ret.value = TEEC_SUCCESS;
+	if (return_origin)
+		*return_origin = TEEC_ORIGIN_API;
+
+	if (!session) {
+		mc_dev_devel("session is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	if (!session->imp.active) {
+		mc_dev_devel("session is inactive");
+		return TEEC_ERROR_BAD_STATE;
+	}
+	client = session->imp.context.client;
+
+	if (operation) {
+		operation->imp.session = &session->imp;
+		if (_teec_to_gp_operation(operation, &gp_op))
+			return TEEC_ERROR_BAD_PARAMETERS;
+	} else {
+		gp_op.param_types = 0;
+	}
+
+	ret = client_gp_invoke_command(client, session->imp.session_id,
+				       command_id, &gp_op, &gp_ret);
+
+	if (ret || gp_ret.value != TEEC_SUCCESS) {
+		mc_dev_devel("client_gp_invoke_command failed(%08x) %08x", ret,
+			     gp_ret.value);
+		if (ret)
+			gp_ret.value = _teec_convert_error(-ret);
+		else if (return_origin)
+			/* Update origin as it's not the API */
+			*return_origin = gp_ret.origin;
+	} else if (operation) {
+		_teec_from_gp_operation(&gp_op, operation);
+	}
+
+	mc_dev_devel(" %s() = 0x%x", __func__, gp_ret.value);
+	return gp_ret.value;
+}
+EXPORT_SYMBOL(teec_invoke_command);
+
+void teec_close_session(struct teec_session *session)
+{
+	int ret = 0;
+	struct tee_client *client = NULL;
+
+	mc_dev_devel("== %s() ==============", __func__);
+
+	/* The implementation MUST do nothing if session is NULL */
+	if (!session) {
+		mc_dev_devel("session is NULL");
+		return;
+	}
+	client = session->imp.context.client;
+
+	if (session->imp.active) {
+		ret = client_gp_close_session(client, session->imp.session_id);
+
+		if (ret)
+			/* continue even in case of error */
+			mc_dev_devel("client_gp_close failed(%08x)", ret);
+
+		session->imp.active = false;
+	}
+
+	mc_dev_devel(" %s() = 0x%x", __func__, ret);
+}
+EXPORT_SYMBOL(teec_close_session);
+
+/*
+ * Implementation note. We internally handle two kinds of pointers: kernel
+ * memory (kmalloc, get_pages, ...) and dynamic memory (vmalloc). A global
+ * pointer from a kernel module has the same format as a vmalloc buffer, but
+ * our code cannot detect that, so it considers it a kmalloc buffer. A TA
+ * trying to use such a shared buffer is likely to crash
+ */
+u32 teec_register_shared_memory(struct teec_context *context,
+				struct teec_shared_memory *shared_mem)
+{
+	struct gp_shared_memory memref;
+	struct gp_return gp_ret;
+	int ret = 0;
+
+	mc_dev_devel("== %s() ==============", __func__);
+
+	/* The parameter context MUST point to an initialized TEE Context */
+	if (!context) {
+		mc_dev_devel("context is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	/*
+	 * The parameter shared_mem MUST point to the Shared Memory structure
+	 * defining the memory region to register
+	 */
+	if (!shared_mem) {
+		mc_dev_devel("shared_mem is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	/*
+	 * The buffer field MUST point to the memory region to be shared,
+	 * and MUST not be NULL
+	 */
+	if (!shared_mem->buffer) {
+		mc_dev_devel("shared_mem->buffer is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	if (shared_mem->flags & ~TEEC_MEM_INOUT) {
+		mc_dev_devel("shared_mem->flags is incorrect");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	if (!shared_mem->flags) {
+		mc_dev_devel("shared_mem->flags is incorrect");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	memref.buffer = (uintptr_t)shared_mem->buffer;
+	memref.flags = shared_mem->flags;
+	memref.size = shared_mem->size;
+	ret = client_gp_register_shared_mem(context->imp.client, NULL, NULL,
+					    &memref, &gp_ret);
+
+	if (ret)
+		return _teec_convert_error(-ret);
+
+	shared_mem->imp.client = context->imp.client;
+	shared_mem->imp.implementation_allocated = false;
+
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_register_shared_memory);
+
+u32 teec_allocate_shared_memory(struct teec_context *context,
+				struct teec_shared_memory *shared_mem)
+{
+	struct gp_shared_memory memref;
+	struct gp_return gp_ret;
+	int ret = 0;
+
+	/* No connection to "context"? */
+	mc_dev_devel("== %s() ==============", __func__);
+
+	/* The parameter context MUST point to an initialized TEE Context */
+	if (!context) {
+		mc_dev_devel("context is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	/*
+	 * The parameter shared_mem MUST point to the Shared Memory structure
+	 * defining the memory region to register
+	 */
+	if (!shared_mem) {
+		mc_dev_devel("shared_mem is NULL");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	if (shared_mem->flags & ~TEEC_MEM_INOUT) {
+		mc_dev_devel("shared_mem->flags is incorrect");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	if (!shared_mem->flags) {
+		mc_dev_devel("shared_mem->flags is incorrect");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	shared_mem->buffer = vmalloc(shared_mem->size);
+	if (!shared_mem->buffer)
+		return TEEC_ERROR_OUT_OF_MEMORY;
+
+	memref.buffer = (uintptr_t)shared_mem->buffer;
+	memref.flags = shared_mem->flags;
+	memref.size = shared_mem->size;
+	ret = client_gp_register_shared_mem(context->imp.client, NULL, NULL,
+					    &memref, &gp_ret);
+
+	if (ret) {
+		vfree(shared_mem->buffer);
+		shared_mem->buffer = NULL;
+		shared_mem->size = 0;
+		return _teec_convert_error(-ret);
+	}
+
+	shared_mem->imp.client = context->imp.client;
+	shared_mem->imp.implementation_allocated = true;
+
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_allocate_shared_memory);
+
+void teec_release_shared_memory(struct teec_shared_memory *shared_mem)
+{
+	struct gp_shared_memory memref;
+
+	/* No connection to "context"? */
+	mc_dev_devel("== %s() ==============", __func__);
+
+	/* The implementation MUST do nothing if shared_mem is NULL */
+	if (!shared_mem) {
+		mc_dev_devel("shared_mem is NULL");
+		return;
+	}
+
+	memref.buffer = (uintptr_t)shared_mem->buffer;
+	memref.flags = shared_mem->flags;
+	memref.size = shared_mem->size;
+	(void)client_gp_release_shared_mem(shared_mem->imp.client, &memref);
+
+	/*
+	 * For a memory buffer allocated using teec_allocate_shared_memory the
+	 * Implementation MUST free the underlying memory
+	 */
+	if (shared_mem->imp.implementation_allocated) {
+		if (shared_mem->buffer) {
+			vfree(shared_mem->buffer);
+			shared_mem->buffer = NULL;
+			shared_mem->size = 0;
+		}
+	}
+}
+EXPORT_SYMBOL(teec_release_shared_memory);
+
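+/*
+ * Wait for the operation to start; if it has not already completed, flag it
+ * (started = 2) and notify the session so the TA is signalled. Full
+ * cancellation handling is still marked as TODO below.
+ */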
+void teec_request_cancellation(struct teec_operation *operation)
+{
+	struct teec_session_imp *session;
+	int ret;
+
+	mc_dev_devel("== %s() ==============", __func__);
+
+	ret = wait_event_interruptible(operations_wq, operation->started);
+	if (ret == -ERESTARTSYS) {
+		mc_dev_devel("signal received");
+		return;
+	}
+
+	mc_dev_devel("operation->started changed from 0 to %d",
+		     operation->started);
+
+	if (operation->started > 1) {
+		mc_dev_devel("the operation has finished");
+		return;
+	}
+
+	session = operation->imp.session;
+	operation->started = 2;
+	wake_up_interruptible(&operations_wq);
+
+	if (!session->active) {
+		mc_dev_devel("Corresponding session is not active");
+		return;
+	}
+
+	/* TODO: handle cancellation */
+
+	/* Signal the Trustlet */
+	ret = client_notify_session(session->context.client,
+				    session->session_id);
+	if (ret)
+		mc_dev_devel("Notify failed: %d", ret);
+}
+EXPORT_SYMBOL(teec_request_cancellation);
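+
+/*
+ * Illustrative sketch of how a kernel module might drive this Client API.
+ * "ta_uuid" and the command ID are placeholders, not values defined by this
+ * driver:
+ *
+ *	struct teec_context ctx;
+ *	struct teec_session sess;
+ *	struct teec_operation op = {0};
+ *	u32 origin;
+ *
+ *	if (teec_initialize_context(NULL, &ctx) != TEEC_SUCCESS)
+ *		return;
+ *
+ *	if (teec_open_session(&ctx, &sess, &ta_uuid, TEEC_TT_LOGIN_KERNEL,
+ *			      NULL, &op, &origin) == TEEC_SUCCESS) {
+ *		teec_invoke_command(&sess, 1, &op, &origin);
+ *		teec_close_session(&sess);
+ *	}
+ *
+ *	teec_finalize_context(&ctx);
+ */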
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.c
new file mode 100644
index 0000000..afcefe9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>	/* struct vm_area_struct */
+#include <linux/uaccess.h>
+
+#include "public/mc_user.h"
+
+#include "main.h"
+#include "user.h"
+#include "client.h"
+#include "mcp.h"	/* mcp_get_version */
+
+/*
+ * Get client object from file pointer
+ */
+static inline struct tee_client *get_client(struct file *file)
+{
+	return (struct tee_client *)file->private_data;
+}
+
+/*
+ * Callback for system open()
+ * A set of internal client data is created and initialized.
+ *
+ * @inode
+ * @file
+ * Returns 0 on success or -ENOMEM if the client could not be allocated.
+ */
+static int user_open(struct inode *inode, struct file *file)
+{
+	struct tee_client *client;
+
+	/* Create client */
+	mc_dev_devel("from %s (%d)", current->comm, current->pid);
+	client = client_create(false);
+	if (!client)
+		return -ENOMEM;
+
+	/* Store client in user file */
+	file->private_data = client;
+	return 0;
+}
+
+/*
+ * Callback for system close()
+ * The client object is freed.
+ * @inode
+ * @file
+ * Returns 0, or -EPROTO if no client is attached to the file
+ */
+static int user_release(struct inode *inode, struct file *file)
+{
+	struct tee_client *client = get_client(file);
+
+	/* Close client */
+	mc_dev_devel("from %s (%d)", current->comm, current->pid);
+	if (!client)
+		return -EPROTO;
+
+	/* Detach client from user file */
+	file->private_data = NULL;
+
+	/* Destroy client, including remaining sessions */
+	client_close(client);
+	return 0;
+}
+
+/*
+ * Check r/w access to referenced memory
+ */
+static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg)
+{
+	int err = 0;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+	if (err)
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Callback for system ioctl()
+ * Implements most of the ClientLib API functions
+ * @file	pointer to file
+ * @id		ioctl command
+ * @arg		arguments
+ *
+ * Returns 0 on success or a negative errno on error
+ */
+static long user_ioctl(struct file *file, unsigned int id, unsigned long arg)
+{
+	struct tee_client *client = get_client(file);
+	int __user *uarg = (int __user *)arg;
+	int ret = -EINVAL;
+
+	mc_dev_devel("%u from %s", _IOC_NR(id), current->comm);
+
+	if (!client)
+		return -EPROTO;
+
+	if (ioctl_check_pointer(id, uarg))
+		return -EFAULT;
+
+	switch (id) {
+	case MC_IO_HAS_SESSIONS:
+		/* Freeze the client */
+		if (client_has_sessions(client))
+			ret = -ENOTEMPTY;
+		else
+			ret = 0;
+		break;
+
+	case MC_IO_OPEN_SESSION: {
+		struct mc_ioctl_open_session session;
+
+		if (copy_from_user(&session, uarg, sizeof(session))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_mc_open_session(client, &session.uuid,
+					     session.tci, session.tcilen,
+					     &session.sid);
+		if (ret)
+			break;
+
+		if (copy_to_user(uarg, &session, sizeof(session))) {
+			ret = -EFAULT;
+			client_remove_session(client, session.sid);
+			break;
+		}
+		break;
+	}
+	case MC_IO_OPEN_TRUSTLET: {
+		struct mc_ioctl_open_trustlet trustlet;
+
+		if (copy_from_user(&trustlet, uarg, sizeof(trustlet))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_mc_open_trustlet(client, trustlet.spid,
+					      trustlet.buffer, trustlet.tlen,
+					      trustlet.tci, trustlet.tcilen,
+					      &trustlet.sid);
+		if (ret)
+			break;
+
+		if (copy_to_user(uarg, &trustlet, sizeof(trustlet))) {
+			ret = -EFAULT;
+			client_remove_session(client, trustlet.sid);
+			break;
+		}
+		break;
+	}
+	case MC_IO_CLOSE_SESSION: {
+		u32 sid = (u32)arg;
+
+		ret = client_remove_session(client, sid);
+		break;
+	}
+	case MC_IO_NOTIFY: {
+		u32 sid = (u32)arg;
+
+		ret = client_notify_session(client, sid);
+		break;
+	}
+	case MC_IO_WAIT: {
+		struct mc_ioctl_wait wait;
+
+		if (copy_from_user(&wait, uarg, sizeof(wait))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = client_waitnotif_session(client, wait.sid, wait.timeout,
+					       wait.partial);
+		break;
+	}
+	case MC_IO_MAP: {
+		struct mc_ioctl_map map;
+
+		if (copy_from_user(&map, uarg, sizeof(map))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_mc_map(client, map.sid, NULL, &map.buf);
+		if (ret)
+			break;
+
+		/* Fill in return struct */
+		if (copy_to_user(uarg, &map, sizeof(map))) {
+			ret = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MC_IO_UNMAP: {
+		struct mc_ioctl_map map;
+
+		if (copy_from_user(&map, uarg, sizeof(map))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_mc_unmap(client, map.sid, &map.buf);
+		break;
+	}
+	case MC_IO_ERR: {
+		struct mc_ioctl_geterr __user *uerr =
+			(struct mc_ioctl_geterr __user *)uarg;
+		u32 sid;
+		s32 exit_code;
+
+		if (get_user(sid, &uerr->sid)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_get_session_exitcode(client, sid, &exit_code);
+		if (ret)
+			break;
+
+		/* Fill in return struct */
+		if (put_user(exit_code, &uerr->value)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		break;
+	}
+	case MC_IO_VERSION: {
+		struct mc_version_info version_info;
+
+		ret = mcp_get_version(&version_info);
+		if (ret)
+			break;
+
+		if (copy_to_user(uarg, &version_info, sizeof(version_info)))
+			ret = -EFAULT;
+
+		break;
+	}
+	case MC_IO_GP_INITIALIZE_CONTEXT: {
+		struct mc_ioctl_gp_initialize_context context;
+
+		if (copy_from_user(&context, uarg, sizeof(context))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_initialize_context(client, &context.ret);
+
+		if (copy_to_user(uarg, &context, sizeof(context))) {
+			ret = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MC_IO_GP_REGISTER_SHARED_MEM: {
+		struct mc_ioctl_gp_register_shared_mem shared_mem;
+
+		if (copy_from_user(&shared_mem, uarg, sizeof(shared_mem))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_register_shared_mem(client, NULL, NULL,
+						    &shared_mem.memref,
+						    &shared_mem.ret);
+
+		if (copy_to_user(uarg, &shared_mem, sizeof(shared_mem))) {
+			ret = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MC_IO_GP_RELEASE_SHARED_MEM: {
+		struct mc_ioctl_gp_release_shared_mem shared_mem;
+
+		if (copy_from_user(&shared_mem, uarg, sizeof(shared_mem))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_release_shared_mem(client, &shared_mem.memref);
+		break;
+	}
+	case MC_IO_GP_OPEN_SESSION: {
+		struct mc_ioctl_gp_open_session session;
+
+		if (copy_from_user(&session, uarg, sizeof(session))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_open_session(client, &session.uuid,
+					     &session.operation,
+					     &session.identity,
+					     &session.ret, &session.session_id);
+
+		if (copy_to_user(uarg, &session, sizeof(session))) {
+			ret = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MC_IO_GP_CLOSE_SESSION: {
+		struct mc_ioctl_gp_close_session session;
+
+		if (copy_from_user(&session, uarg, sizeof(session))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_close_session(client, session.session_id);
+		break;
+	}
+	case MC_IO_GP_INVOKE_COMMAND: {
+		struct mc_ioctl_gp_invoke_command command;
+
+		if (copy_from_user(&command, uarg, sizeof(command))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = client_gp_invoke_command(client, command.session_id,
+					       command.command_id,
+					       &command.operation,
+					       &command.ret);
+
+		if (copy_to_user(uarg, &command, sizeof(command))) {
+			ret = -EFAULT;
+			break;
+		}
+		break;
+	}
+	case MC_IO_GP_REQUEST_CANCELLATION: {
+		struct mc_ioctl_gp_request_cancellation cancel;
+
+		if (copy_from_user(&cancel, uarg, sizeof(cancel))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		client_gp_request_cancellation(client,
+					       cancel.operation.started);
+		ret = 0;
+		break;
+	}
+	default:
+		ret = -ENOIOCTLCMD;
+		mc_dev_err(ret, "unsupported command no %d", id);
+	}
+
+	return ret;
+}
+
+/*
+ * Callback for system mmap()
+ */
+static int user_mmap(struct file *file, struct vm_area_struct *vmarea)
+{
+	struct tee_client *client = get_client(file);
+
+	if ((vmarea->vm_end - vmarea->vm_start) > BUFFER_LENGTH_MAX) {
+		mc_dev_err(-EINVAL, "buffer size %lu too big",
+			   vmarea->vm_end - vmarea->vm_start);
+		return -EINVAL;
+	}
+
+	/* Alloc contiguous buffer for this client */
+	return client_cbuf_create(client,
+				  (u32)(vmarea->vm_end - vmarea->vm_start),
+				  NULL, vmarea);
+}
+
+static const struct file_operations mc_user_fops = {
+	.owner = THIS_MODULE,
+	.open = user_open,
+	.release = user_release,
+	.unlocked_ioctl = user_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = user_ioctl,
+#endif
+	.mmap = user_mmap,
+};
+
+int mc_user_init(struct cdev *cdev)
+{
+	cdev_init(cdev, &mc_user_fops);
+	return 0;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.h
new file mode 100644
index 0000000..1ca59ae
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/user.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _USER_H_
+#define _USER_H_
+
+struct cdev;
+
+int mc_user_init(struct cdev *cdev);
+static inline void mc_user_exit(void)
+{
+}
+
+#endif /* _USER_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.c
new file mode 100644
index 0000000..3d4efb2
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifdef CONFIG_XEN
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+#include "platform.h"		/* MC_XENBUS_MAP_RING_VALLOC_4_1 */
+#include "main.h"
+#include "admin.h"		/* tee_object* */
+#include "client.h"		/* Consider other VMs as clients */
+#include "mmu.h"
+#include "mcp.h"		/* mcp_get_version */
+#include "nq.h"
+#include "xen_common.h"
+#include "xen_be.h"
+
+#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+
+static struct {
+	struct list_head	xfes;
+	struct mutex		xfes_mutex;	/* Protect the above */
+} l_ctx;
+
+/* Maps */
+
+struct xen_be_map {
+	struct page		**pages;
+	grant_handle_t		*handles;
+	unsigned long		nr_pages;
+	u32			flags;
+	bool			pages_allocd;
+	bool			refs_mapped;
+	/* To auto-delete */
+	struct tee_deleter	deleter;
+};
+
+static void xen_be_map_delete(struct xen_be_map *map)
+{
+	int i;
+
+	if (map->refs_mapped) {
+		struct gnttab_unmap_grant_ref *unmaps;
+
+		unmaps = kcalloc(map->nr_pages, sizeof(*unmaps), GFP_KERNEL);
+		if (!unmaps)
+			/* Cannot go on */
+			return;
+
+		for (i = 0; i < map->nr_pages; i++)
+			gnttab_set_unmap_op(&unmaps[i], vaddr(map->pages[i]),
+					    map->flags, map->handles[i]);
+
+		if (gnttab_unmap_refs(unmaps, NULL, map->pages, map->nr_pages))
+			/* Cannot go on */
+			return;
+
+		for (i = 0; i < map->nr_pages; i++)
+			put_page(map->pages[i]);
+
+		kfree(unmaps);
+	}
+
+	if (map->pages_allocd)
+		gnttab_free_pages(map->nr_pages, map->pages);
+
+	kfree(map->handles);
+	kfree(map->pages);
+	kfree(map);
+	mc_dev_devel("freed xen map %p", map);
+	atomic_dec(&g_ctx.c_xen_maps);
+}
+
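+/*
+ * Map grant references received from the front-end. When pte_map is NULL,
+ * refs[] holds the references to map directly (the PTE pages). Otherwise
+ * the data page references are read out of the already mapped PTE pages.
+ */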
+static struct xen_be_map *be_map_create(const struct xen_be_map *pte_map,
+					grant_ref_t *refs, int nr_refs,
+					int pte_entries_max, int dom_id,
+					bool readonly)
+{
+	struct xen_be_map *map;
+	struct gnttab_map_grant_ref *maps = NULL;
+	int i, ret = -ENOMEM;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	atomic_inc(&g_ctx.c_xen_maps);
+	map->flags = GNTMAP_host_map;
+	if (readonly)
+		map->flags |= GNTMAP_readonly;
+
+	map->nr_pages = nr_refs;
+	map->pages = kcalloc(map->nr_pages, sizeof(*map->pages), GFP_KERNEL);
+	if (!map->pages)
+		goto out;
+
+	map->handles = kcalloc(map->nr_pages, sizeof(*map->handles),
+			       GFP_KERNEL);
+	if (!map->handles)
+		goto out;
+
+	if (gnttab_alloc_pages(map->nr_pages, map->pages))
+		goto out;
+
+	map->pages_allocd = true;
+	maps = kcalloc(map->nr_pages, sizeof(*maps), GFP_KERNEL);
+	if (!maps)
+		goto out;
+
+	if (pte_map) {
+		int k = 0, nr_refs_left = nr_refs;
+
+		for (i = 0; i < pte_map->nr_pages; i++) {
+			int j, nr_refs = nr_refs_left;
+			grant_ref_t *refs = (void *)vaddr(pte_map->pages[i]);
+
+			if (nr_refs > pte_entries_max)
+				nr_refs = pte_entries_max;
+
+			for (j = 0;  j < nr_refs; j++) {
+				mc_dev_devel("map [%d, %d] -> %d ref %u",
+					     i, j, k, refs[j]);
+#ifdef DEBUG
+				/* Throttle logging to avoid overwhelming the USB serial console */
+				usleep_range(100, 200);
+#endif
+				gnttab_set_map_op(
+					&maps[k], vaddr(map->pages[k]),
+					map->flags, refs[j], dom_id);
+				nr_refs_left--;
+				k++;
+			}
+		}
+	} else {
+		for (i = 0;  i < map->nr_pages; i++) {
+			mc_dev_devel("map table %d ref %u", i, refs[i]);
+			gnttab_set_map_op(&maps[i], vaddr(map->pages[i]),
+					  map->flags, refs[i], dom_id);
+		}
+	}
+
+	ret = gnttab_map_refs(maps, NULL, map->pages, map->nr_pages);
+	if (ret)
+		goto out;
+
+	map->refs_mapped = true;
+	/* Pin pages */
+	for (i = 0;  i < map->nr_pages; i++) {
+		get_page(map->pages[i]);
+		map->handles[i] = maps[i].handle;
+	}
+
+out:
+	kfree(maps);
+
+	if (ret) {
+		xen_be_map_delete(map);
+		return ERR_PTR(ret);
+	}
+
+	mc_dev_devel("created %s xen map %p", pte_map ? "buffer" : "ptes", map);
+	return map;
+}
+
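+/*
+ * Map a DomU buffer in two steps: first map the PTE pages, which carry the
+ * grant references of the data pages, then map the data pages themselves and
+ * drop the PTE mapping.
+ */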
+static struct xen_be_map *xen_be_map_create(struct tee_xen_buffer *buffer,
+					    int pte_entries_max, int dom_id)
+{
+	struct xen_be_map *map;
+	struct xen_be_map *pte_map;
+	int nr_pte_refs =
+		(buffer->info->nr_refs + pte_entries_max - 1) / pte_entries_max;
+
+	/* First map the PTE pages */
+	pte_map = be_map_create(NULL, buffer->data.refs, nr_pte_refs,
+				pte_entries_max, dom_id, true);
+	if (IS_ERR(pte_map))
+		return pte_map;
+
+	/* Now map the pages */
+	map = be_map_create(pte_map, NULL, buffer->info->nr_refs,
+			    pte_entries_max, dom_id,
+			    buffer->info->flags == MC_IO_MAP_INPUT);
+	/* PTE pages mapping not needed any more */
+	xen_be_map_delete(pte_map);
+	if (!IS_ERR(map)) {
+		/* Auto-delete */
+		map->deleter.object = map;
+		map->deleter.delete = (void(*)(void *))xen_be_map_delete;
+	}
+
+	return map;
+}
+
+/* Dom0 call to DomU */
+
+/* Must be called under xfe->ring_mutex */
+static inline void call_domu(struct tee_xfe *xfe, enum tee_xen_dom0_cmd cmd,
+			     u32 id, int ret)
+{
+	WARN_ON(!xfe->ring_busy);
+
+	/* Set command and ID */
+	xfe->ring->dom0.cmd = cmd;
+	xfe->ring->dom0.id = id;
+	xfe->ring->dom0.cmd_ret = ret;
+	mc_dev_devel("Dom0 -> DomU request %u id %u ret %d",
+		     xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+		     xfe->ring->dom0.cmd_ret);
+	/* Call */
+	notify_remote_via_irq(xfe->irq_dom0);
+	wait_for_completion(&xfe->ring_completion);
+}
+
+/* Will be called back under xfe->ring_mutex */
+static irqreturn_t xen_be_irq_handler_dom0_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	if (!xfe->ring->dom0.cmd) {
+		mc_dev_devel("Ignore IRQ with no command (on DomU connect)");
+		return IRQ_HANDLED;
+	}
+
+	WARN_ON(!xfe->ring_busy);
+
+	/* Response to a dom0 command, our side of ring locked by us */
+	mc_dev_devel("Dom0 -> DomU response %u id %u ret %d",
+		     xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+		     xfe->ring->dom0.cmd_ret);
+	xfe->ring->dom0.cmd = TEE_XEN_DOM0_NONE;
+	xfe->ring->dom0.id = 0;
+	xfe->ring->dom0.cmd_ret = -EPERM;
+	complete(&xfe->ring_completion);
+
+	return IRQ_HANDLED;
+}
+
+/* MC protocol interface */
+
+static inline int xen_be_get_version(struct tee_xfe *xfe)
+{
+	struct mc_version_info version_info;
+	int ret;
+
+	ret = mcp_get_version(&version_info);
+	if (ret)
+		return ret;
+
+	xfe->ring->domu.version_info = version_info;
+	return 0;
+}
+
+static inline int xen_be_mc_has_sessions(struct tee_xfe *xfe)
+{
+	return client_has_sessions(xfe->client) ? -EBUSY : 0;
+}
+
+static inline int xen_be_mc_open_session(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+	struct mcp_open_info info = {
+		.type = TEE_MC_UUID,
+		.uuid = &xfe->ring->domu.uuid,
+	};
+	int ret;
+
+	if (tci_buffer->info->flags) {
+		struct xen_be_map *map;
+		struct mcp_buffer_map b_map = {
+			.offset = tci_buffer->info->offset,
+			.length = tci_buffer->info->length,
+			.flags = tci_buffer->info->flags,
+		};
+
+		map = xen_be_map_create(tci_buffer, xfe->pte_entries_max,
+					xfe->xdev->otherend_id);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
+			goto out;
+		}
+
+		/* Shall be freed by session */
+		b_map.nr_pages = map->nr_pages;
+		info.tci_mmu = tee_mmu_wrap(&map->deleter, map->pages,
+					    &b_map);
+		if (IS_ERR(info.tci_mmu)) {
+			ret = PTR_ERR(info.tci_mmu);
+			info.tci_mmu = NULL;
+			goto out;
+		}
+	}
+
+	/* Open session */
+	ret = client_mc_open_common(xfe->client, &info,
+				    &xfe->ring->domu.session_id);
+
+out:
+	if (info.tci_mmu)
+		tee_mmu_put(info.tci_mmu);
+
+	mc_dev_devel("session %x, exit with %d",
+		     xfe->ring->domu.session_id, ret);
+	return ret;
+}
+
+static inline int xen_be_mc_open_trustlet(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *ta_buffer = &xfe->buffers[1];
+	struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+	struct mcp_open_info info = {
+		.type = TEE_MC_TA,
+	};
+	struct xen_be_map *ta_map;
+	void *addr = NULL;
+	int ret = -ENOMEM;
+
+	ta_map = xen_be_map_create(ta_buffer, xfe->pte_entries_max,
+				   xfe->xdev->otherend_id);
+	if (IS_ERR(ta_map))
+		return PTR_ERR(ta_map);
+
+	info.spid = xfe->ring->domu.spid;
+	addr = vmap(ta_map->pages, ta_map->nr_pages,
+		    VM_MAP | VM_IOREMAP | VM_USERMAP, PAGE_KERNEL);
+	if (!addr)
+		goto out;
+
+	info.va = (uintptr_t)addr + ta_buffer->info->offset;
+	info.len = ta_buffer->info->length;
+
+	if (tci_buffer->info->flags) {
+		struct xen_be_map *map;
+		struct mcp_buffer_map b_map = {
+			.offset = tci_buffer->info->offset,
+			.length = tci_buffer->info->length,
+			.flags = tci_buffer->info->flags,
+		};
+
+		map = xen_be_map_create(tci_buffer, xfe->pte_entries_max,
+					xfe->xdev->otherend_id);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
+			goto out;
+		}
+
+		/* Shall be freed by session */
+		b_map.nr_pages = map->nr_pages;
+		info.tci_mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+		if (IS_ERR(info.tci_mmu)) {
+			ret = PTR_ERR(info.tci_mmu);
+			info.tci_mmu = NULL;
+			goto out;
+		}
+	}
+
+	/* Open session */
+	ret = client_mc_open_common(xfe->client, &info,
+				    &xfe->ring->domu.session_id);
+
+out:
+	if (info.tci_mmu)
+		tee_mmu_put(info.tci_mmu);
+
+	if (addr)
+		vunmap(addr);
+
+	xen_be_map_delete(ta_map);
+
+	mc_dev_devel("session %x, exit with %d",
+		     xfe->ring->domu.session_id, ret);
+	return ret;
+}
+
+static inline int xen_be_mc_close_session(struct tee_xfe *xfe)
+{
+	return client_remove_session(xfe->client, xfe->ring->domu.session_id);
+}
+
+static inline int xen_be_mc_notify(struct tee_xfe *xfe)
+{
+	return client_notify_session(xfe->client, xfe->ring->domu.session_id);
+}
+
+/* mc_wait cannot keep the ring busy while waiting, so we use a worker */
+struct mc_wait_work {
+	struct work_struct	work;
+	struct tee_xfe		*xfe;
+	u32			session_id;
+	s32			timeout;
+	u32			id;
+};
+
+static void xen_be_mc_wait_worker(struct work_struct *work)
+{
+	struct mc_wait_work *wait_work =
+		container_of(work, struct mc_wait_work, work);
+	struct tee_xfe *xfe = wait_work->xfe;
+	int ret;
+
+	ret = client_waitnotif_session(wait_work->xfe->client,
+				       wait_work->session_id,
+				       wait_work->timeout, false);
+
+	/* Send return code */
+	mc_dev_devel("MC wait session done %x, ret %d",
+		     wait_work->session_id, ret);
+	ring_get(xfe);
+	/* In */
+	xfe->ring->dom0.session_id = wait_work->session_id;
+	/* Call */
+	call_domu(xfe, TEE_XEN_MC_WAIT_DONE, wait_work->id, ret);
+	/* Out */
+	ring_put(xfe);
+	kfree(wait_work);
+	tee_xfe_put(xfe);
+}
+
+static inline int xen_be_mc_wait(struct tee_xfe *xfe)
+{
+	struct mc_wait_work *wait_work;
+
+	/* Wait in a separate thread to release the communication ring */
+	wait_work = kzalloc(sizeof(*wait_work), GFP_KERNEL);
+	if (!wait_work)
+		return -ENOMEM;
+
+	tee_xfe_get(xfe);
+	wait_work->xfe = xfe;
+	wait_work->session_id = xfe->ring->domu.session_id;
+	wait_work->timeout = xfe->ring->domu.timeout;
+	wait_work->id = xfe->ring->domu.id;
+	INIT_WORK(&wait_work->work, xen_be_mc_wait_worker);
+	schedule_work(&wait_work->work);
+	return 0;
+}
+
+static inline int xen_be_mc_map(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct xen_be_map *map;
+	struct tee_mmu *mmu = NULL;
+	struct mc_ioctl_buffer buf;
+	struct mcp_buffer_map b_map = {
+		.offset = buffer->info->offset,
+		.length = buffer->info->length,
+		.flags = buffer->info->flags,
+	};
+	int ret;
+
+	map = xen_be_map_create(buffer, xfe->pte_entries_max,
+				xfe->xdev->otherend_id);
+	if (IS_ERR(map)) {
+		ret = PTR_ERR(map);
+		return ret;
+	}
+
+	/* Shall be freed by session */
+	b_map.nr_pages = map->nr_pages;
+	mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+	if (IS_ERR(mmu)) {
+		xen_be_map_delete(map);
+		return PTR_ERR(mmu);
+	}
+
+	ret = client_mc_map(xfe->client, xfe->ring->domu.session_id, mmu, &buf);
+	/* Releasing the MMU shall also clear the map */
+	tee_mmu_put(mmu);
+	if (!ret)
+		buffer->info->sva = buf.sva;
+
+	mc_dev_devel("session %x, exit with %d",
+		     xfe->ring->domu.session_id, ret);
+	return ret;
+}
+
+static inline int xen_be_mc_unmap(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct mc_ioctl_buffer buf = {
+		.len = buffer->info->length,
+		.sva = buffer->info->sva,
+	};
+	int ret;
+
+	ret = client_mc_unmap(xfe->client, xfe->ring->domu.session_id, &buf);
+
+	mc_dev_devel("session %x, exit with %d",
+		     xfe->ring->domu.session_id, ret);
+	return ret;
+}
+
+static inline int xen_be_mc_get_err(struct tee_xfe *xfe)
+{
+	int ret;
+
+	ret = client_get_session_exitcode(xfe->client,
+					  xfe->ring->domu.session_id,
+					  &xfe->ring->domu.err);
+	mc_dev_devel("session %x err %d, exit with %d",
+		     xfe->ring->domu.session_id, xfe->ring->domu.err, ret);
+	return ret;
+}
+
+/* GP protocol interface */
+
+static inline int xen_be_gp_register_shared_mem(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct xen_be_map *map;
+	struct tee_mmu *mmu = NULL;
+	struct gp_shared_memory memref = {
+		.buffer = buffer->info->addr,
+		.size = buffer->info->length,
+		.flags = buffer->info->flags,
+	};
+	struct mcp_buffer_map b_map = {
+		.offset = buffer->info->offset,
+		.length = buffer->info->length,
+		.flags = buffer->info->flags,
+	};
+	int ret;
+
+	map = xen_be_map_create(buffer, xfe->pte_entries_max,
+				xfe->xdev->otherend_id);
+	if (IS_ERR(map)) {
+		ret = PTR_ERR(map);
+		return ret;
+	}
+
+	/* Shall be freed by session */
+	b_map.nr_pages = map->nr_pages;
+	mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+	if (IS_ERR(mmu)) {
+		xen_be_map_delete(map);
+		return PTR_ERR(mmu);
+	}
+
+	ret = client_gp_register_shared_mem(xfe->client, mmu,
+					    &buffer->info->sva, &memref,
+					    &xfe->ring->domu.gp_ret);
+	/* Releasing the MMU shall also clear the map */
+	tee_mmu_put(mmu);
+	mc_dev_devel("session %x, exit with %d",
+		     xfe->ring->domu.session_id, ret);
+	return ret;
+}
+
+static inline int xen_be_gp_release_shared_mem(struct tee_xfe *xfe)
+{
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct gp_shared_memory memref = {
+		.buffer = buffer->info->addr,
+		.size = buffer->info->length,
+		.flags = buffer->info->flags,
+	};
+	int ret;
+
+	ret = client_gp_release_shared_mem(xfe->client, &memref);
+
+	mc_dev_devel("exit with %d", ret);
+	return ret;
+}
+
+/* GP functions cannot keep the ring busy while waiting, so we use a worker */
+struct gp_work {
+	struct work_struct		work;
+	struct tee_xfe			*xfe;
+	u64				operation_id;
+	struct interworld_session	iws;
+	struct tee_mmu			*mmus[4];
+	struct mc_uuid_t		uuid;
+	u32				session_id;
+	u32				id;
+};
+
+static void xen_be_gp_open_session_worker(struct work_struct *work)
+{
+	struct gp_work *gp_work = container_of(work, struct gp_work, work);
+	struct tee_xfe *xfe = gp_work->xfe;
+	struct gp_return gp_ret;
+	int i, ret;
+
+	ret = client_gp_open_session_domu(xfe->client, &gp_work->uuid,
+					  gp_work->operation_id, &gp_work->iws,
+					  gp_work->mmus, &gp_ret);
+	mc_dev_devel("GP open session done, ret %d", ret);
+	for (i = 0; i < TEE_BUFFERS; i++)
+		if (gp_work->mmus[i])
+			tee_mmu_put(gp_work->mmus[i]);
+
+	/* Send return code */
+	ring_get(xfe);
+	/* In */
+	xfe->ring->dom0.operation_id = gp_work->operation_id;
+	xfe->ring->dom0.iws = gp_work->iws;
+	xfe->ring->dom0.gp_ret = gp_ret;
+	/* Call */
+	call_domu(xfe, TEE_XEN_GP_OPEN_SESSION_DONE, gp_work->id, ret);
+	/* Out */
+	ring_put(xfe);
+	kfree(gp_work);
+	tee_xfe_put(xfe);
+}
+
+static inline int xen_be_gp_open_session(struct tee_xfe *xfe)
+{
+	struct gp_work *gp_work;
+	int i, ret = 0;
+
+	gp_work = kzalloc(sizeof(*gp_work), GFP_KERNEL);
+	if (!gp_work)
+		return -ENOMEM;
+
+	/* Map tmpref buffers */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		struct tee_xen_buffer *buffer = &xfe->buffers[i];
+		struct xen_be_map *map;
+		struct mcp_buffer_map b_map = {
+			.offset = buffer->info->offset,
+			.length = buffer->info->length,
+			.flags = buffer->info->flags,
+		};
+
+		if (!buffer->info->flags)
+			continue;
+
+		map = xen_be_map_create(buffer, xfe->pte_entries_max,
+					xfe->xdev->otherend_id);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
+			goto err_map;
+		}
+
+		/* Shall be freed by session */
+		b_map.nr_pages = map->nr_pages;
+		gp_work->mmus[i] = tee_mmu_wrap(&map->deleter, map->pages,
+						&b_map);
+		if (IS_ERR(gp_work->mmus[i])) {
+			xen_be_map_delete(map);
+			ret = PTR_ERR(gp_work->mmus[i]);
+			goto err_mmus;
+		}
+	}
+
+	tee_xfe_get(xfe);
+	gp_work->xfe = xfe;
+	gp_work->operation_id = xfe->ring->domu.operation_id;
+	gp_work->iws = xfe->ring->domu.iws;
+	gp_work->uuid = xfe->ring->domu.uuid;
+	gp_work->id = xfe->ring->domu.id;
+	INIT_WORK(&gp_work->work, xen_be_gp_open_session_worker);
+	schedule_work(&gp_work->work);
+	return 0;
+
+err_mmus:
+	for (i = 0; i < TEE_BUFFERS; i++)
+		if (!IS_ERR_OR_NULL(gp_work->mmus[i]))
+			tee_mmu_put(gp_work->mmus[i]);
+err_map:
+	kfree(gp_work);
+	return ret;
+}
+
+static void xen_be_gp_close_session_worker(struct work_struct *work)
+{
+	struct gp_work *gp_work = container_of(work, struct gp_work, work);
+	struct tee_xfe *xfe = gp_work->xfe;
+	int ret;
+
+	ret = client_gp_close_session(xfe->client, gp_work->session_id);
+	mc_dev_devel("GP close session done, ret %d", ret);
+
+	/* Send return code */
+	ring_get(xfe);
+	/* In */
+	xfe->ring->dom0.operation_id = gp_work->operation_id;
+	/* Call */
+	call_domu(xfe, TEE_XEN_GP_CLOSE_SESSION_DONE, gp_work->id, ret);
+	/* Out */
+	ring_put(xfe);
+	kfree(gp_work);
+	tee_xfe_put(xfe);
+}
+
+static inline int xen_be_gp_close_session(struct tee_xfe *xfe)
+{
+	struct gp_work *gp_work;
+
+	gp_work = kzalloc(sizeof(*gp_work), GFP_KERNEL);
+	if (!gp_work)
+		return -ENOMEM;
+
+	tee_xfe_get(xfe);
+	gp_work->xfe = xfe;
+	gp_work->operation_id = xfe->ring->domu.operation_id;
+	gp_work->session_id = xfe->ring->domu.session_id;
+	gp_work->id = xfe->ring->domu.id;
+	INIT_WORK(&gp_work->work, xen_be_gp_close_session_worker);
+	schedule_work(&gp_work->work);
+	return 0;
+}
+
+static void xen_be_gp_invoke_command_worker(struct work_struct *work)
+{
+	struct gp_work *gp_work = container_of(work, struct gp_work, work);
+	struct tee_xfe *xfe = gp_work->xfe;
+	struct gp_return gp_ret;
+	int i, ret;
+
+	ret = client_gp_invoke_command_domu(xfe->client, gp_work->session_id,
+					    gp_work->operation_id,
+					    &gp_work->iws, gp_work->mmus,
+					    &gp_ret);
+	mc_dev_devel("GP invoke command done, ret %d", ret);
+	for (i = 0; i < TEE_BUFFERS; i++)
+		if (gp_work->mmus[i])
+			tee_mmu_put(gp_work->mmus[i]);
+
+	/* Send return code */
+	ring_get(xfe);
+	/* In */
+	xfe->ring->dom0.operation_id = gp_work->operation_id;
+	xfe->ring->dom0.iws = gp_work->iws;
+	xfe->ring->dom0.gp_ret = gp_ret;
+	/* Call */
+	call_domu(xfe, TEE_XEN_GP_INVOKE_COMMAND_DONE, gp_work->id, ret);
+	/* Out */
+	ring_put(xfe);
+	kfree(gp_work);
+	tee_xfe_put(xfe);
+}
+
+static inline int xen_be_gp_invoke_command(struct tee_xfe *xfe)
+{
+	struct gp_work *gp_work;
+	int i, ret = 0;
+
+	gp_work = kzalloc(sizeof(*gp_work), GFP_KERNEL);
+	if (!gp_work)
+		return -ENOMEM;
+
+	/* Map tmpref buffers */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		struct tee_xen_buffer *buffer = &xfe->buffers[i];
+		struct xen_be_map *map;
+		struct mcp_buffer_map b_map = {
+			.offset = buffer->info->offset,
+			.length = buffer->info->length,
+			.flags = buffer->info->flags,
+		};
+
+		if (!buffer->info->flags)
+			continue;
+
+		map = xen_be_map_create(buffer, xfe->pte_entries_max,
+					xfe->xdev->otherend_id);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
+			/* Release MMUs already created for previous buffers */
+			goto err_mmus;
+		}
+
+		/* Shall be freed by session */
+		b_map.nr_pages = map->nr_pages;
+		gp_work->mmus[i] = tee_mmu_wrap(&map->deleter, map->pages,
+						&b_map);
+		if (IS_ERR(gp_work->mmus[i])) {
+			xen_be_map_delete(map);
+			ret = PTR_ERR(gp_work->mmus[i]);
+			goto err_mmus;
+		}
+	}
+
+	tee_xfe_get(xfe);
+	gp_work->xfe = xfe;
+	gp_work->operation_id = xfe->ring->domu.operation_id;
+	gp_work->iws = xfe->ring->domu.iws;
+	gp_work->session_id = xfe->ring->domu.session_id;
+	gp_work->id = xfe->ring->domu.id;
+	INIT_WORK(&gp_work->work, xen_be_gp_invoke_command_worker);
+	schedule_work(&gp_work->work);
+	return 0;
+
+err_mmus:
+	for (i = 0; i < TEE_BUFFERS; i++)
+		if (!IS_ERR_OR_NULL(gp_work->mmus[i]))
+			tee_mmu_put(gp_work->mmus[i]);
+	kfree(gp_work);
+	return ret;
+}
+
+static inline int xen_be_gp_request_cancellation(struct tee_xfe *xfe)
+{
+	client_gp_request_cancellation(xfe->client,
+				       xfe->ring->domu.operation_id);
+	return 0;
+}
+
+static irqreturn_t xen_be_irq_handler_domu_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	if (!xfe->ring->domu.cmd) {
+		mc_dev_devel("Ignore IRQ with no command (on DomU connect)");
+		return IRQ_HANDLED;
+	}
+
+	/* DomU event, their side of ring locked by them */
+	schedule_work(&xfe->work);
+
+	return IRQ_HANDLED;
+}
+
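+/*
+ * Bottom half for DomU requests: decode the command written in the
+ * shared ring, run the matching handler, store its return code in
+ * otherend_ret and notify the front-end through its event channel.
+ */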
+static void xen_be_irq_handler_domu_bh(struct work_struct *data)
+{
+	struct tee_xfe *xfe = container_of(data, struct tee_xfe, work);
+
+	xfe->ring->domu.otherend_ret = -EINVAL;
+	mc_dev_devel("DomU -> Dom0 command %u id %u",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id);
+	switch (xfe->ring->domu.cmd) {
+	case TEE_XEN_DOMU_NONE:
+		return;
+	/* MC */
+	case TEE_XEN_MC_HAS_SESSIONS:
+		xfe->ring->domu.otherend_ret = xen_be_mc_has_sessions(xfe);
+		break;
+	case TEE_XEN_GET_VERSION:
+		xfe->ring->domu.otherend_ret = xen_be_get_version(xfe);
+		break;
+	case TEE_XEN_MC_OPEN_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_mc_open_session(xfe);
+		break;
+	case TEE_XEN_MC_OPEN_TRUSTLET:
+		xfe->ring->domu.otherend_ret = xen_be_mc_open_trustlet(xfe);
+		break;
+	case TEE_XEN_MC_CLOSE_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_mc_close_session(xfe);
+		break;
+	case TEE_XEN_MC_NOTIFY:
+		xfe->ring->domu.otherend_ret = xen_be_mc_notify(xfe);
+		break;
+	case TEE_XEN_MC_WAIT:
+		xfe->ring->domu.otherend_ret = xen_be_mc_wait(xfe);
+		break;
+	case TEE_XEN_MC_MAP:
+		xfe->ring->domu.otherend_ret = xen_be_mc_map(xfe);
+		break;
+	case TEE_XEN_MC_UNMAP:
+		xfe->ring->domu.otherend_ret = xen_be_mc_unmap(xfe);
+		break;
+	case TEE_XEN_MC_GET_ERR:
+		xfe->ring->domu.otherend_ret = xen_be_mc_get_err(xfe);
+		break;
+	/* GP */
+	case TEE_XEN_GP_REGISTER_SHARED_MEM:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_register_shared_mem(xfe);
+		break;
+	case TEE_XEN_GP_RELEASE_SHARED_MEM:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_release_shared_mem(xfe);
+		break;
+	case TEE_XEN_GP_OPEN_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_gp_open_session(xfe);
+		break;
+	case TEE_XEN_GP_CLOSE_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_gp_close_session(xfe);
+		break;
+	case TEE_XEN_GP_INVOKE_COMMAND:
+		xfe->ring->domu.otherend_ret = xen_be_gp_invoke_command(xfe);
+		break;
+	case TEE_XEN_GP_REQUEST_CANCELLATION:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_request_cancellation(xfe);
+		break;
+	}
+
+	mc_dev_devel("DomU -> Dom0 result %u id %u ret %d",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id,
+		     xfe->ring->domu.otherend_ret);
+	notify_remote_via_irq(xfe->irq_domu);
+}
+
+/* Device */
+
+static const struct xenbus_device_id xen_be_ids[] = {
+	{ "tee_xen" },
+	{ "" }
+};
+
+/* Called when a front-end is created */
+static int xen_be_probe(struct xenbus_device *xdev,
+			const struct xenbus_device_id *id)
+{
+	struct tee_xfe *xfe;
+	int ret = 0;
+
+	ret = xenbus_switch_state(xdev, XenbusStateInitWait);
+	if (ret) {
+		xenbus_dev_fatal(xdev, ret,
+				 "failed to change state to initwait");
+		return ret;
+	}
+
+	xfe = tee_xfe_create(xdev);
+	if (!xfe) {
+		ret = -ENOMEM;
+		xenbus_dev_fatal(xdev, ret, "failed to create FE struct");
+		goto err_xfe_create;
+	}
+
+	xfe->client = client_create(true);
+	if (!xfe->client) {
+		ret = -ENOMEM;
+		xenbus_dev_fatal(xdev, ret, "failed to create FE client");
+		goto err_client_create;
+	}
+
+	INIT_WORK(&xfe->work, xen_be_irq_handler_domu_bh);
+
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_add_tail(&xfe->list, &l_ctx.xfes);
+	mutex_unlock(&l_ctx.xfes_mutex);
+
+	ret = xenbus_switch_state(xdev, XenbusStateInitialised);
+	if (ret) {
+		xenbus_dev_fatal(xdev, ret,
+				 "failed to change state to initialised");
+		goto err_switch_state;
+	}
+
+	return 0;
+
+err_switch_state:
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_del(&xfe->list);
+	mutex_unlock(&l_ctx.xfes_mutex);
+err_client_create:
+	tee_xfe_put(xfe);
+err_xfe_create:
+	return ret;
+}
+
+/* Called when device is unregistered */
+static int xen_be_remove(struct xenbus_device *xdev)
+{
+	struct tee_xfe *xfe = dev_get_drvdata(&xdev->dev);
+
+	xenbus_switch_state(xdev, XenbusStateClosed);
+
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_del(&xfe->list);
+	mutex_unlock(&l_ctx.xfes_mutex);
+
+	tee_xfe_put(xfe);
+	return 0;
+}
+
+static inline int xen_be_map_ring_valloc(struct xenbus_device *dev,
+					 grant_ref_t ref, void **vaddr)
+{
+	return xenbus_map_ring_valloc(dev, &ref, 1, vaddr);
+}
+
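+/*
+ * Once the front-end reports Initialised, read its ring reference,
+ * maximum PTE entries, event channels and protocol version from
+ * xenstore, map the ring and buffer pages, bind both event channels
+ * and switch the back-end to Connected.
+ */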
+static inline void frontend_attach(struct tee_xfe *xfe)
+{
+	int domu_version;
+	int ret;
+	int i;
+
+	if (xenbus_read_driver_state(xfe->xdev->nodename) !=
+			XenbusStateInitialised)
+		return;
+
+	ret = xenbus_gather(XBT_NIL, xfe->xdev->otherend,
+			    "ring-ref", "%u", &xfe->ring_ref,
+			    "pte-entries-max", "%u", &xfe->pte_entries_max,
+			    "event-channel-domu", "%u", &xfe->evtchn_domu,
+			    "event-channel-dom0", "%u", &xfe->evtchn_dom0,
+			    "domu-version", "%u", &domu_version, NULL);
+	if (ret) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to gather other domain info");
+		return;
+	}
+
+	mc_dev_devel("ring ref %u evtchn domu=%u dom0=%u version=%u",
+		     xfe->ring_ref, xfe->evtchn_domu, xfe->evtchn_dom0,
+		     domu_version);
+
+	if (domu_version != TEE_XEN_VERSION) {
+		/* ret is 0 here, so report an explicit error code */
+		xenbus_dev_fatal(
+			xfe->xdev, -EINVAL,
+			"front- and back-end versions do not match: %d vs %d",
+			domu_version, TEE_XEN_VERSION);
+		return;
+	}
+
+	ret = xen_be_map_ring_valloc(xfe->xdev, xfe->ring_ref, &xfe->ring_p);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret, "failed to map ring");
+		return;
+	}
+	mc_dev_devel("mapped ring %p", xfe->ring_p);
+
+	/* Map buffers individually */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		ret = xen_be_map_ring_valloc(xfe->xdev,
+					     xfe->ring->domu.buffers[i].pmd_ref,
+					     &xfe->buffers[i].data.addr);
+		if (ret < 0) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to map buffer page");
+			return;
+		}
+
+		xfe->buffers[i].info = &xfe->ring->domu.buffers[i];
+	}
+
+	ret = bind_interdomain_evtchn_to_irqhandler(
+		xfe->xdev->otherend_id, xfe->evtchn_domu,
+		xen_be_irq_handler_domu_th, 0, "tee_be_domu", xfe);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to bind event channel to DomU IRQ");
+		return;
+	}
+
+	xfe->irq_domu = ret;
+	mc_dev_devel("bound DomU IRQ %d", xfe->irq_domu);
+
+	ret = bind_interdomain_evtchn_to_irqhandler(
+		xfe->xdev->otherend_id, xfe->evtchn_dom0,
+		xen_be_irq_handler_dom0_th, 0, "tee_be_dom0", xfe);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to bind event channel to Dom0 IRQ");
+		return;
+	}
+
+	xfe->irq_dom0 = ret;
+	mc_dev_devel("bound Dom0 IRQ %d", xfe->irq_dom0);
+
+	ret = xenbus_switch_state(xfe->xdev, XenbusStateConnected);
+	if (ret) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to change state to connected");
+		return;
+	}
+}
+
+static inline void frontend_detach(struct tee_xfe *xfe)
+{
+	int i;
+
+	xenbus_switch_state(xfe->xdev, XenbusStateClosing);
+	if (xfe->irq_domu >= 0)
+		unbind_from_irqhandler(xfe->irq_domu, xfe);
+
+	if (xfe->irq_dom0 >= 0)
+		unbind_from_irqhandler(xfe->irq_dom0, xfe);
+
+	for (i = 0; i < TEE_BUFFERS; i++)
+		xenbus_unmap_ring_vfree(xfe->xdev, xfe->buffers[i].data.addr);
+
+	if (xfe->ring_p)
+		xenbus_unmap_ring_vfree(xfe->xdev, xfe->ring_p);
+}
+
+static void xen_be_frontend_changed(struct xenbus_device *xdev,
+				    enum xenbus_state fe_state)
+{
+	struct tee_xfe *xfe = dev_get_drvdata(&xdev->dev);
+
+	mc_dev_devel("fe state changed to %d", fe_state);
+	switch (fe_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+		break;
+	case XenbusStateInitialised:
+		frontend_attach(xfe);
+		break;
+	case XenbusStateConnected:
+		break;
+	case XenbusStateClosing:
+		frontend_detach(xfe);
+		break;
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		device_unregister(&xfe->xdev->dev);
+		break;
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
+		break;
+	}
+}
+
+static struct xenbus_driver xen_be_driver = {
+	.ids  = xen_be_ids,
+	.probe = xen_be_probe,
+	.remove = xen_be_remove,
+	.otherend_changed = xen_be_frontend_changed,
+};
+
+int xen_be_init(void)
+{
+	INIT_LIST_HEAD(&l_ctx.xfes);
+	mutex_init(&l_ctx.xfes_mutex);
+	return xenbus_register_backend(&xen_be_driver);
+}
+
+void xen_be_exit(void)
+{
+	xenbus_unregister_driver(&xen_be_driver);
+}
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.h
new file mode 100644
index 0000000..135503e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_be.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_XEN_BE_H_
+#define _MC_XEN_BE_H_
+
+#include <linux/version.h>
+
+struct xen_be_map;
+
+#ifdef CONFIG_XEN
+int xen_be_init(void);
+void xen_be_exit(void);
+#else
+static inline int xen_be_init(void)
+{
+	return 0;
+}
+
+static inline void xen_be_exit(void)
+{
+}
+#endif
+
+#endif /* _MC_XEN_BE_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.c
new file mode 100644
index 0000000..19c0867
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifdef CONFIG_XEN
+
+#include "main.h"
+#include "client.h"
+#include "xen_common.h"
+
+struct tee_xfe *tee_xfe_create(struct xenbus_device *xdev)
+{
+	struct tee_xfe *xfe;
+
+	/* Alloc */
+	xfe = kzalloc(sizeof(*xfe), GFP_KERNEL);
+	if (!xfe)
+		return NULL;
+
+	atomic_inc(&g_ctx.c_xen_fes);
+	/* Init */
+	dev_set_drvdata(&xdev->dev, xfe);
+	xfe->xdev = xdev;
+	kref_init(&xfe->kref);
+	xfe->evtchn_domu = -1;
+	xfe->evtchn_dom0 = -1;
+	xfe->irq_domu = -1;
+	xfe->irq_dom0 = -1;
+	INIT_LIST_HEAD(&xfe->list);
+	mutex_init(&xfe->ring_mutex);
+	init_completion(&xfe->ring_completion);
+	return xfe;
+}
+
+static void tee_xfe_release(struct kref *kref)
+{
+	struct tee_xfe *xfe = container_of(kref, struct tee_xfe, kref);
+
+	if (xfe->client)
+		client_close(xfe->client);
+
+	kfree(xfe);
+	atomic_dec(&g_ctx.c_xen_fes);
+}
+
+void tee_xfe_put(struct tee_xfe *xfe)
+{
+	kref_put(&xfe->kref, tee_xfe_release);
+}
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.h
new file mode 100644
index 0000000..4fcb707
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_common.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_XEN_COMMON_H_
+#define _MC_XEN_COMMON_H_
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include "public/mc_user.h"	/* many types */
+#include "mci/mciiwp.h"
+#include "mci/mcimcp.h"
+#include "mmu.h"		/* PMD/PTE max entries */
+#include "client.h"		/* For BE to treat other VMs as clients */
+
+#define TEE_XEN_VERSION	3
+
+#define TEE_BUFFERS	4
+
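+/* Commands sent by the DomU front-end to the Dom0 back-end */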
+enum tee_xen_domu_cmd {
+	TEE_XEN_DOMU_NONE,
+	TEE_XEN_GET_VERSION,
+	/* TEE_XEN_MC_OPEN_DEVICE = 11,		SWd does not support this */
+	/* TEE_XEN_MC_CLOSE_DEVICE,		SWd does not support this */
+	TEE_XEN_MC_HAS_SESSIONS = 13,
+	TEE_XEN_MC_OPEN_SESSION,
+	TEE_XEN_MC_OPEN_TRUSTLET,
+	TEE_XEN_MC_CLOSE_SESSION,
+	TEE_XEN_MC_NOTIFY,
+	TEE_XEN_MC_WAIT,
+	TEE_XEN_MC_MAP,
+	TEE_XEN_MC_UNMAP,
+	TEE_XEN_MC_GET_ERR,
+	/* TEE_XEN_GP_INITIALIZE_CONTEXT = 21,	SWd does not support this */
+	/* TEE_XEN_GP_FINALIZE_CONTEXT,		SWd does not support this */
+	TEE_XEN_GP_REGISTER_SHARED_MEM = 23,
+	TEE_XEN_GP_RELEASE_SHARED_MEM,
+	TEE_XEN_GP_OPEN_SESSION,
+	TEE_XEN_GP_CLOSE_SESSION,
+	TEE_XEN_GP_INVOKE_COMMAND,
+	TEE_XEN_GP_REQUEST_CANCELLATION,
+};
+
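+/* Completion notifications sent back by the Dom0 back-end */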
+enum tee_xen_dom0_cmd {
+	TEE_XEN_DOM0_NONE,
+	TEE_XEN_MC_WAIT_DONE = TEE_XEN_MC_WAIT,
+	TEE_XEN_GP_OPEN_SESSION_DONE = TEE_XEN_GP_OPEN_SESSION,
+	TEE_XEN_GP_CLOSE_SESSION_DONE = TEE_XEN_GP_CLOSE_SESSION,
+	TEE_XEN_GP_INVOKE_COMMAND_DONE = TEE_XEN_GP_INVOKE_COMMAND,
+};
+
+union tee_xen_mmu_table {
+	/* Array of references to pages (PTE_ENTRIES_MAX or PMD_ENTRIES_MAX) */
+	grant_ref_t		*refs;
+	/* Address of table */
+	void			*addr;
+	/* Page for table */
+	unsigned long		page;
+};
+
+struct tee_xen_buffer_info {
+	/* Page Middle Directory, refs to tee_xen_pte_table's (full pages) */
+	grant_ref_t		pmd_ref;
+	/* Total number of refs for buffer */
+	u32			nr_refs;
+	u64			addr;		/* Unique VM address */
+	u32			offset;
+	u32			length;
+	u32			flags;
+	u32			sva;
+};
+
+/* Convenience structure to get buffer info and contents in one place */
+struct tee_xen_buffer {
+	struct tee_xen_buffer_info	*info;
+	union tee_xen_mmu_table		data;
+};
+
+struct tee_xen_ring {
+	/* DomU side, synchronous and asynchronous commands */
+	struct {
+		enum tee_xen_domu_cmd		cmd;		/* in */
+		u32				id;		/* in (debug) */
+		/* Return code of this command from Dom0 */
+		int				otherend_ret;	/* out */
+		struct mc_uuid_t		uuid;		/* in */
+		u32				session_id;	/* in/out */
+		/* Buffers to share (4 for GP, 2 for mcOpenTrustlet) */
+		struct tee_xen_buffer_info	buffers[TEE_BUFFERS]; /* in */
+		/* MC */
+		struct mc_version_info		version_info;	/* out */
+		u32				spid;		/* in */
+		s32				timeout;	/* in */
+		s32				err;		/* out */
+		/* GP */
+		u64				operation_id;	/* in */
+		struct gp_return		gp_ret;		/* out */
+		struct interworld_session	iws;		/* in */
+	}			domu;
+
+	/* Dom0 side, response to asynchronous command, never read by Dom0 */
+	struct {
+		enum tee_xen_dom0_cmd		cmd;		/* in */
+		u32				id;		/* in (debug) */
+		/* Return code from command */
+		int				cmd_ret;	/* in */
+		/* The operation id is used to match GP request and response */
+		u64				operation_id;	/* in */
+		struct gp_return		gp_ret;		/* in */
+		struct interworld_session	iws;		/* in */
+		/* The session id is used to match MC request and response */
+		u32				session_id;	/* in */
+	}			dom0;
+};
+
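+/*
+ * Per-connection state shared by the back-end and front-end code:
+ * ring page and its grant reference, event channels and bound IRQs,
+ * and the buffer pages exchanged through the ring.
+ */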
+struct tee_xfe {
+	struct xenbus_device	*xdev;
+	struct kref		kref;
+	grant_ref_t		ring_ref;
+	int			pte_entries_max;
+	int			evtchn_domu;
+	int			evtchn_dom0;
+	int			irq_domu;
+	int			irq_dom0;
+	struct list_head	list;
+	struct tee_client	*client;
+	struct work_struct	work;
+	/* Ring page */
+	union {
+		unsigned long		ring_ul;
+		void			*ring_p;
+		struct tee_xen_ring	*ring;
+	};
+	/* Buffer pages */
+	struct tee_xen_buffer	buffers[TEE_BUFFERS];
+	struct mutex		ring_mutex;	/* Protect our side of ring */
+	struct completion	ring_completion;
+	bool			ring_busy;
+	/* Unique ID for commands */
+	u32			domu_cmd_id;
+};
+
+struct tee_xfe *tee_xfe_create(struct xenbus_device *xdev);
+static inline void tee_xfe_get(struct tee_xfe *xfe)
+{
+	kref_get(&xfe->kref);
+}
+
+void tee_xfe_put(struct tee_xfe *xfe);
+
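+/* Serialise use of our side of the shared ring */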
+static inline void ring_get(struct tee_xfe *xfe)
+{
+	mutex_lock(&xfe->ring_mutex);
+	xfe->ring_busy = true;
+}
+
+static inline void ring_put(struct tee_xfe *xfe)
+{
+	xfe->ring_busy = false;
+	mutex_unlock(&xfe->ring_mutex);
+}
+
+#endif /* _MC_XEN_COMMON_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.c
new file mode 100644
index 0000000..2ed331f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.c
@@ -0,0 +1,1184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/irq.h>
+
+#include "mci/mciiwp.h"		/* struct interworld_session */
+
+#include "main.h"
+
+#ifdef TRUSTONIC_XEN_DOMU
+
+#include "admin.h"		/* tee_object* */
+#include "client.h"
+#include "iwp.h"
+#include "mcp.h"
+#include "xen_common.h"
+#include "xen_fe.h"
+
+#define page_to_gfn(p) (pfn_to_gfn(page_to_phys(p) >> XEN_PAGE_SHIFT))
+
+static struct {
+	int (*probe)(void);
+	int (*start)(void);
+	struct tee_xfe		*xfe;
+	/* MC sessions */
+	struct mutex		mc_sessions_lock;
+	struct list_head	mc_sessions;
+	/* GP operations */
+	struct mutex		gp_operations_lock;
+	struct list_head	gp_operations;
+	/* Last back-end state,
+	 * to overcome an issue in some Xen implementations
+	 */
+	int			last_be_state;
+} l_ctx;
+
+struct xen_fe_mc_session {
+	struct list_head		list;
+	struct completion		completion;
+	int				ret;
+	struct mcp_session		*session;
+};
+
+struct xen_fe_gp_operation {
+	struct list_head		list;
+	struct completion		completion;
+	int				ret;
+	u64				slot;
+	struct gp_return		*gp_ret;
+	struct interworld_session	*iws;
+};
+
+static inline struct xen_fe_mc_session *find_mc_session(u32 session_id)
+{
+	struct xen_fe_mc_session *session = ERR_PTR(-ENXIO), *candidate;
+
+	mutex_lock(&l_ctx.mc_sessions_lock);
+	list_for_each_entry(candidate, &l_ctx.mc_sessions, list) {
+		struct mcp_session *mcp_session = candidate->session;
+
+		if (mcp_session->sid == session_id) {
+			session = candidate;
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.mc_sessions_lock);
+
+	WARN(IS_ERR(session), "MC session not found for ID %u", session_id);
+	return session;
+}
+
+static inline int xen_fe_mc_wait_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_mc_session *session;
+
+	mc_dev_devel("received response to mc_wait for session %x: %d",
+		     xfe->ring->dom0.session_id, xfe->ring->dom0.cmd_ret);
+	session = find_mc_session(xfe->ring->dom0.session_id);
+	if (IS_ERR(session))
+		return PTR_ERR(session);
+
+	session->ret = xfe->ring->dom0.cmd_ret;
+	complete(&session->completion);
+	return 0;
+}
+
+static struct xen_fe_gp_operation *find_gp_operation(u64 operation_id)
+{
+	struct xen_fe_gp_operation *operation = ERR_PTR(-ENXIO), *candidate;
+
+	mutex_lock(&l_ctx.gp_operations_lock);
+	list_for_each_entry(candidate, &l_ctx.gp_operations, list) {
+		if (candidate->slot == operation_id) {
+			operation = candidate;
+			list_del(&operation->list);
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.gp_operations_lock);
+
+	WARN(IS_ERR(operation), "GP operation not found for op id %llx",
+	     operation_id);
+	return operation;
+}
+
+static inline int xen_fe_gp_open_session_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_open_session for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	*operation->iws = xfe->ring->dom0.iws;
+	*operation->gp_ret = xfe->ring->dom0.gp_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+static inline int xen_fe_gp_close_session_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_close_session for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+static inline int xen_fe_gp_invoke_command_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_invoke_command for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	*operation->iws = xfe->ring->dom0.iws;
+	*operation->gp_ret = xfe->ring->dom0.gp_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+static irqreturn_t xen_fe_irq_handler_dom0_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	/* Dom0 event, their side of ring locked by them */
+	schedule_work(&xfe->work);
+
+	return IRQ_HANDLED;
+}
+
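+/*
+ * Bottom half for Dom0 notifications: match the asynchronous completion
+ * (mc_wait or a GP operation) with the waiter registered in the local
+ * lists, wake it up, then acknowledge Dom0 over its event channel.
+ */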
+static void xen_fe_irq_handler_dom0_bh(struct work_struct *data)
+{
+	struct tee_xfe *xfe = container_of(data, struct tee_xfe, work);
+	int ret = -EINVAL;
+
+	mc_dev_devel("Dom0 -> DomU command %u id %u cmd ret %d",
+		     xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+		     xfe->ring->dom0.cmd_ret);
+	switch (xfe->ring->dom0.cmd) {
+	case TEE_XEN_DOM0_NONE:
+		return;
+	case TEE_XEN_MC_WAIT_DONE:
+		ret = xen_fe_mc_wait_done(xfe);
+		break;
+	case TEE_XEN_GP_OPEN_SESSION_DONE:
+		ret = xen_fe_gp_open_session_done(xfe);
+		break;
+	case TEE_XEN_GP_CLOSE_SESSION_DONE:
+		ret = xen_fe_gp_close_session_done(xfe);
+		break;
+	case TEE_XEN_GP_INVOKE_COMMAND_DONE:
+		ret = xen_fe_gp_invoke_command_done(xfe);
+		break;
+	}
+
+	if (ret)
+		mc_dev_err(ret, "Dom0 -> DomU result %u id %u",
+			   xfe->ring->dom0.cmd, xfe->ring->dom0.id);
+	else
+		mc_dev_devel("Dom0 -> DomU result %u id %u",
+			     xfe->ring->dom0.cmd, xfe->ring->dom0.id);
+
+	notify_remote_via_evtchn(xfe->evtchn_dom0);
+}
+
+/* Buffer management */
+
+struct xen_fe_map {
+	/* Array of PTE tables, so we can release the associated buffer refs */
+	union tee_xen_mmu_table	*pte_tables;
+	int			nr_pte_tables;
+	int			nr_refs;
+	bool			readonly;
+	int			pages_created;	/* Leak check */
+	int			refs_granted;	/* Leak check */
+	/* To auto-delete */
+	struct tee_deleter deleter;
+};
+
+static void xen_fe_map_release_pmd(struct xen_fe_map *map,
+				   const struct tee_xen_buffer *buffer)
+{
+	int i;
+
+	if (IS_ERR_OR_NULL(map))
+		return;
+
+	for (i = 0; i < map->nr_pte_tables; i++) {
+		gnttab_end_foreign_access(buffer->data.refs[i], true, 0);
+		map->refs_granted--;
+		mc_dev_devel("unmapped table %d ref %u",
+			     i, buffer->data.refs[i]);
+	}
+}
+
+static void xen_fe_map_release(struct xen_fe_map *map,
+			       const struct tee_xen_buffer *buffer)
+{
+	int nr_refs_left = map->nr_refs;
+	int i;
+
+	if (buffer)
+		xen_fe_map_release_pmd(map, buffer);
+
+	for (i = 0; i < map->nr_pte_tables; i++) {
+		int j, nr_refs = nr_refs_left;
+
+		if (nr_refs > PTE_ENTRIES_MAX)
+			nr_refs = PTE_ENTRIES_MAX;
+
+		for (j = 0; j < nr_refs; j++) {
+			gnttab_end_foreign_access(map->pte_tables[i].refs[j],
+						  map->readonly, 0);
+			map->refs_granted--;
+			nr_refs_left--;
+			mc_dev_devel("unmapped [%d, %d] ref %u, left %d",
+				     i, j, map->pte_tables[i].refs[j],
+				     nr_refs_left);
+		}
+
+		free_page(map->pte_tables[i].page);
+		map->pages_created--;
+	}
+
+	kfree(map->pte_tables);
+	if (map->pages_created || map->refs_granted)
+		mc_dev_err(-EUCLEAN,
+			   "leak detected: still in use %d, still ref'd %d",
+			   map->pages_created, map->refs_granted);
+
+	/* Log before freeing the map to avoid a use after free */
+	mc_dev_devel("freed map %p: refs=%u nr_pte_tables=%d",
+		     map, map->nr_refs, map->nr_pte_tables);
+	kfree(map);
+	atomic_dec(&g_ctx.c_xen_maps);
+}
+
+static void xen_fe_map_delete(void *arg)
+{
+	struct xen_fe_map *map = arg;
+
+	xen_fe_map_release(map, NULL);
+}
+
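+/*
+ * Grant the other domain access to the buffer described by b_map: one
+ * reference per PTE table page and one per data page. The references
+ * are written into the shared buffer slot and a deleter is attached to
+ * the MMU so that everything is revoked when the MMU is released.
+ */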
+static struct xen_fe_map *xen_fe_map_create(struct tee_xen_buffer *buffer,
+					    const struct mcp_buffer_map *b_map,
+					    int dom_id)
+{
+	/* b_map describes the PMD which contains pointers to PTE tables */
+	uintptr_t *pte_tables = (uintptr_t *)(uintptr_t)b_map->addr;
+	struct xen_fe_map *map;
+	unsigned long nr_pte_tables =
+		(b_map->nr_pages + PTE_ENTRIES_MAX - 1) / PTE_ENTRIES_MAX;
+	unsigned long nr_pages_left = b_map->nr_pages;
+	int readonly = !(b_map->flags & MC_IO_MAP_OUTPUT);
+	int ret, i;
+
+	/*
+	 * We always map the same way, to simplify:
+	 * * the buffer contains references to PTE pages
+	 * * PTE pages contain references to the buffer pages
+	 */
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	atomic_inc(&g_ctx.c_xen_maps);
+	map->readonly = readonly;
+
+	map->pte_tables = kcalloc(nr_pte_tables,
+				  sizeof(union tee_xen_mmu_table), GFP_KERNEL);
+	if (!map->pte_tables) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < nr_pte_tables; i++) {
+		/* As expected, PTE tables contain pointers to buffer pages */
+		struct page **pages = (struct page **)pte_tables[i];
+		unsigned long nr_pages = nr_pages_left;
+		int j;
+
+		map->pte_tables[i].page = get_zeroed_page(GFP_KERNEL);
+		if (!map->pte_tables[i].page) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		map->pages_created++;
+		map->nr_pte_tables++;
+
+		if (nr_pages > PTE_ENTRIES_MAX)
+			nr_pages = PTE_ENTRIES_MAX;
+
+		/* Create ref for this PTE table */
+		ret = gnttab_grant_foreign_access(
+			dom_id, virt_to_gfn(map->pte_tables[i].addr), true);
+		if (ret < 0) {
+			mc_dev_err(
+				ret,
+				"gnttab_grant_foreign_access failed:\t"
+				"PTE table %d", i);
+			goto err;
+		}
+
+		map->refs_granted++;
+		buffer->data.refs[i] = ret;
+		mc_dev_devel("mapped table %d ref %u for %lu pages",
+			     i, buffer->data.refs[i], nr_pages);
+
+		/* Create refs for pages */
+		for (j = 0; j < nr_pages; j++) {
+			ret = gnttab_grant_foreign_access(
+				dom_id, page_to_gfn(pages[j]), readonly);
+			if (ret < 0) {
+				mc_dev_err(
+					ret,
+					"gnttab_grant_foreign_access failed:\t"
+					"PTE %d pg %d", i, j);
+				goto err;
+			}
+
+			map->refs_granted++;
+			map->pte_tables[i].refs[j] = ret;
+			map->nr_refs++;
+			nr_pages_left--;
+			mc_dev_devel("mapped [%d, %d] ref %u, left %lu",
+				     i, j, map->pte_tables[i].refs[j],
+				     nr_pages_left);
+		}
+	}
+
+	buffer->info->nr_refs = map->nr_refs;
+	buffer->info->addr = (uintptr_t)b_map->mmu;
+	buffer->info->offset = b_map->offset;
+	buffer->info->length = b_map->length;
+	buffer->info->flags = b_map->flags;
+
+	/* Auto-delete */
+	map->deleter.object = map;
+	map->deleter.delete = xen_fe_map_delete;
+	tee_mmu_set_deleter(b_map->mmu, &map->deleter);
+
+	mc_dev_devel("created map %p: refs=%u nr_pte_tables=%d",
+		     map, map->nr_refs, map->nr_pte_tables);
+	return map;
+
+err:
+	xen_fe_map_release(map, buffer);
+	return ERR_PTR(ret);
+}
+
+/* DomU call to Dom0 */
+
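+/*
+ * Synchronous call: write the command and a non-zero ID into the DomU
+ * side of the ring, kick the event channel and sleep until the DomU IRQ
+ * handler signals ring_completion with Dom0's answer.
+ */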
+/* Must be called under xfe->ring_mutex */
+static inline void call_dom0(struct tee_xfe *xfe, enum tee_xen_domu_cmd cmd)
+{
+	WARN_ON(!xfe->ring_busy);
+
+	xfe->domu_cmd_id++;
+	if (!xfe->domu_cmd_id)
+		xfe->domu_cmd_id++;
+
+	/* Set command and ID */
+	xfe->ring->domu.cmd = cmd;
+	xfe->ring->domu.id = xfe->domu_cmd_id;
+	mc_dev_devel("DomU -> Dom0 request %u id %u pid %d",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id, current->pid);
+	/* Call */
+	notify_remote_via_evtchn(xfe->evtchn_domu);
+	wait_for_completion(&xfe->ring_completion);
+}
+
+/* Will be called back under xfe->ring_mutex */
+static irqreturn_t xen_fe_irq_handler_domu_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	WARN_ON(!xfe->ring_busy);
+
+	/* Response to a domU command, our side of ring locked by us */
+	mc_dev_devel("DomU -> Dom0 response %u id %u ret %d",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id,
+		     xfe->ring->domu.otherend_ret);
+	xfe->ring->domu.cmd = TEE_XEN_DOMU_NONE;
+	xfe->ring->domu.id = 0;
+	complete(&xfe->ring_completion);
+
+	return IRQ_HANDLED;
+}
+
+/* MC protocol interface */
+
+int xen_mc_get_version(struct mc_version_info *version_info)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	int ret;
+
+	ring_get(xfe);
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GET_VERSION);
+	/* Out */
+	memcpy(version_info, &xfe->ring->domu.version_info,
+	       sizeof(*version_info));
+	/* Read the return code before releasing the ring */
+	ret = xfe->ring->domu.otherend_ret;
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_open_session(struct mcp_session *session,
+			struct mcp_open_info *info)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_mc_session *fe_mc_session;
+	struct tee_xen_buffer *ta_buffer = &xfe->buffers[1];
+	struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+	struct xen_fe_map *ta_map = NULL;
+	struct xen_fe_map *tci_map = NULL;
+	struct tee_mmu *mmu = NULL;
+	enum tee_xen_domu_cmd cmd;
+	int ret;
+
+	fe_mc_session = kzalloc(sizeof(*fe_mc_session), GFP_KERNEL);
+	if (!fe_mc_session)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fe_mc_session->list);
+	init_completion(&fe_mc_session->completion);
+	fe_mc_session->session = session;
+
+	ring_get(xfe);
+	/* In */
+	if (info->type == TEE_MC_UUID) {
+		cmd = TEE_XEN_MC_OPEN_SESSION;
+		xfe->ring->domu.uuid = *info->uuid;
+	} else {
+		struct mc_ioctl_buffer buf = {
+			.va = info->va,
+			.len = info->len,
+			.flags = MC_IO_MAP_INPUT,
+		};
+		struct mcp_buffer_map b_map;
+
+		cmd = TEE_XEN_MC_OPEN_TRUSTLET;
+		/* Use an otherwise unused field to pass the SPID */
+		xfe->ring->domu.spid = info->spid;
+		mmu = tee_mmu_create(info->user ? current->mm : NULL, &buf);
+		if (IS_ERR(mmu)) {
+			ret = PTR_ERR(mmu);
+			mmu = NULL;
+			goto out;
+		}
+
+		tee_mmu_buffer(mmu, &b_map);
+		ta_map = xen_fe_map_create(ta_buffer, &b_map,
+					   xfe->xdev->otherend_id);
+		if (IS_ERR(ta_map)) {
+			ret = PTR_ERR(ta_map);
+			goto out;
+		}
+	}
+
+	/* Convert IPAs to grant references in-place */
+	if (info->tci_mmu) {
+		struct mcp_buffer_map b_map;
+
+		tee_mmu_buffer(info->tci_mmu, &b_map);
+		tci_map = xen_fe_map_create(tci_buffer, &b_map,
+					    xfe->xdev->otherend_id);
+		if (IS_ERR(tci_map)) {
+			ret = PTR_ERR(tci_map);
+			goto out;
+		}
+	} else {
+		tci_buffer->info->flags = 0;
+	}
+
+	/* Call */
+	call_dom0(xfe, cmd);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret)
+		session->sid = xfe->ring->domu.session_id;
+
+out:
+	if (!ret) {
+		mutex_lock(&l_ctx.mc_sessions_lock);
+		list_add_tail(&fe_mc_session->list, &l_ctx.mc_sessions);
+		mutex_unlock(&l_ctx.mc_sessions_lock);
+	} else {
+		kfree(fe_mc_session);
+	}
+
+	xen_fe_map_release_pmd(ta_map, ta_buffer);
+	xen_fe_map_release_pmd(tci_map, tci_buffer);
+	if (mmu)
+		tee_mmu_put(mmu);
+
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_close_session(struct mcp_session *session)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_mc_session *fe_mc_session;
+	int ret;
+
+	/* find_mc_session() returns an ERR_PTR, never NULL */
+	fe_mc_session = find_mc_session(session->sid);
+	if (IS_ERR(fe_mc_session))
+		return PTR_ERR(fe_mc_session);
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_CLOSE_SESSION);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret) {
+		mutex_lock(&l_ctx.mc_sessions_lock);
+		session->state = MCP_SESSION_CLOSED;
+		list_del(&fe_mc_session->list);
+		mutex_unlock(&l_ctx.mc_sessions_lock);
+		kfree(fe_mc_session);
+	}
+
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_notify(struct mcp_session *session)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	int ret;
+
+	mc_dev_devel("MC notify session %x", session->sid);
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_NOTIFY);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_wait(struct mcp_session *session, s32 timeout, bool silent_expiry)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_mc_session *fe_mc_session;
+	int ret;
+
+	/* Locked by caller so no two waits can happen on one session */
+	/* find_mc_session() returns an ERR_PTR, never NULL */
+	fe_mc_session = find_mc_session(session->sid);
+	if (IS_ERR(fe_mc_session))
+		return PTR_ERR(fe_mc_session);
+
+	fe_mc_session->ret = 0;
+
+	mc_dev_devel("MC wait session %x", session->sid);
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	xfe->ring->domu.timeout = timeout;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_WAIT);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	ring_put(xfe);
+
+	if (ret)
+		return ret;
+
+	/* Now wait for notification from Dom0 */
+	ret = wait_for_completion_interruptible(&fe_mc_session->completion);
+	if (!ret)
+		ret = fe_mc_session->ret;
+
+	return ret;
+}
+
+int xen_mc_map(u32 session_id, struct tee_mmu *mmu, u32 *sva)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct mcp_buffer_map b_map;
+	struct xen_fe_map *map = NULL;
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session_id;
+	tee_mmu_buffer(mmu, &b_map);
+	map = xen_fe_map_create(buffer, &b_map, xfe->xdev->otherend_id);
+	if (IS_ERR(map)) {
+		ret = PTR_ERR(map);
+		goto out;
+	}
+
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_MAP);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret) {
+		*sva = buffer->info->sva;
+		atomic_inc(&g_ctx.c_maps);
+	}
+
+out:
+	xen_fe_map_release_pmd(map, buffer);
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_unmap(u32 session_id, const struct mcp_buffer_map *map)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session_id;
+	buffer->info->length = map->length;
+	buffer->info->sva = map->secure_va;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_UNMAP);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret)
+		atomic_dec(&g_ctx.c_maps);
+
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_mc_get_err(struct mcp_session *session, s32 *err)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_MC_GET_ERR);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret) {
+		*err = xfe->ring->domu.err;
+		mc_dev_devel("MC get_err session %x err %d",
+			     session->sid, *err);
+	}
+
+	ring_put(xfe);
+	return ret;
+}
+
+/* GP protocol interface */
+
+int xen_gp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+			       struct gp_return *gp_ret)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	struct mcp_buffer_map b_map;
+	struct xen_fe_map *map = NULL;
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	tee_mmu_buffer(mmu, &b_map);
+	map = xen_fe_map_create(buffer, &b_map, xfe->xdev->otherend_id);
+	if (IS_ERR(map)) {
+		ret = PTR_ERR(map);
+		goto out;
+	}
+
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_REGISTER_SHARED_MEM);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret) {
+		*sva = buffer->info->sva;
+		atomic_inc(&g_ctx.c_maps);
+	}
+
+	if (xfe->ring->domu.gp_ret.origin)
+		*gp_ret = xfe->ring->domu.gp_ret;
+
+out:
+	xen_fe_map_release_pmd(map, buffer);
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_gp_release_shared_mem(struct mcp_buffer_map *map)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct tee_xen_buffer *buffer = &xfe->buffers[0];
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	buffer->info->addr = (uintptr_t)map->mmu;
+	buffer->info->length = map->length;
+	buffer->info->flags = map->flags;
+	buffer->info->sva = map->secure_va;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_RELEASE_SHARED_MEM);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	if (!ret)
+		atomic_dec(&g_ctx.c_maps);
+
+	ring_put(xfe);
+	return ret;
+}
+
+int xen_gp_open_session(struct iwp_session *session,
+			const struct mc_uuid_t *uuid,
+			const struct iwp_buffer_map *b_maps,
+			struct interworld_session *iws,
+			struct interworld_session *op_iws,
+			struct gp_return *gp_ret)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_gp_operation operation = { .ret = 0 };
+	struct xen_fe_map *maps[4] = { NULL, NULL, NULL, NULL };
+	int i, ret;
+
+	/* Prepare operation first not to be racey */
+	INIT_LIST_HEAD(&operation.list);
+	init_completion(&operation.completion);
+	/* Note: slot is a unique identifier for a session/operation */
+	operation.slot = session->slot;
+	operation.gp_ret = gp_ret;
+	operation.iws = iws;
+	mutex_lock(&l_ctx.gp_operations_lock);
+	list_add_tail(&operation.list, &l_ctx.gp_operations);
+	mutex_unlock(&l_ctx.gp_operations_lock);
+
+	ring_get(xfe);
+	/* The operation may contain tmpref's to map */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		if (!b_maps[i].map.addr) {
+			xfe->buffers[i].info->flags = 0;
+			continue;
+		}
+
+		maps[i] = xen_fe_map_create(&xfe->buffers[i], &b_maps[i].map,
+					    xfe->xdev->otherend_id);
+		if (IS_ERR(maps[i])) {
+			ret = PTR_ERR(maps[i]);
+			goto err;
+		}
+	}
+
+	/* In */
+	xfe->ring->domu.uuid = *uuid;
+	xfe->ring->domu.operation_id = session->slot;
+	xfe->ring->domu.iws = *op_iws;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_OPEN_SESSION);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+err:
+	for (i = 0; i < TEE_BUFFERS; i++)
+		xen_fe_map_release_pmd(maps[i], &xfe->buffers[i]);
+
+	ring_put(xfe);
+	if (ret) {
+		mutex_lock(&l_ctx.gp_operations_lock);
+		list_del(&operation.list);
+		mutex_unlock(&l_ctx.gp_operations_lock);
+		return ret;
+	}
+
+	/* Now wait for notification from Dom0 */
+	wait_for_completion(&operation.completion);
+	/* FIXME origins? */
+	return operation.ret;
+}
+
+int xen_gp_close_session(struct iwp_session *session)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_gp_operation operation = { .ret = 0 };
+	int ret;
+
+	/* Prepare operation first not to be racey */
+	INIT_LIST_HEAD(&operation.list);
+	init_completion(&operation.completion);
+	/* Note: slot is a unique identifier for a session/operation */
+	operation.slot = session->slot;
+	mutex_lock(&l_ctx.gp_operations_lock);
+	list_add_tail(&operation.list, &l_ctx.gp_operations);
+	mutex_unlock(&l_ctx.gp_operations_lock);
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	xfe->ring->domu.operation_id = session->slot;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_CLOSE_SESSION);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	ring_put(xfe);
+	if (ret) {
+		mutex_lock(&l_ctx.gp_operations_lock);
+		list_del(&operation.list);
+		mutex_unlock(&l_ctx.gp_operations_lock);
+		return ret;
+	}
+
+	/* Now wait for notification from Dom0 */
+	wait_for_completion(&operation.completion);
+	return operation.ret;
+}
+
+int xen_gp_invoke_command(struct iwp_session *session,
+			  const struct iwp_buffer_map *b_maps,
+			  struct interworld_session *iws,
+			  struct gp_return *gp_ret)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	struct xen_fe_gp_operation operation = { .ret = 0 };
+	struct xen_fe_map *maps[4] = { NULL, NULL, NULL, NULL };
+	int i, ret;
+
+	/* Prepare operation first not to be racey */
+	INIT_LIST_HEAD(&operation.list);
+	init_completion(&operation.completion);
+	/* Note: slot is a unique identifier for a session/operation */
+	operation.slot = session->slot;
+	operation.gp_ret = gp_ret;
+	operation.iws = iws;
+	mutex_lock(&l_ctx.gp_operations_lock);
+	list_add_tail(&operation.list, &l_ctx.gp_operations);
+	mutex_unlock(&l_ctx.gp_operations_lock);
+
+	ring_get(xfe);
+	/* The operation is in op_iws and may contain tmpref's to map */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		if (!b_maps[i].map.addr) {
+			xfe->buffers[i].info->flags = 0;
+			continue;
+		}
+
+		maps[i] = xen_fe_map_create(&xfe->buffers[i], &b_maps[i].map,
+					    xfe->xdev->otherend_id);
+		if (IS_ERR(maps[i])) {
+			ret = PTR_ERR(maps[i]);
+			goto err;
+		}
+	}
+
+	/* In */
+	xfe->ring->domu.session_id = session->sid;
+	xfe->ring->domu.operation_id = session->slot;
+	xfe->ring->domu.iws = *iws;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_INVOKE_COMMAND);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+err:
+	for (i = 0; i < TEE_BUFFERS; i++)
+		xen_fe_map_release_pmd(maps[i], &xfe->buffers[i]);
+
+	ring_put(xfe);
+	if (ret) {
+		mutex_lock(&l_ctx.gp_operations_lock);
+		list_del(&operation.list);
+		mutex_unlock(&l_ctx.gp_operations_lock);
+		return ret;
+	}
+
+	/* Now wait for notification from Dom0 */
+	wait_for_completion(&operation.completion);
+	/* FIXME origins? */
+	return operation.ret;
+}
+
+int xen_gp_request_cancellation(u64 slot)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+	int ret;
+
+	ring_get(xfe);
+	/* In */
+	xfe->ring->domu.operation_id = slot;
+	/* Call */
+	call_dom0(xfe, TEE_XEN_GP_REQUEST_CANCELLATION);
+	/* Out */
+	ret = xfe->ring->domu.otherend_ret;
+	ring_put(xfe);
+	return ret;
+}
+
+/* Device */
+
+static inline void xfe_release(struct tee_xfe *xfe)
+{
+	int i;
+
+	if (xfe->irq_domu >= 0)
+		unbind_from_irqhandler(xfe->irq_domu, xfe);
+
+	if (xfe->irq_dom0 >= 0)
+		unbind_from_irqhandler(xfe->irq_dom0, xfe);
+
+	if (xfe->evtchn_domu >= 0)
+		xenbus_free_evtchn(xfe->xdev, xfe->evtchn_domu);
+
+	if (xfe->evtchn_dom0 >= 0)
+		xenbus_free_evtchn(xfe->xdev, xfe->evtchn_dom0);
+
+	/*
+	 * gnttab_end_foreign_access() frees the page itself, possibly
+	 * deferred while the grant is still in use, so it must not be
+	 * freed again here.
+	 */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		if (!xfe->buffers[i].data.page)
+			break;
+
+		gnttab_end_foreign_access(xfe->ring->domu.buffers[i].pmd_ref, 0,
+					  xfe->buffers[i].data.page);
+	}
+
+	if (xfe->ring_ul)
+		gnttab_end_foreign_access(xfe->ring_ref, 0, xfe->ring_ul);
+
+	kfree(xfe);
+}
+
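+/*
+ * Front-end setup: allocate and grant the shared ring and per-buffer
+ * pages, allocate both event channels and bind their handlers, then
+ * publish everything to xenstore in a single transaction and switch
+ * the device to Initialised.
+ */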
+static inline struct tee_xfe *xfe_create(struct xenbus_device *xdev)
+{
+	struct tee_xfe *xfe;
+	struct xenbus_transaction trans;
+	int i, ret = -ENOMEM;
+
+	/* Alloc */
+	xfe = tee_xfe_create(xdev);
+	if (!xfe)
+		return ERR_PTR(-ENOMEM);
+
+	/* Create shared information buffer */
+	xfe->ring_ul = get_zeroed_page(GFP_KERNEL);
+	if (!xfe->ring_ul)
+		goto err;
+
+	/* Connect */
+	ret = xenbus_grant_ring(xfe->xdev, xfe->ring, 1, &xfe->ring_ref);
+	if (ret < 0)
+		goto err;
+
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		xfe->buffers[i].data.page = get_zeroed_page(GFP_KERNEL);
+		if (!xfe->buffers[i].data.page)
+			goto err;
+
+		ret = xenbus_grant_ring(xfe->xdev, xfe->buffers[i].data.addr, 1,
+					&xfe->ring->domu.buffers[i].pmd_ref);
+		if (ret < 0)
+			goto err;
+
+		xfe->buffers[i].info = &xfe->ring->domu.buffers[i];
+	}
+
+	ret = xenbus_alloc_evtchn(xfe->xdev, &xfe->evtchn_domu);
+	if (ret)
+		goto err;
+
+	ret = xenbus_alloc_evtchn(xfe->xdev, &xfe->evtchn_dom0);
+	if (ret)
+		goto err;
+
+	ret = bind_evtchn_to_irqhandler(xfe->evtchn_domu,
+					xen_fe_irq_handler_domu_th, 0,
+					"tee_fe_domu", xfe);
+	if (ret < 0)
+		goto err;
+
+	xfe->irq_domu = ret;
+
+	ret = bind_evtchn_to_irqhandler(xfe->evtchn_dom0,
+					xen_fe_irq_handler_dom0_th, 0,
+					"tee_fe_dom0", xfe);
+	if (ret < 0)
+		goto err;
+
+	xfe->irq_dom0 = ret;
+
+	/* Publish */
+	do {
+		ret = xenbus_transaction_start(&trans);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to start transaction");
+			goto err_transaction;
+		}
+
+		/* Ring is one page to support older kernels */
+		ret = xenbus_printf(trans, xfe->xdev->nodename,
+				    "ring-ref", "%u", xfe->ring_ref);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to write ring ref");
+			goto err_transaction;
+		}
+
+		ret = xenbus_printf(trans, xfe->xdev->nodename,
+				    "pte-entries-max", "%u",
+				    PTE_ENTRIES_MAX);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to write PTE entries max");
+			goto err_transaction;
+		}
+
+		ret = xenbus_printf(trans, xfe->xdev->nodename,
+				    "event-channel-domu", "%u",
+				    xfe->evtchn_domu);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to write event channel domu");
+			goto err_transaction;
+		}
+
+		ret = xenbus_printf(trans, xfe->xdev->nodename,
+				    "event-channel-dom0", "%u",
+				    xfe->evtchn_dom0);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to write event channel dom0");
+			goto err_transaction;
+		}
+
+		ret = xenbus_printf(trans, xfe->xdev->nodename,
+				    "domu-version", "%u", TEE_XEN_VERSION);
+		if (ret) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to write version");
+			goto err_transaction;
+		}
+
+		ret = xenbus_transaction_end(trans, 0);
+		if (ret) {
+			if (ret == -EAGAIN) {
+				mc_dev_devel("retry");
+			} else {
+				xenbus_dev_fatal(xfe->xdev, ret,
+						 "failed to end transaction");
+				/* Do not report success on a hard failure */
+				goto err_transaction;
+			}
+		}
+	} while (ret == -EAGAIN);
+
+	mc_dev_devel("evtchn domu=%u dom0=%u version=%u",
+		     xfe->evtchn_domu, xfe->evtchn_dom0, TEE_XEN_VERSION);
+	xenbus_switch_state(xfe->xdev, XenbusStateInitialised);
+	return xfe;
+
+err_transaction:
+err:
+	xenbus_switch_state(xfe->xdev, XenbusStateClosed);
+	xfe_release(xfe);
+	return ERR_PTR(ret);
+}
+
+static const struct xenbus_device_id xen_fe_ids[] = {
+	{ "tee_xen" },
+	{ "" }
+};
+
+static int xen_fe_probe(struct xenbus_device *xdev,
+			const struct xenbus_device_id *id)
+{
+	int ret;
+
+	ret = l_ctx.probe();
+	if (ret)
+		return ret;
+
+	l_ctx.xfe = xfe_create(xdev);
+	if (IS_ERR(l_ctx.xfe))
+		return PTR_ERR(l_ctx.xfe);
+
+	INIT_WORK(&l_ctx.xfe->work, xen_fe_irq_handler_dom0_bh);
+
+	return 0;
+}
+
+static void xen_fe_backend_changed(struct xenbus_device *xdev,
+				   enum xenbus_state be_state)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+
+	mc_dev_devel("be state changed to %d", be_state);
+
+	if (be_state == l_ctx.last_be_state) {
+		/* Protection against duplicated notifications (TBUG-1387) */
+		mc_dev_devel("be state (%d) already set... ignoring", be_state);
+		return;
+	}
+
+	switch (be_state) {
+	case XenbusStateUnknown:
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+		break;
+	case XenbusStateConnected:
+		if (l_ctx.start())
+			xenbus_switch_state(xfe->xdev, XenbusStateClosing);
+		else
+			xenbus_switch_state(xfe->xdev, XenbusStateConnected);
+		break;
+	case XenbusStateClosing:
+	case XenbusStateClosed:
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
+		break;
+	}
+
+	/* Refresh last back-end state */
+	l_ctx.last_be_state = be_state;
+}
+
+static struct xenbus_driver xen_fe_driver = {
+	.ids  = xen_fe_ids,
+	.probe = xen_fe_probe,
+	.otherend_changed = xen_fe_backend_changed,
+};
+
+int xen_fe_init(int (*probe)(void), int (*start)(void))
+{
+	l_ctx.probe = probe;
+	l_ctx.start = start;
+	mutex_init(&l_ctx.mc_sessions_lock);
+	INIT_LIST_HEAD(&l_ctx.mc_sessions);
+	mutex_init(&l_ctx.gp_operations_lock);
+	INIT_LIST_HEAD(&l_ctx.gp_operations);
+	return xenbus_register_frontend(&xen_fe_driver);
+}
+
+void xen_fe_exit(void)
+{
+	struct tee_xfe *xfe = l_ctx.xfe;
+
+	xenbus_unregister_driver(&xen_fe_driver);
+	/* The front-end may never have been probed successfully */
+	if (!IS_ERR_OR_NULL(xfe))
+		tee_xfe_put(xfe);
+}
+
+#endif /* TRUSTONIC_XEN_DOMU */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.h
new file mode 100644
index 0000000..c2030a7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/MobiCoreDriver/xen_fe.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _MC_XEN_FE_H_
+#define _MC_XEN_FE_H_
+
+#include <linux/version.h>
+
+#include "main.h"
+#include "client.h"
+#include "iwp.h"
+#include "mcp.h"
+
+#ifdef TRUSTONIC_XEN_DOMU
+/* MC protocol interface */
+int xen_mc_get_version(struct mc_version_info *version_info);
+int xen_mc_open_session(struct mcp_session *session,
+			struct mcp_open_info *info);
+int xen_mc_close_session(struct mcp_session *session);
+int xen_mc_map(u32 session_id, struct tee_mmu *mmu, u32 *sva);
+int xen_mc_unmap(u32 session_id, const struct mcp_buffer_map *map);
+int xen_mc_notify(struct mcp_session *session);
+int xen_mc_wait(struct mcp_session *session, s32 timeout, bool silent_expiry);
+int xen_mc_get_err(struct mcp_session *session, s32 *err);
+/* GP protocol interface */
+int xen_gp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+			       struct gp_return *gp_ret);
+int xen_gp_release_shared_mem(struct mcp_buffer_map *map);
+int xen_gp_open_session(struct iwp_session *session,
+			const struct mc_uuid_t *uuid,
+			const struct iwp_buffer_map *maps,
+			struct interworld_session *iws,
+			struct interworld_session *op_iws,
+			struct gp_return *gp_ret);
+int xen_gp_close_session(struct iwp_session *session);
+int xen_gp_invoke_command(struct iwp_session *session,
+			  const struct iwp_buffer_map *maps,
+			  struct interworld_session *iws,
+			  struct gp_return *gp_ret);
+int xen_gp_request_cancellation(u64 slot);
+
+int xen_fe_init(int (*probe)(void), int (*start)(void));
+void xen_fe_exit(void);
+#else
+static inline int xen_fe_init(int (*probe)(void), int (*start)(void))
+{
+	return 0;
+}
+
+static inline void xen_fe_exit(void)
+{
+}
+#endif
+
+#endif /* _MC_XEN_FE_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/Makefile b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/Makefile
new file mode 100644
index 0000000..6aaae1f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/Makefile
@@ -0,0 +1,35 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi trusted UI driver
+#
+
+GUD_ROOT_FOLDER := drivers/gud/
+
+# add our modules to kernel.
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui.o
+
+TlcTui-y := main.o tlcTui.o trustedui.o tui-hal.o
+
+# Release mode by default
+ccflags-y += -DNDEBUG
+ccflags-y += -Wno-declaration-after-statement
+
+ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
+
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
+
+# MobiCore TlcTui required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/TlcTui/inc \
+             -I$(GUD_ROOT_FOLDER)/TlcTui/public
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/build_tag.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/build_tag.h
new file mode 100644
index 0000000..db7db6f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/build_tag.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#ifndef MOBICORE_COMPONENT_BUILD_TAG
+#define MOBICORE_COMPONENT_BUILD_TAG \
+	"t-base-MTK-A64-Android-410a-V002-20190318_110437_65733_93363"
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/dciTui.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/dciTui.h
new file mode 100644
index 0000000..dd3e54b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/dciTui.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef __DCITUI_H__
+#define __DCITUI_H__
+
+/* Linux checkpatch suggests to use the BIT macro */
+#ifndef BIT
+#define BIT(n) (1U << (n))
+#endif
+
+#ifndef u32
+#define u32 uint32_t
+#endif
+
+#ifndef u64
+#define u64 uint64_t
+#endif
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK BIT(31)
+
+#define RSP_ID(cmd_id) (((u32)(cmd_id)) | RSP_ID_MASK)
+#define IS_CMD(cmd_id) ((((u32)(cmd_id)) & RSP_ID_MASK) == 0)
+#define IS_RSP(cmd_id) ((((u32)(cmd_id)) & RSP_ID_MASK) == RSP_ID_MASK)
+#define CMD_ID_FROM_RSP(rsp_id) ((rsp_id) & (~RSP_ID_MASK))
+
+/**
+ * Return codes of driver commands.
+ */
+#define TUI_DCI_OK                      0x00030000
+#define TUI_DCI_ERR_UNKNOWN_CMD         0x00030001
+#define TUI_DCI_ERR_NOT_SUPPORTED       0x00030002
+#define TUI_DCI_ERR_INTERNAL_ERROR      0x00030003
+#define TUI_DCI_ERR_NO_RESPONSE         0x00030004
+#define TUI_DCI_ERR_BAD_PARAMETERS      0x00030005
+#define TUI_DCI_ERR_NO_EVENT            0x00030006
+#define TUI_DCI_ERR_OUT_OF_DISPLAY      0x00030007
+/* ... add more error codes when needed */
+
+/**
+ * Notification ID's for communication Trustlet Connector -> Driver.
+ */
+#define NOT_TUI_NONE                0
+/* NWd system event that closes the current TUI session */
+#define NOT_TUI_CANCEL_EVENT        1
+/* TODO put this in HAL specific code */
+#define NOT_TUI_HAL_TOUCH_EVENT     0x80000001
+
+/**
+ * Command ID's for communication Driver -> Trustlet Connector.
+ */
+#define CMD_TUI_SW_NONE             0
+/* SWd request to NWd to start the TUI session */
+#define CMD_TUI_SW_OPEN_SESSION     1
+/* SWd request to NWd to close the TUI session */
+#define CMD_TUI_SW_CLOSE_SESSION    2
+/* SWd request to NWd to stop accessing the display controller */
+#define CMD_TUI_SW_STOP_DISPLAY     3
+/* SWd request to get TlcTui DCI version */
+#define CMD_TUI_SW_GET_VERSION      4
+/* SWd request to NWd to execute a HAL command */
+#define CMD_TUI_SW_HAL              5
+
+#define CMD_TUI_HAL_NONE                    0
+#define CMD_TUI_HAL_QUEUE_BUFFER            1
+#define CMD_TUI_HAL_QUEUE_DEQUEUE_BUFFER    2
+#define CMD_TUI_HAL_CLEAR_TOUCH_INTERRUPT   3
+#define CMD_TUI_HAL_HIDE_SURFACE            4
+#define CMD_TUI_HAL_GET_RESOLUTION          5
+
+/**
+ * Maximum data length.
+ */
+#define MAX_DCI_DATA_LEN (1024 * 100)
+
+/*
+ * TUI DCI VERSION
+ */
+#define TUI_DCI_VERSION_MAJOR   (1u)
+#define TUI_DCI_VERSION_MINOR   (1u)
+
+#define TUI_DCI_VERSION(major, minor) \
+	((((major) & 0x0000ffff) << 16) | ((minor) & 0x0000ffff))
+#define TUI_DCI_VERSION_GET_MAJOR(version) (((version) >> 16) & 0x0000ffff)
+#define TUI_DCI_VERSION_GET_MINOR(version) ((version) & 0x0000ffff)
+
+/* Command payload */
+
+struct tui_disp_data_t {
+	u32 buff_id;
+};
+
+struct tui_hal_cmd_t {
+	u32 id;    /* Id of the HAL command */
+	u32 size;  /* Size of the data associated to the HAL command */
+	u64 data[2];   /* Data associated to the HAL command */
+};
+
+struct tui_hal_rsp_t {
+	u32 id;    /* Id of the HAL response */
+	u32 return_code;   /* Return code of the HAL response */
+	u32 size;  /* Size of the data associated to the HAL response */
+	u32 data[3];   /* Data associated to the HAL response */
+};
+
+struct tui_alloc_data_t {
+	u32 alloc_size;
+	u32 num_of_buff;
+};
+
+union dci_cmd_payload_t {
+	struct tui_alloc_data_t alloc_data;
+	struct tui_disp_data_t  disp_data;
+	struct tui_hal_cmd_t    hal;
+};
+
+/* Command */
+struct dci_command_t {
+	u32 id;
+	union dci_cmd_payload_t payload;
+};
+
+/* TUI frame buffer (output from NWd) */
+struct tui_alloc_buffer_t {
+	u64    pa;
+};
+
+#define MAX_DCI_BUFFER_NUMBER 4
+
+/* Response */
+struct dci_response_t {
+	u32	id; /* must be command ID | RSP_ID_MASK */
+	u32		return_code;
+	union {
+		struct tui_alloc_buffer_t alloc_buffer[MAX_DCI_BUFFER_NUMBER];
+		struct tui_hal_rsp_t hal_rsp;
+	};
+};
+
+/* DCI buffer */
+struct tui_dci_msg_t {
+	u32 version;
+	u32     nwd_notif; /* Notification from TlcTui to DrTui */
+	struct dci_command_t  cmd_nwd;   /* Command from DrTui to TlcTui */
+	struct dci_response_t nwd_rsp;   /* Response from TlcTui to DrTui */
+	u32     hal_cmd;
+	u32     hal_rsp;
+};
+
+/**
+ * Driver UUID. Update accordingly after reserving UUID
+ */
+#define DR_TUI_UUID { { 7, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
+
+#endif /* __DCITUI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/t-base-tui.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/t-base-tui.h
new file mode 100644
index 0000000..e5fbfe7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/inc/t-base-tui.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef __TBASE_TUI_H__
+#define __TBASE_TUI_H__
+
+#define TRUSTEDUI_MODE_OFF                0x00
+#define TRUSTEDUI_MODE_ALL                0xff
+#define TRUSTEDUI_MODE_TUI_SESSION    0x01
+#define TRUSTEDUI_MODE_VIDEO_SECURED  0x02
+#define TRUSTEDUI_MODE_INPUT_SECURED  0x04
+
+#ifdef CONFIG_TRUSTONIC_TRUSTED_UI
+
+int trustedui_blank_inc(void);
+int trustedui_blank_dec(void);
+int trustedui_blank_get_counter(void);
+void trustedui_blank_set_counter(int counter);
+
+int trustedui_get_current_mode(void);
+void trustedui_set_mode(int mode);
+int trustedui_set_mask(int mask);
+int trustedui_clear_mask(int mask);
+
+#endif /* CONFIG_TRUSTONIC_TRUSTED_UI */
+
+#endif /* __TBASE_TUI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/main.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/main.c
new file mode 100644
index 0000000..5981e45
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/main.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "mobicore_driver_api.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+#include "build_tag.h"
+
+/*static int tui_dev_major_number = 122; */
+
+/*module_param(tui_dev_major_number, int, 0000); */
+/*MODULE_PARM_DESC(major, */
+/* "The device major number used to register a unique char device driver"); */
+
+/* Static variables */
+static struct cdev tui_cdev;
+
+static long tui_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int __user *uarg = (int __user *)arg;
+
+	if (_IOC_TYPE(cmd) != TUI_IO_MAGIC)
+		return -EINVAL;
+
+	pr_info("t-base-tui module: ioctl 0x%x ", cmd);
+
+	switch (cmd) {
+	case TUI_IO_SET_RESOLUTION:
+		/* TLC_TUI_CMD_SET_RESOLUTION is for specific platforms
+		 * that rely on onConfigurationChanged to set the resolution;
+		 * it has no effect on the Trustonic reference implementation.
+		 */
+		pr_info("TLC_TUI_CMD_SET_RESOLUTION\n");
+		/* NOT IMPLEMENTED */
+		ret = 0;
+		break;
+	case TUI_IO_NOTIFY:
+		pr_info("TUI_IO_NOTIFY\n");
+
+		if (tlc_notify_event(arg))
+			ret = 0;
+		else
+			ret = -EFAULT;
+		break;
+
+	case TUI_IO_WAITCMD: {
+		struct tlc_tui_command_t tui_cmd = {0};
+
+		pr_info("TUI_IO_WAITCMD\n");
+
+		ret = tlc_wait_cmd(&tui_cmd);
+		if (ret) {
+			pr_debug("ERROR %s:%d tlc_wait_cmd returned (0x%08X)\n",
+				 __func__, __LINE__, ret);
+			return ret;
+		}
+
+		/* Write command id to user */
+		pr_debug("IOCTL: sending command %d to user.\n", tui_cmd.id);
+
+		if (copy_to_user(uarg, &tui_cmd,
+				 sizeof(struct tlc_tui_command_t)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+
+		break;
+	}
+
+	case TUI_IO_ACK: {
+		struct tlc_tui_response_t rsp_id;
+
+		pr_info("TUI_IO_ACK\n");
+
+		/* Read user response */
+		if (copy_from_user(&rsp_id, uarg, sizeof(rsp_id)))
+			return -EFAULT;
+
+		pr_debug("IOCTL: User completed command %d.\n", rsp_id.id);
+		ret = tlc_ack_cmd(&rsp_id);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	case TUI_IO_INIT_DRIVER: {
+		pr_info("TUI_IO_INIT_DRIVER\n");
+
+		ret = tlc_init_driver();
+		if (ret) {
+			pr_debug("ERROR %s:%d tlc_init_driver returned (0x%08X)\n",
+				 __func__, __LINE__, ret);
+			return ret;
+		}
+		break;
+	}
+
+	default:
+		pr_info("ERROR %s:%d Unknown ioctl (%u)!\n", __func__,
+			__LINE__, cmd);
+		return -ENOTTY;
+	}
+
+	return ret;
+}
+
+atomic_t fileopened;
+
+static int tui_open(struct inode *inode, struct file *file)
+{
+	pr_info("TUI file opened\n");
+	atomic_inc(&fileopened);
+	return 0;
+}
+
+static int tui_release(struct inode *inode, struct file *file)
+{
+	pr_info("TUI file closed\n");
+	if (atomic_dec_and_test(&fileopened))
+		tlc_notify_event(NOT_TUI_CANCEL_EVENT);
+
+	return 0;
+}
+
+static const struct file_operations tui_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = tui_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tui_ioctl,
+#endif
+	.open = tui_open,
+	.release = tui_release
+};
+
+/*--------------------------------------------------------------------------- */
+static int __init tlc_tui_init(void)
+{
+	dev_t devno;
+	int err;
+	static struct class *tui_class;
+
+	pr_info("Loading t-base-tui module.\n");
+	pr_debug("\n=============== Running TUI Kernel TLC ===============\n");
+	pr_info("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
+
+	atomic_set(&fileopened, 0);
+
+	err = alloc_chrdev_region(&devno, 0, 1, TUI_DEV_NAME);
+	if (err) {
+		pr_debug("Unable to allocate Trusted UI device number\n");
+		return err;
+	}
+
+	cdev_init(&tui_cdev, &tui_fops);
+	tui_cdev.owner = THIS_MODULE;
+	/*    tui_cdev.ops = &tui_fops; */
+
+	err = cdev_add(&tui_cdev, devno, 1);
+	if (err) {
+		pr_debug("Unable to add Trusted UI char device\n");
+		unregister_chrdev_region(devno, 1);
+		return err;
+	}
+
+	tui_class = class_create(THIS_MODULE, "tui_cls");
+	device_create(tui_class, NULL, devno, NULL, TUI_DEV_NAME);
+
+	if (!hal_tui_init())
+		return -EPERM;
+
+	return 0;
+}
+
+static void __exit tlc_tui_exit(void)
+{
+	pr_info("Unloading t-base-tui module.\n");
+
+	unregister_chrdev_region(tui_cdev.dev, 1);
+	cdev_del(&tui_cdev);
+
+	hal_tui_exit();
+}
+
+module_init(tlc_tui_init);
+module_exit(tlc_tui_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Kinibi TUI");
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/public/tui_ioctl.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/public/tui_ioctl.h
new file mode 100644
index 0000000..4e551ad
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/public/tui_ioctl.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+#define MAX_BUFFER_NUMBER 3
+
+#ifndef u32
+#define u32 uint32_t
+#endif
+
+/* Command header */
+struct tlc_tui_command_t {
+	u32     id;
+	u32     data[2];
+};
+
+/* Response header */
+struct tlc_tui_response_t {
+	u32	id;
+	u32	return_code;
+	int	ion_fd[MAX_BUFFER_NUMBER];
+	u32	screen_metrics[3];
+};
+
+/* Resolution */
+struct tlc_tui_resolution_t {
+	u32	width;
+	u32	height;
+};
+
+/* Command IDs */
+/* No command */
+#define TLC_TUI_CMD_NONE                0
+/* Start TUI session */
+#define TLC_TUI_CMD_START_ACTIVITY      1
+/* Stop TUI session */
+#define TLC_TUI_CMD_STOP_ACTIVITY       2
+/*
+ * Queue a buffer
+ * IN: index of buffer to be queued
+ */
+#define TLC_TUI_CMD_QUEUE               3
+/*
+ * Queue a new buffer and dequeue the buffer currently displayed
+ * IN: indexes of buffer to be queued
+ */
+#define TLC_TUI_CMD_QUEUE_DEQUEUE       4
+/*
+ * Alloc buffers
+ * IN: number of buffers
+ * OUT: ion fd
+ */
+#define TLC_TUI_CMD_ALLOC_FB            5
+/* Free buffers */
+#define TLC_TUI_CMD_FREE_FB             6
+/* hide secure surface */
+#define TLC_TUI_CMD_HIDE_SURFACE        7
+#define TLC_TUI_CMD_GET_RESOLUTION      8
+
+/* TLC_TUI_CMD_SET_RESOLUTION is for specific platforms
+ * that rely on onConfigurationChanged to set the resolution;
+ * it has no effect on the Trustonic reference implementation.
+ */
+#define TLC_TUI_CMD_SET_RESOLUTION      9
+
+/* Return codes */
+#define TLC_TUI_OK                  0
+#define TLC_TUI_ERROR               1
+#define TLC_TUI_ERR_UNKNOWN_CMD     2
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME	"t-base-tui"
+
+#define TUI_IO_MAGIC	't'
+
+#define TUI_IO_NOTIFY	_IOW(TUI_IO_MAGIC, 1, u32)
+#define TUI_IO_WAITCMD	_IOR(TUI_IO_MAGIC, 2, struct tlc_tui_command_t)
+#define TUI_IO_ACK	_IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+#define TUI_IO_INIT_DRIVER	_IO(TUI_IO_MAGIC, 4)
+#define TUI_IO_SET_RESOLUTION _IOW(TUI_IO_MAGIC, 9, struct tlc_tui_resolution_t)
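+
+/*
+ * Illustrative sketch of a hypothetical userspace client (e.g. a TuiService
+ * daemon), assuming the /dev node created by the TlcTui module; after
+ * handling cmd.id it fills rsp and acknowledges:
+ *
+ *	int fd = open("/dev/" TUI_DEV_NAME, O_RDWR);
+ *	struct tlc_tui_command_t cmd;
+ *	struct tlc_tui_response_t rsp = {0};
+ *
+ *	ioctl(fd, TUI_IO_INIT_DRIVER);
+ *	while (ioctl(fd, TUI_IO_WAITCMD, &cmd) == 0) {
+ *		rsp.id = cmd.id;
+ *		rsp.return_code = TLC_TUI_OK;
+ *		ioctl(fd, TUI_IO_ACK, &rsp);
+ *	}
+ */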
+
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.c
new file mode 100644
index 0000000..e12909e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "mobicore_driver_api.h"
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+
+/* ------------------------------------------------------------- */
+/* Globals */
+struct tui_dci_msg_t *dci;
+static DECLARE_COMPLETION(dci_comp);
+static DECLARE_COMPLETION(io_comp);
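+
+/*
+ * Synchronization overview: dci_comp is completed by send_cmd_to_user() when
+ * a command from the secure driver is ready and is waited on in tlc_wait_cmd()
+ * (TUI_IO_WAITCMD); io_comp is completed by tlc_ack_cmd() (TUI_IO_ACK) and is
+ * waited on in send_cmd_to_user().
+ */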
+
+/* ------------------------------------------------------------- */
+/* Static */
+static const u32 DEVICE_ID = MC_DEVICE_ID_DEFAULT;
+static struct task_struct *thread_id;
+static DEFINE_MUTEX(thread_mutex);
+static struct tlc_tui_command_t g_user_cmd = {.id = TLC_TUI_CMD_NONE};
+static struct mc_session_handle dr_session_handle = {0, 0};
+struct tlc_tui_response_t g_user_rsp = {.id = TLC_TUI_CMD_NONE,
+				.return_code = TLC_TUI_ERR_UNKNOWN_CMD};
+static bool g_dci_version_checked;
+
+/* Functions */
+
+/* ------------------------------------------------------------- */
+static bool tlc_open_driver(void)
+{
+	bool ret = false;
+	enum mc_result mc_ret;
+	struct mc_uuid_t dr_uuid = DR_TUI_UUID;
+
+	/* Allocate WSM buffer for the DCI */
+	mc_ret = mc_malloc_wsm(DEVICE_ID, 0, sizeof(struct tui_dci_msg_t),
+			       (uint8_t **)&dci, 0);
+	if (mc_ret != MC_DRV_OK) {
+		pr_debug("ERROR %s:%d Allocation of DCI WSM failed: %d\n",
+			 __func__, __LINE__, mc_ret);
+		return false;
+	}
+
+	/* Clear the session handle */
+	memset(&dr_session_handle, 0, sizeof(dr_session_handle));
+	/* The device ID (the default device) is used */
+	dr_session_handle.device_id = DEVICE_ID;
+	/* Open session with the Driver */
+	mc_ret = mc_open_session(&dr_session_handle, &dr_uuid, (uint8_t *)dci,
+				 (u32)sizeof(struct tui_dci_msg_t));
+	if (mc_ret != MC_DRV_OK) {
+		pr_debug("ERROR %s:%d Open driver session failed: %d\n",
+			 __func__, __LINE__, mc_ret);
+		ret = false;
+	} else {
+		ret = true;
+	}
+
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+static bool tlc_open(void)
+{
+	bool ret = false;
+	enum mc_result mc_ret;
+
+	/* Open the tbase device */
+	pr_debug("%s: Opening tbase device\n", __func__);
+	mc_ret = mc_open_device(DEVICE_ID);
+
+	/* In case the device is already open, mc_open_device will return an
+	 * error (MC_DRV_ERR_INVALID_OPERATION).  But in this case, we can
+	 * continue, even though mc_open_device returned an error.  Stop on all
+	 * other errors.
+	 */
+	if (MC_DRV_OK != mc_ret && MC_DRV_ERR_INVALID_OPERATION != mc_ret) {
+		pr_debug("ERROR %s:%d Error %d opening device\n", __func__,
+			 __LINE__, mc_ret);
+		return false;
+	}
+
+	pr_debug("%s: Opening driver session\n", __func__);
+	ret = tlc_open_driver();
+
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_wait_cmd_from_driver(void)
+{
+	u32 ret = TUI_DCI_ERR_INTERNAL_ERROR;
+
+	/* Wait for a command from secure driver */
+	ret = mc_wait_notification(&dr_session_handle, -1);
+	if (ret == MC_DRV_OK)
+		pr_debug("%s: Got a command\n", __func__);
+	else
+		pr_debug("ERROR %s:%d mc_wait_notification() failed: %d\n",
+			 __func__, __LINE__, ret);
+}
+
+struct mc_session_handle *get_session_handle(void)
+{
+	return &dr_session_handle;
+}
+
+u32 send_cmd_to_user(u32 command_id, u32 data0, u32 data1)
+{
+	u32 ret = TUI_DCI_ERR_NO_RESPONSE;
+	int retry = 10;
+
+	/* Init shared variables */
+	g_user_cmd.id = command_id;
+	g_user_cmd.data[0] = data0;
+	g_user_cmd.data[1] = data1;
+	/* Erase the rsp struct */
+	memset(&g_user_rsp, 0, sizeof(g_user_rsp));
+	g_user_rsp.id = TLC_TUI_CMD_NONE;
+	g_user_rsp.return_code = TLC_TUI_ERR_UNKNOWN_CMD;
+
+	while (!atomic_read(&fileopened) && retry--) {
+		msleep(100);
+		pr_debug("sleep for atomic_read(&fileopened) with retry = %d\n",
+			 retry);
+	}
+
+	/*
+	 * Check that the client (TuiService) is still present before
+	 * returning the command.
+	 */
+	if (atomic_read(&fileopened)) {
+		/* Clean up previous response. */
+		complete_all(&io_comp);
+		reinit_completion(&io_comp);
+
+		/*
+		 * Unlock the ioctl thread (IOCTL_WAIT) in order to let the
+		 * client know that there is a command to process.
+		 */
+		pr_info("%s: give way to ioctl thread\n", __func__);
+		complete(&dci_comp);
+		pr_info("TUI TLC is running, waiting for the userland response\n");
+		/* Wait for the client acknowledge (IOCTL_ACK). */
+		if (!wait_for_completion_timeout(&io_comp,
+						 msecs_to_jiffies(5000))) {
+			pr_debug("%s:%d No acknowledge from client, timeout!\n",
+				 __func__, __LINE__);
+		}
+	} else {
+		/*
+		 * There is no client, do nothing except reporting an error to
+		 * SWd.
+		 */
+		pr_info("TUI TLC seems dead. Not waiting for userland answer\n");
+		ret = TUI_DCI_ERR_INTERNAL_ERROR;
+		goto end;
+	}
+
+	pr_debug("%s: Got an answer from ioctl thread.\n", __func__);
+	reinit_completion(&io_comp);
+
+	/* Check id of the cmd processed by ioctl thread (paranoia) */
+	if (g_user_rsp.id != command_id) {
+		pr_debug("ERROR %s:%d Wrong response id 0x%08x iso 0x%08x\n",
+			 __func__, __LINE__, dci->nwd_rsp.id,
+			 (u32)RSP_ID(command_id));
+		ret = TUI_DCI_ERR_INTERNAL_ERROR;
+	} else {
+		/* retrieve return code */
+		switch (g_user_rsp.return_code) {
+		case TLC_TUI_OK:
+			ret = TUI_DCI_OK;
+			break;
+		case TLC_TUI_ERROR:
+			ret = TUI_DCI_ERR_INTERNAL_ERROR;
+			break;
+		case TLC_TUI_ERR_UNKNOWN_CMD:
+			ret = TUI_DCI_ERR_UNKNOWN_CMD;
+			break;
+		}
+	}
+
+end:
+	/*
+	 * In any case, reset the value of the command, to ensure that commands
+	 * sent due to an interrupted wait_for_completion are TLC_TUI_CMD_NONE.
+	 */
+	reset_global_command_id();
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_process_cmd(void)
+{
+	u32 ret = TUI_DCI_ERR_INTERNAL_ERROR;
+	u32 command_id = CMD_TUI_SW_NONE;
+
+	if (!dci) {
+		pr_debug("ERROR %s:%d DCI has not been set up properly - exiting\n",
+			 __func__, __LINE__);
+		return;
+	}
+
+	command_id = dci->cmd_nwd.id;
+
+	if (dci->hal_rsp)
+		hal_tui_notif();
+
+	/* Warn if previous response was not acknowledged */
+	if (command_id == CMD_TUI_SW_NONE) {
+		pr_debug("ERROR %s:%d Notified without command\n", __func__,
+			 __LINE__);
+		return;
+	}
+
+	if (dci->nwd_rsp.id != CMD_TUI_SW_NONE)
+		pr_debug("%s: Warning, previous response not ack\n",
+			 __func__);
+
+	/* Handle command */
+	switch (command_id) {
+	case CMD_TUI_SW_OPEN_SESSION:
+		pr_debug("%s: CMD_TUI_SW_OPEN_SESSION.\n", __func__);
+
+		if (!g_dci_version_checked) {
+			pr_info("ERROR %s:%d DrTui version is not compatible!\n",
+				__func__, __LINE__);
+			ret = TUI_DCI_ERR_INTERNAL_ERROR;
+			break;
+		}
+		/* Start android TUI activity */
+		ret = send_cmd_to_user(
+			TLC_TUI_CMD_START_ACTIVITY,
+			dci->cmd_nwd.payload.alloc_data.num_of_buff,
+			dci->cmd_nwd.payload.alloc_data.alloc_size);
+		if (ret != TUI_DCI_OK) {
+			pr_debug("%s:%d return value is 0x%x.\n", __func__,
+				 __LINE__, ret);
+			break;
+		}
+
+/*****************************************************************************/
+
+		/* Alloc work buffer separately and send it as last buffer */
+		ret = hal_tui_alloc(dci->nwd_rsp.alloc_buffer,
+				    dci->cmd_nwd.payload.alloc_data.alloc_size,
+				   dci->cmd_nwd.payload.alloc_data.num_of_buff);
+		if (ret != TUI_DCI_OK) {
+			pr_debug("%s: hal_tui_alloc() failed (0x%08X)",
+				 __func__, ret);
+			send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+			break;
+		}
+
+		/* Deactivate linux UI drivers */
+		ret = hal_tui_deactivate();
+
+		if (ret != TUI_DCI_OK) {
+			hal_tui_free();
+			send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+			break;
+		}
+
+		break;
+
+	case CMD_TUI_SW_GET_VERSION: {
+		u32 drtui_dci_version = dci->version;
+		u32 tlctui_dci_version =
+			TUI_DCI_VERSION(TUI_DCI_VERSION_MAJOR,
+					TUI_DCI_VERSION_MINOR);
+
+		pr_debug("%s: CMD_TUI_SW_GET_VERSION.\n", __func__);
+		pr_info("%s: TlcTui DCI Version (%u.%u)\n",  __func__,
+			TUI_DCI_VERSION_GET_MAJOR(tlctui_dci_version),
+			TUI_DCI_VERSION_GET_MINOR(tlctui_dci_version));
+		pr_info("%s: DrTui DCI Version (%u.%u)\n",  __func__,
+			TUI_DCI_VERSION_GET_MAJOR(drtui_dci_version),
+			TUI_DCI_VERSION_GET_MINOR(drtui_dci_version));
+		/* Write the TlcTui DCI version in the response for the SWd */
+		dci->version = tlctui_dci_version;
+		g_dci_version_checked = true;
+		ret = TUI_DCI_OK;
+		break;
+	}
+
+	case CMD_TUI_SW_HAL:
+		/* TODO Always answer, even if there is a cancel!! */
+		ret = hal_tui_process_cmd(&dci->cmd_nwd.payload.hal,
+					  &dci->nwd_rsp.hal_rsp);
+		break;
+
+	case CMD_TUI_SW_CLOSE_SESSION:
+		pr_debug("%s: CMD_TUI_SW_CLOSE_SESSION.\n", __func__);
+
+		/* QC: close ion client before activating linux UI */
+		hal_tui_free();
+
+		/* Activate linux UI drivers */
+		ret = hal_tui_activate();
+
+		/* Stop android TUI activity */
+		/* Ignore the return code, because an error means the TLC has been
+		 * killed, which implies that the activity has already stopped.
+		 */
+		send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+		ret = TUI_DCI_OK;
+
+		break;
+
+	default:
+		pr_debug("ERROR %s:%d Unknown command %d\n",
+			 __func__, __LINE__, command_id);
+		ret = TUI_DCI_ERR_UNKNOWN_CMD;
+		break;
+	}
+
+	/* Fill in response to SWd, fill ID LAST */
+	pr_debug("%s: return 0x%08x to cmd 0x%08x\n",
+		 __func__, ret, command_id);
+	/* TODO: fill data fields of pDci->nwdRsp */
+	dci->nwd_rsp.return_code = ret;
+	dci->nwd_rsp.id = RSP_ID(command_id);
+
+	/* Acknowledge command */
+	dci->cmd_nwd.id = CMD_TUI_SW_NONE;
+
+	/* Notify SWd */
+	pr_debug("DCI RSP NOTIFY CORE\n");
+	ret = mc_notify(&dr_session_handle);
+	if (ret != MC_DRV_OK)
+		pr_debug("ERROR %s:%d Notify failed: %d\n", __func__, __LINE__,
+			 ret);
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_close_driver(void)
+{
+	enum mc_result ret;
+
+	/* Close session with the Driver */
+	ret = mc_close_session(&dr_session_handle);
+	if (ret != MC_DRV_OK) {
+		pr_debug("ERROR %s:%d Closing driver session failed: %d\n",
+			 __func__, __LINE__, ret);
+	}
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_close(void)
+{
+	enum mc_result ret;
+
+	pr_debug("%s: Closing driver session\n", __func__);
+	tlc_close_driver();
+
+	pr_debug("%s: Closing tbase\n", __func__);
+	/* Close the tbase device */
+	ret = mc_close_device(DEVICE_ID);
+	if (ret != MC_DRV_OK) {
+		pr_debug("ERROR %s:%d Closing tbase device failed: %d\n",
+			 __func__, __LINE__, ret);
+	}
+}
+
+void reset_global_command_id(void)
+{
+	g_user_cmd.id = TLC_TUI_CMD_NONE;
+}
+
+/* ------------------------------------------------------------- */
+
+bool tlc_notify_event(u32 event_type)
+{
+	bool ret = false;
+	enum mc_result result;
+
+	if (!dci) {
+		pr_warn("%s: DCI has not been set up properly - exiting\n",
+			__func__);
+		return false;
+	}
+
+	/* Prepare notification message in DCI */
+	pr_debug("%s: event_type = %d\n", __func__, event_type);
+	dci->nwd_notif = event_type;
+
+	/* Signal the Driver */
+	pr_debug("DCI EVENT NOTIFY CORE\n");
+	result = mc_notify(&dr_session_handle);
+	if (result != MC_DRV_OK) {
+		pr_err("%s: mc_notify failed: %d\n", __func__, result);
+		ret = false;
+	} else {
+		ret = true;
+	}
+
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+/**
+ */
+static int main_thread(void *uarg)
+{
+	pr_debug("%s: TlcTui start!\n", __func__);
+
+	/* Open session on the driver */
+	if (!tlc_open()) {
+		pr_err("%s: open driver failed!\n", __func__);
+		return 1;
+	}
+
+	/* TlcTui main thread loop */
+	for (;;) {
+		/* Wait for a command from the DrTui on DCI */
+		tlc_wait_cmd_from_driver();
+		/* Something has been received, process it. */
+		tlc_process_cmd();
+	}
+
+	/*
+	 * Close tlc. Note that this frees the DCI pointer.
+	 * Do not use this pointer after tlc_close().
+	 */
+	tlc_close();
+
+	return 0;
+}
+
+static int start_thread_if_needed(void)
+{
+	int rc = 0;
+
+	/*
+	 * Create the TlcTui Main thread and start secure driver (only 1st time)
+	 */
+	mutex_lock(&thread_mutex);
+	if (thread_id)
+		/* Already started */
+		goto end;
+
+	thread_id = kthread_run(main_thread, NULL, "tee_tui");
+	if (IS_ERR_OR_NULL(thread_id)) {
+		rc = PTR_ERR(thread_id);
+		pr_err("Unable to start Trusted UI main thread: %d\n", rc);
+		thread_id = NULL;
+	}
+
+end:
+	mutex_unlock(&thread_mutex);
+	return rc;
+}
+
+int tlc_wait_cmd(struct tlc_tui_command_t *cmd_id)
+{
+	int ret = start_thread_if_needed();
+
+	if (ret)
+		return ret;
+
+	/* Wait for signal from DCI handler */
+	/* In case of an interrupted sys call, return with -EINTR */
+	if (wait_for_completion_interruptible(&dci_comp)) {
+		pr_debug("interrupted by system\n");
+		return -ERESTARTSYS;
+	}
+	reinit_completion(&dci_comp);
+
+	*cmd_id = g_user_cmd;
+	return 0;
+}
+
+int tlc_init_driver(void)
+{
+	return start_thread_if_needed();
+}
+
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp)
+{
+	g_user_rsp = *rsp;
+
+	if (g_user_rsp.id == TLC_TUI_CMD_ALLOC_FB)
+		hal_tui_post_start(&g_user_rsp);
+
+	/* Send signal to DCI */
+	complete(&io_comp);
+
+	return 0;
+}
+
+/** @} */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.h
new file mode 100644
index 0000000..e426fd8
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tlcTui.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef TLCTUI_H_
+#define TLCTUI_H_
+
+#include "tui_ioctl.h"
+#define TUI_MOD_TAG "t-base-tui "
+
+#define ION_PHYS_WORKING_BUFFER_IDX (0)
+#define ION_PHYS_FRAME_BUFFER_IDX   (1)
+
+void reset_global_command_id(void);
+int tlc_wait_cmd(struct tlc_tui_command_t *cmd);
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id);
+bool tlc_notify_event(u32 event_type);
+int tlc_init_driver(void);
+u32 send_cmd_to_user(u32 command_id, u32 data0, u32 data1);
+struct mc_session_handle *get_session_handle(void);
+
+extern atomic_t fileopened;
+extern struct tui_dci_msg_t *dci;
+extern struct tlc_tui_response_t g_user_rsp;
+extern u64 g_ion_phys[MAX_BUFFER_NUMBER];
+extern u32 g_ion_size[MAX_BUFFER_NUMBER];
+#endif /* TLCTUI_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/trustedui.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/trustedui.c
new file mode 100644
index 0000000..2a1ad00
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/trustedui.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/t-base-tui.h>
+
+static int trustedui_mode = TRUSTEDUI_MODE_OFF;
+static int trustedui_blank_counter;
+
+static DEFINE_SPINLOCK(trustedui_lock);
+
+int trustedui_blank_inc(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = ++trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_inc);
+
+int trustedui_blank_dec(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = --trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_dec);
+
+int trustedui_blank_get_counter(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_get_counter);
+
+void trustedui_blank_set_counter(int counter)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	trustedui_blank_counter = counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_blank_set_counter);
+
+int trustedui_get_current_mode(void)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_get_current_mode);
+
+void trustedui_set_mode(int mode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	trustedui_mode = mode;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_set_mode);
+
+int trustedui_set_mask(int mask)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode |= mask;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_set_mask);
+
+int trustedui_clear_mask(int mask)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode &= ~mask;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_clear_mask);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Kinibi TUI");
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.c b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.c
new file mode 100644
index 0000000..5d067ef
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fb.h>
+
+#ifndef CONFIG_TRUSTONIC_TRUSTED_UI
+#define CONFIG_TRUSTONIC_TRUSTED_UI
+#endif
+#include <t-base-tui.h>
+
+#include "tui_ioctl.h"
+#include "dciTui.h"
+#include "tlcTui.h"
+#include "tui-hal.h"
+
+#define TUI_MEMPOOL_SIZE 0
+
+struct tui_mempool {
+	void *va;
+	unsigned long pa;
+	size_t size;
+};
+
+static struct tui_mempool g_tui_mem_pool;
+
+/* Basic implementation of a memory pool for the TUI framebuffer.  This
+ * implementation uses kmalloc, for demonstration purposes only.
+ * A real implementation might prefer a more advanced allocator, such as ION,
+ * in order not to exhaust the memory available to kmalloc.
+ */
+static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
+{
+	bool ret = false;
+	void *tui_mem_pool = NULL;
+
+	pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
+	if (!size) {
+		pr_debug("TUI frame buffer: nothing to allocate.");
+		return true;
+	}
+
+	tui_mem_pool = kmalloc(size, GFP_KERNEL);
+	if (!tui_mem_pool) {
+		return ret;
+	} else if (ksize(tui_mem_pool) < size) {
+		pr_err("TUI mem pool size too small: req'd=%zu alloc'd=%zu",
+		       size, ksize(tui_mem_pool));
+		kfree(tui_mem_pool);
+	} else {
+		pool->va = tui_mem_pool;
+		pool->pa = virt_to_phys(tui_mem_pool);
+		pool->size = ksize(tui_mem_pool);
+		ret = true;
+	}
+	return ret;
+}
+
+static void free_tui_memory_pool(struct tui_mempool *pool)
+{
+	kfree(pool->va);
+	memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+u32 hal_tui_init(void)
+{
+	/* Allocate the memory pool for the framebuffer */
+	if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
+		return TUI_DCI_ERR_INTERNAL_ERROR;
+
+	return TUI_DCI_OK;
+}
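+
+/*
+ * Note: with TUI_MEMPOOL_SIZE set to 0 above, allocate_tui_memory_pool()
+ * returns true without allocating anything, so hal_tui_init() succeeds but
+ * hal_tui_alloc() can only satisfy zero-sized requests.
+ */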
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exits. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void)
+{
+	/* delete memory pool if any */
+	if (g_tui_mem_pool.va)
+		free_tui_memory_pool(&g_tui_mem_pool);
+}
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer:    input parameter that the allocator fills with the physical
+ *                  addresses of the allocated buffers
+ * @allocsize:      size of the buffer to allocate.  All the buffers are of the
+ *                  same size
+ * @number:         Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver.  The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes.  The physical address of each buffer must be
+ * stored in the array of structures 'allocbuffer' which is provided as
+ * an argument.
+ *
+ * The physical address of the first buffer must be put in allocbuffer[0].pa, the
+ * second one on allocbuffer[1].pa, and so on.  The function must return 0 on
+ * success, non-zero on error.  For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ * If the working buffer allocation is different from framebuffers, ensure that
+ * the physical address of the working buffer is at index 0 of the allocbuffer
+ * table (allocbuffer[0].pa).
+ */
+u32 hal_tui_alloc(
+	struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+	size_t allocsize, u32 number)
+{
+	u32 ret = TUI_DCI_ERR_INTERNAL_ERROR;
+
+	if (!allocbuffer) {
+		pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
+		return TUI_DCI_ERR_INTERNAL_ERROR;
+	}
+
+	pr_debug("%s(%d): Requested size=0x%zx x %u chunks\n",
+		 __func__, __LINE__, allocsize, number);
+
+	if ((size_t)allocsize == 0) {
+		pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
+		return TUI_DCI_OK;
+	}
+
+	if (number != 2) {
+		pr_debug("%s(%d): Unexpected number of buffers requested\n",
+			 __func__, __LINE__);
+		return TUI_DCI_ERR_INTERNAL_ERROR;
+	}
+
+	if ((size_t)(allocsize * number) <= g_tui_mem_pool.size) {
+		/* requested buffer fits in the memory pool */
+		allocbuffer[0].pa = (u64)g_tui_mem_pool.pa;
+		allocbuffer[1].pa = (u64)(g_tui_mem_pool.pa +
+					       g_tui_mem_pool.size / 2);
+		pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+			 allocbuffer[0].pa);
+		pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+			 allocbuffer[1].pa);
+		ret = TUI_DCI_OK;
+	} else {
+		/*
+		 * requested buffer is bigger than the memory pool, return an
+		 * error
+		 */
+		pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
+		ret = TUI_DCI_ERR_INTERNAL_ERROR;
+	}
+
+	return ret;
+}
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void)
+{
+}
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+u32 hal_tui_deactivate(void)
+{
+	/* Set linux TUI flag */
+	trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+	/*
+	 * Stop NWd display here.  After this function returns, SWd will take
+	 * control of the display and input.  Therefore the NWd should no longer
+	 * access it.
+	 * This can be done by calling the fb_blank(FB_BLANK_POWERDOWN) function
+	 * on the appropriate framebuffer device.
+	 */
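+	/*
+	 * Illustrative sketch, assuming an fbdev-based display (the exact
+	 * device and locking are platform integration choices):
+	 *
+	 *	struct fb_info *info = registered_fb[0];
+	 *
+	 *	if (info) {
+	 *		console_lock();
+	 *		fb_blank(info, FB_BLANK_POWERDOWN);
+	 *		console_unlock();
+	 *	}
+	 */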
+	trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED |
+			   TRUSTEDUI_MODE_INPUT_SECURED);
+
+	return TUI_DCI_OK;
+}
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+u32 hal_tui_activate(void)
+{
+	/* Give display and input control back to the NWd */
+	trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED |
+			     TRUSTEDUI_MODE_INPUT_SECURED);
+	/*
+	 * Restart NWd display here.  TUI session has ended, and therefore the
+	 * SWd will no longer use display and input.
+	 * This can be done by calling the fb_blank(FB_BLANK_UNBLANK) function
+	 * on the appropriate framebuffer device
+	 */
+	/* Clear linux TUI flag */
+	trustedui_set_mode(TRUSTEDUI_MODE_OFF);
+	return TUI_DCI_OK;
+}
+
+/* Do nothing; it's only used for QC */
+u32 hal_tui_process_cmd(struct tui_hal_cmd_t *cmd, struct tui_hal_rsp_t *rsp)
+{
+	return TUI_DCI_OK;
+}
+
+/* Do nothing; it's only used for QC */
+u32 hal_tui_notif(void)
+{
+	return TUI_DCI_OK;
+}
+
+/* Do nothing; it's only used for QC */
+void hal_tui_post_start(struct tlc_tui_response_t *rsp)
+{
+	pr_info("%s(%d)\n", __func__, __LINE__);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.h b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.h
new file mode 100644
index 0000000..c6e7aa4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/410/TlcTui/tui-hal.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 TRUSTONIC LIMITED
+ */
+
+#ifndef _TUI_HAL_H_
+#define _TUI_HAL_H_
+
+#include <linux/types.h>
+#include "tui_ioctl.h"
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+uint32_t hal_tui_init(void);
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exits. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void);
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer:    input parameter that the allocator fills with the physical
+ *                  addresses of the allocated buffers
+ * @allocsize:      size of the buffer to allocate.  All the buffers are of the
+ *                  same size
+ * @number:         Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver.  The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes.  The physical address of each buffer must be
+ * stored in the array of structures 'allocbuffer' which is provided as
+ * an argument.
+ *
+ * The physical address of the first buffer must be put in allocbuffer[0].pa, the
+ * second one on allocbuffer[1].pa, and so on.  The function must return 0 on
+ * success, non-zero on error.  For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ * If the working buffer allocation is different from framebuffers, ensure that
+ * the physical address of the working buffer is at index 0 of the allocbuffer
+ * table (allocbuffer[0].pa).
+ */
+uint32_t hal_tui_alloc(
+	struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+	size_t allocsize, uint32_t number);
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void);
+
+void hal_tui_post_start(struct tlc_tui_response_t *rsp);
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_deactivate(void);
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_activate(void);
+
+/**
+ * hal_tui_process_cmd() - integrator specific handling of HAL commands
+ *
+ * This function is called when the kernel module receives a command from the
+ * secure driver HAL, i.e. when drTuiCoreDciSendAndWait() is called.
+ */
+uint32_t hal_tui_process_cmd(struct tui_hal_cmd_t *cmd,
+			     struct tui_hal_rsp_t *rsp);
+
+/**
+ * hal_tui_notif() - integrator specific handling of answers from the HAL
+ *
+ * This function is called when the kernel module receives an answer from the
+ * secure driver HAL (the hal_rsp field of the world shared memory struct is
+ * not null).
+ * This should be the way to get an answer from the secure driver after a
+ * command has been sent to it (the hal_cmd field of the world shared memory
+ * struct has been set and a notification has been raised).
+ */
+uint32_t hal_tui_notif(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/Kconfig b/src/kernel/linux/v4.19/drivers/tee/gud/Kconfig
new file mode 100644
index 0000000..350c03f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/Kconfig
@@ -0,0 +1,35 @@
+#
+# TRUSTONIC TEE configuration
+#
+config TRUSTONIC_TEE_SUPPORT
+	tristate "Enable Trustonic TEE Support"
+	help
+	  Enable Trustonic TEE Support.
+	  This option enables the MobiCore driver, which is used to communicate
+	  with the TEE OS for things like setting up log buffers, interrupt pins,
+	  and IPC channels.
+
+config TRUSTONIC_TEE_VERSION
+	string "TRUSTONIC TEE Version"
+	depends on TRUSTONIC_TEE_SUPPORT
+	default "410"
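+	help
+	  Select which gud/<version> sub-directory is built; the value is
+	  used by drivers/tee/gud/Makefile to pick the matching driver
+	  sources.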
+
+config TRUSTONIC_TEE_DEBUG
+	bool "Trustonic TEE driver debug mode"
+	depends on TRUSTONIC_TEE_SUPPORT
+	default n
+	help
+	  Enable the debug mode in the Trustonic TEE Driver.
+	  Unlike MOBICORE_DEBUG and MOBICORE_VERBOSE used in the TBASE 302 series,
+	  the TBASE 311 series uses a single option to turn on the necessary debug
+	  logs. Enable this option for debugging to see a more detailed driver flow.
+
+config TRUSTONIC_TRUSTED_UI
+	tristate "<t-base TUI"
+	depends on TRUSTONIC_TEE_SUPPORT
+	default n
+	help
+	  Enable the <t-base Trusted User Interface.
+	  The Trustonic TUI feature is based on the Trustonic TEE mechanism to
+	  protect touch/display inside the TEE, preventing malicious access to
+	  sensitive data used by apps such as banking.
diff --git a/src/kernel/linux/v4.19/drivers/tee/gud/Makefile b/src/kernel/linux/v4.19/drivers/tee/gud/Makefile
new file mode 100644
index 0000000..33dd1e7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/gud/Makefile
@@ -0,0 +1,3 @@
+ifneq ($(CONFIG_TRUSTONIC_TEE_SUPPORT),)
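+# CONFIG_TRUSTONIC_TEE_VERSION is a quoted string (e.g. "410"); strip the
+# quotes so the matching versioned sub-directory gets built.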
+	obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += $(subst ",,$(CONFIG_TRUSTONIC_TEE_VERSION))/
+endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/Kconfig b/src/kernel/linux/v4.19/drivers/tee/optee/Kconfig
new file mode 100644
index 0000000..3c59e19
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/Kconfig
@@ -0,0 +1,15 @@
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+	tristate "OP-TEE"
+	depends on HAVE_ARM_SMCCC
+	help
+	  This implements the OP-TEE Trusted Execution Environment (TEE)
+	  driver.
+
+config OPTEE_SHM_NUM_PRIV_PAGES
+	int "Private Shared Memory Pages"
+	default 1
+	depends on OPTEE
+	help
+	  This sets the number of private shared memory pages to be
+	  used by OP-TEE TEE driver.
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/Makefile b/src/kernel/linux/v4.19/drivers/tee/optee/Makefile
new file mode 100644
index 0000000..48d262a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += rpc.o
+optee-objs += supp.o
+optee-objs += shm_pool.o
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/call.c b/src/kernel/linux/v4.19/drivers/tee/optee/call.c
new file mode 100644
index 0000000..a5afbe6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/call.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+struct optee_call_waiter {
+	struct list_head list_node;
+	struct completion c;
+};
+
+static void optee_cq_wait_init(struct optee_call_queue *cq,
+			       struct optee_call_waiter *w)
+{
+	/*
+	 * We're preparing to make a call to secure world. In case we can't
+	 * allocate a thread in secure world we'll end up waiting in
+	 * optee_cq_wait_for_completion().
+	 *
+	 * Normally if there's no contention in secure world the call will
+	 * complete and we can cleanup directly with optee_cq_wait_final().
+	 */
+	mutex_lock(&cq->mutex);
+
+	/*
+	 * We add ourselves to the queue, but we don't wait. This
+	 * guarantees that we don't lose a completion if secure world
+	 * returns busy and another thread just exited and tried to complete
+	 * someone.
+	 */
+	init_completion(&w->c);
+	list_add_tail(&w->list_node, &cq->waiters);
+
+	mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+					 struct optee_call_waiter *w)
+{
+	wait_for_completion(&w->c);
+
+	mutex_lock(&cq->mutex);
+
+	/* Move to end of list to get out of the way for other waiters */
+	list_del(&w->list_node);
+	reinit_completion(&w->c);
+	list_add_tail(&w->list_node, &cq->waiters);
+
+	mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_complete_one(struct optee_call_queue *cq)
+{
+	struct optee_call_waiter *w;
+
+	list_for_each_entry(w, &cq->waiters, list_node) {
+		if (!completion_done(&w->c)) {
+			complete(&w->c);
+			break;
+		}
+	}
+}
+
+static void optee_cq_wait_final(struct optee_call_queue *cq,
+				struct optee_call_waiter *w)
+{
+	/*
+	 * We're done with the call to secure world. The thread in secure
+	 * world that was used for this call is now available for some
+	 * other task to use.
+	 */
+	mutex_lock(&cq->mutex);
+
+	/* Get out of the list */
+	list_del(&w->list_node);
+
+	/* Wake up one eventual waiting task */
+	optee_cq_complete_one(cq);
+
+	/*
+	 * If we're completed we've got a completion from another task that
+	 * was just done with its call to secure world. Since yet another
+	 * thread now is available in secure world wake up another eventual
+	 * waiting task.
+	 */
+	if (completion_done(&w->c))
+		optee_cq_complete_one(cq);
+
+	mutex_unlock(&cq->mutex);
+}
+
+/* Requires the filpstate mutex to be held */
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
+					  u32 session_id)
+{
+	struct optee_session *sess;
+
+	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+		if (sess->session_id == session_id)
+			return sess;
+
+	return NULL;
+}
+
+/**
+ * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx:	calling context
+ * @parg:	physical address of message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles eventual resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
+{
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct optee_call_waiter w;
+	struct optee_rpc_param param = { };
+	struct optee_call_ctx call_ctx = { };
+	u32 ret;
+
+	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+	reg_pair_from_64(&param.a1, &param.a2, parg);
+	/* Initialize waiter */
+	optee_cq_wait_init(&optee->call_queue, &w);
+	while (true) {
+		struct arm_smccc_res res;
+
+		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
+				 param.a4, param.a5, param.a6, param.a7,
+				 &res);
+
+		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+			/*
+			 * Out of threads in secure world, wait for a thread
+			 * to become available.
+			 */
+			optee_cq_wait_for_completion(&optee->call_queue, &w);
+		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+			param.a0 = res.a0;
+			param.a1 = res.a1;
+			param.a2 = res.a2;
+			param.a3 = res.a3;
+			optee_handle_rpc(ctx, &param, &call_ctx);
+		} else {
+			ret = res.a0;
+			break;
+		}
+	}
+
+	optee_rpc_finalize_call(&call_ctx);
+	/*
+	 * We're done with our thread in secure world, if there are any
+	 * thread waiters, wake up one.
+	 */
+	optee_cq_wait_final(&optee->call_queue, &w);
+
+	return ret;
+}
+
+static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
+				   struct optee_msg_arg **msg_arg,
+				   phys_addr_t *msg_parg)
+{
+	int rc;
+	struct tee_shm *shm;
+	struct optee_msg_arg *ma;
+
+	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
+			    TEE_SHM_MAPPED);
+	if (IS_ERR(shm))
+		return shm;
+
+	ma = tee_shm_get_va(shm, 0);
+	if (IS_ERR(ma)) {
+		rc = PTR_ERR(ma);
+		goto out;
+	}
+
+	rc = tee_shm_get_pa(shm, 0, msg_parg);
+	if (rc)
+		goto out;
+
+	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
+	ma->num_params = num_params;
+	*msg_arg = ma;
+out:
+	if (rc) {
+		tee_shm_free(shm);
+		return ERR_PTR(rc);
+	}
+
+	return shm;
+}
+
+int optee_open_session(struct tee_context *ctx,
+		       struct tee_ioctl_open_session_arg *arg,
+		       struct tee_param *param)
+{
+	struct optee_context_data *ctxdata = ctx->data;
+	int rc;
+	struct tee_shm *shm;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	struct optee_session *sess = NULL;
+
+	/* +2 for the meta parameters added below */
+	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+	msg_arg->cancel_id = arg->cancel_id;
+
+	/*
+	 * Initialize and add the meta parameters needed when opening a
+	 * session.
+	 */
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+				  OPTEE_MSG_ATTR_META;
+	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+				  OPTEE_MSG_ATTR_META;
+	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+	memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid, sizeof(arg->clnt_uuid));
+	msg_arg->params[1].u.value.c = arg->clnt_login;
+
+	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+	if (rc)
+		goto out;
+
+	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+	if (!sess) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (optee_do_call_with_arg(ctx, msg_parg)) {
+		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+	}
+
+	if (msg_arg->ret == TEEC_SUCCESS) {
+		/* A new session has been created, add it to the list. */
+		sess->session_id = msg_arg->session;
+		mutex_lock(&ctxdata->mutex);
+		list_add(&sess->list_node, &ctxdata->sess_list);
+		mutex_unlock(&ctxdata->mutex);
+	} else {
+		kfree(sess);
+	}
+
+	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
+		arg->ret = TEEC_ERROR_COMMUNICATION;
+		arg->ret_origin = TEEC_ORIGIN_COMMS;
+		/* Close session again to avoid leakage */
+		optee_close_session(ctx, msg_arg->session);
+	} else {
+		arg->session = msg_arg->session;
+		arg->ret = msg_arg->ret;
+		arg->ret_origin = msg_arg->ret_origin;
+	}
+out:
+	tee_shm_free(shm);
+
+	return rc;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+	struct optee_context_data *ctxdata = ctx->data;
+	struct tee_shm *shm;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	struct optee_session *sess;
+
+	/* Check that the session is valid and remove it from the list */
+	mutex_lock(&ctxdata->mutex);
+	sess = find_session(ctxdata, session);
+	if (sess)
+		list_del(&sess->list_node);
+	mutex_unlock(&ctxdata->mutex);
+	if (!sess)
+		return -EINVAL;
+	kfree(sess);
+
+	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+	msg_arg->session = session;
+	optee_do_call_with_arg(ctx, msg_parg);
+
+	tee_shm_free(shm);
+	return 0;
+}
+
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+		      struct tee_param *param)
+{
+	struct optee_context_data *ctxdata = ctx->data;
+	struct tee_shm *shm;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	struct optee_session *sess;
+	int rc;
+
+	/* Check that the session is valid */
+	mutex_lock(&ctxdata->mutex);
+	sess = find_session(ctxdata, arg->session);
+	mutex_unlock(&ctxdata->mutex);
+	if (!sess)
+		return -EINVAL;
+
+	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+	msg_arg->func = arg->func;
+	msg_arg->session = arg->session;
+	msg_arg->cancel_id = arg->cancel_id;
+
+	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
+	if (rc)
+		goto out;
+
+	if (optee_do_call_with_arg(ctx, msg_parg)) {
+		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+	}
+
+	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
+		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+	}
+
+	arg->ret = msg_arg->ret;
+	arg->ret_origin = msg_arg->ret_origin;
+out:
+	tee_shm_free(shm);
+	return rc;
+}
+
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+	struct optee_context_data *ctxdata = ctx->data;
+	struct tee_shm *shm;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	struct optee_session *sess;
+
+	/* Check that the session is valid */
+	mutex_lock(&ctxdata->mutex);
+	sess = find_session(ctxdata, session);
+	mutex_unlock(&ctxdata->mutex);
+	if (!sess)
+		return -EINVAL;
+
+	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
+	msg_arg->session = session;
+	msg_arg->cancel_id = cancel_id;
+	optee_do_call_with_arg(ctx, msg_parg);
+
+	tee_shm_free(shm);
+	return 0;
+}
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
+ *			      in OP-TEE
+ * @optee:	main service struct
+ */
+void optee_enable_shm_cache(struct optee *optee)
+{
+	struct optee_call_waiter w;
+
+	/* We need to retry until secure world isn't busy. */
+	optee_cq_wait_init(&optee->call_queue, &w);
+	while (true) {
+		struct arm_smccc_res res;
+
+		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+				 0, &res);
+		if (res.a0 == OPTEE_SMC_RETURN_OK)
+			break;
+		optee_cq_wait_for_completion(&optee->call_queue, &w);
+	}
+	optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of some shared memory allocation
+ *			      in OP-TEE
+ * @optee:	main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+	struct optee_call_waiter w;
+
+	/* We need to retry until secure world isn't busy. */
+	optee_cq_wait_init(&optee->call_queue, &w);
+	while (true) {
+		union {
+			struct arm_smccc_res smccc;
+			struct optee_smc_disable_shm_cache_result result;
+		} res;
+
+		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+				 0, &res.smccc);
+		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+			break; /* All shm's freed */
+		if (res.result.status == OPTEE_SMC_RETURN_OK) {
+			struct tee_shm *shm;
+
+			shm = reg_pair_to_ptr(res.result.shm_upper32,
+					      res.result.shm_lower32);
+			tee_shm_free(shm);
+		} else {
+			optee_cq_wait_for_completion(&optee->call_queue, &w);
+		}
+	}
+	optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE				\
+	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
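+
+/*
+ * Worked example (illustrative): with OPTEE_MSG_NONCONTIG_PAGE_SIZE == 4096
+ * and 8-byte entries, each pagelist page holds 511 page addresses plus one
+ * trailing link to the next pagelist page.
+ */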
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ *	links to the next pages of buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset)
+{
+	int n = 0;
+	phys_addr_t optee_page;
+	/*
+	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+	 * for details.
+	 */
+	struct {
+		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+		u64 next_page_data;
+	} *pages_data;
+
+	/*
+	 * Currently OP-TEE uses 4k page size and it does not look
+	 * like this will change in the future.  On the other hand, there are
+	 * no known ARM architectures with page size < 4k.
+	 * Thus the next build assert looks redundant. But the following
+	 * code heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+	 */
+	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	pages_data = (void *)dst;
+	/*
+	 * If linux page is bigger than 4k, and user buffer offset is
+	 * larger than 4k/8k/12k/etc this will skip first 4k pages,
+	 * because they bear no value data for OP-TEE.
+	 */
+	optee_page = page_to_phys(*pages) +
+		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	while (true) {
+		pages_data->pages_list[n++] = optee_page;
+
+		if (n == PAGELIST_ENTRIES_PER_PAGE) {
+			pages_data->next_page_data =
+				virt_to_phys(pages_data + 1);
+			pages_data++;
+			n = 0;
+		}
+
+		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+		if (!(optee_page & ~PAGE_MASK)) {
+			if (!--num_pages)
+				break;
+			pages++;
+			optee_page = page_to_phys(*pages);
+		}
+	}
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+	free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+#elif defined(CONFIG_ARM64)
+	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+{
+	while (vma && is_normal_memory(vma->vm_page_prot)) {
+		if (vma->vm_end >= end)
+			return 0;
+		vma = vma->vm_next;
+	}
+
+	return -EINVAL;
+}
+
+static int check_mem_type(unsigned long start, size_t num_pages)
+{
+	struct mm_struct *mm = current->mm;
+	int rc;
+
+	down_read(&mm->mmap_sem);
+	rc = __check_mem_type(find_vma(mm, start),
+			      start + num_pages * PAGE_SIZE);
+	up_read(&mm->mmap_sem);
+
+	return rc;
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages,
+		       unsigned long start)
+{
+	struct tee_shm *shm_arg = NULL;
+	struct optee_msg_arg *msg_arg;
+	u64 *pages_list;
+	phys_addr_t msg_parg;
+	int rc;
+
+	if (!num_pages)
+		return -EINVAL;
+
+	rc = check_mem_type(start, num_pages);
+	if (rc)
+		return rc;
+
+	pages_list = optee_allocate_pages_list(num_pages);
+	if (!pages_list)
+		return -ENOMEM;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg)) {
+		rc = PTR_ERR(shm_arg);
+		goto out;
+	}
+
+	optee_fill_pages_list(pages_list, pages, num_pages,
+			      tee_shm_get_page_offset(shm));
+
+	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				OPTEE_MSG_ATTR_NONCONTIG;
+	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+	/*
+	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
+	 * store buffer offset from 4k page, as described in OP-TEE ABI.
+	 */
+	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+
+	tee_shm_free(shm_arg);
+out:
+	optee_free_pages_list(pages_list, num_pages);
+	return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_shm *shm_arg;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg))
+		return PTR_ERR(shm_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+	tee_shm_free(shm_arg);
+	return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages,
+			    unsigned long start)
+{
+	/*
+	 * We don't want to register supplicant memory in OP-TEE.
+	 * Instead, information about it will be passed in the RPC code.
+	 */
+	return check_mem_type(start, num_pages);
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+	return 0;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/core.c b/src/kernel/linux/v4.19/drivers/tee/optee/core.c
new file mode 100644
index 0000000..2f254f9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/core.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+#define DRIVER_NAME "optee"
+
+#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ *			    struct tee_param
+ * @params:	subsystem internal parameter representation
+ * @num_params:	number of elements in the parameter arrays
+ * @msg_params:	OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+			 const struct optee_msg_param *msg_params)
+{
+	int rc;
+	size_t n;
+	struct tee_shm *shm;
+	phys_addr_t pa;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_param *p = params + n;
+		const struct optee_msg_param *mp = msg_params + n;
+		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+		switch (attr) {
+		case OPTEE_MSG_ATTR_TYPE_NONE:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+			memset(&p->u, 0, sizeof(p->u));
+			break;
+		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+			p->u.value.a = mp->u.value.a;
+			p->u.value.b = mp->u.value.b;
+			p->u.value.c = mp->u.value.c;
+			break;
+		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+			p->u.memref.size = mp->u.tmem.size;
+			shm = (struct tee_shm *)(unsigned long)
+				mp->u.tmem.shm_ref;
+			if (!shm) {
+				p->u.memref.shm_offs = 0;
+				p->u.memref.shm = NULL;
+				break;
+			}
+			rc = tee_shm_get_pa(shm, 0, &pa);
+			if (rc)
+				return rc;
+			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+			p->u.memref.shm = shm;
+
+			/* Check that the memref is covered by the shm object */
+			if (p->u.memref.size) {
+				size_t o = p->u.memref.shm_offs +
+					   p->u.memref.size - 1;
+
+				rc = tee_shm_get_pa(shm, o, NULL);
+				if (rc)
+					return rc;
+			}
+			break;
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+			p->u.memref.size = mp->u.rmem.size;
+			shm = (struct tee_shm *)(unsigned long)
+				mp->u.rmem.shm_ref;
+
+			if (!shm) {
+				p->u.memref.shm_offs = 0;
+				p->u.memref.shm = NULL;
+				break;
+			}
+			p->u.memref.shm_offs = mp->u.rmem.offs;
+			p->u.memref.shm = shm;
+
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	int rc;
+	phys_addr_t pa;
+
+	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.tmem.size = p->u.memref.size;
+
+	if (!p->u.memref.shm) {
+		mp->u.tmem.buf_ptr = 0;
+		return 0;
+	}
+
+	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+	if (rc)
+		return rc;
+
+	mp->u.tmem.buf_ptr = pa;
+	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+		    OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+	return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.rmem.size = p->u.memref.size;
+	mp->u.rmem.offs = p->u.memref.shm_offs;
+	return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
+ * @msg_params:	OPTEE_MSG parameters
+ * @num_params:	number of elements in the parameter arrays
+ * @params:	subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+		       const struct tee_param *params)
+{
+	int rc;
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		const struct tee_param *p = params + n;
+		struct optee_msg_param *mp = msg_params + n;
+
+		switch (p->attr) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+			memset(&mp->u, 0, sizeof(mp->u));
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+				   TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+			mp->u.value.a = p->u.value.a;
+			mp->u.value.b = p->u.value.b;
+			mp->u.value.c = p->u.value.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			if (tee_shm_is_registered(p->u.memref.shm))
+				rc = to_msg_param_reg_mem(mp, p);
+			else
+				rc = to_msg_param_tmp_mem(mp, p);
+			if (rc)
+				return rc;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static void optee_get_version(struct tee_device *teedev,
+			      struct tee_ioctl_version_data *vers)
+{
+	struct tee_ioctl_version_data v = {
+		.impl_id = TEE_IMPL_ID_OPTEE,
+		.impl_caps = TEE_OPTEE_CAP_TZ,
+		.gen_caps = TEE_GEN_CAP_GP,
+	};
+	struct optee *optee = tee_get_drvdata(teedev);
+
+	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+	*vers = v;
+}
+
+static int optee_open(struct tee_context *ctx)
+{
+	struct optee_context_data *ctxdata;
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+
+	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+	if (!ctxdata)
+		return -ENOMEM;
+
+	if (teedev == optee->supp_teedev) {
+		bool busy = true;
+
+		mutex_lock(&optee->supp.mutex);
+		if (!optee->supp.ctx) {
+			busy = false;
+			optee->supp.ctx = ctx;
+		}
+		mutex_unlock(&optee->supp.mutex);
+		if (busy) {
+			kfree(ctxdata);
+			return -EBUSY;
+		}
+	}
+
+	mutex_init(&ctxdata->mutex);
+	INIT_LIST_HEAD(&ctxdata->sess_list);
+
+	ctx->data = ctxdata;
+	return 0;
+}
+
+static void optee_release(struct tee_context *ctx)
+{
+	struct optee_context_data *ctxdata = ctx->data;
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct tee_shm *shm;
+	struct optee_msg_arg *arg = NULL;
+	phys_addr_t parg;
+	struct optee_session *sess;
+	struct optee_session *sess_tmp;
+
+	if (!ctxdata)
+		return;
+
+	shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+	if (!IS_ERR(shm)) {
+		arg = tee_shm_get_va(shm, 0);
+		/*
+		 * If va2pa fails for some reason, we can't call into
+		 * secure world; we can only free the memory. Secure OS
+		 * will leak sessions and finally refuse more sessions, but
+		 * we will at least let normal world reclaim its memory.
+		 */
+		if (!IS_ERR(arg))
+			if (tee_shm_va2pa(shm, arg, &parg))
+				arg = NULL; /* prevent usage of parg below */
+	}
+
+	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
+				 list_node) {
+		list_del(&sess->list_node);
+		if (!IS_ERR_OR_NULL(arg)) {
+			memset(arg, 0, sizeof(*arg));
+			arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+			arg->session = sess->session_id;
+			optee_do_call_with_arg(ctx, parg);
+		}
+		kfree(sess);
+	}
+	kfree(ctxdata);
+
+	if (!IS_ERR(shm))
+		tee_shm_free(shm);
+
+	ctx->data = NULL;
+
+	if (teedev == optee->supp_teedev)
+		optee_supp_release(&optee->supp);
+}
+
+static const struct tee_driver_ops optee_ops = {
+	.get_version = optee_get_version,
+	.open = optee_open,
+	.release = optee_release,
+	.open_session = optee_open_session,
+	.close_session = optee_close_session,
+	.invoke_func = optee_invoke_func,
+	.cancel_req = optee_cancel_req,
+	.shm_register = optee_shm_register,
+	.shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_desc = {
+	.name = DRIVER_NAME "-clnt",
+	.ops = &optee_ops,
+	.owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+	.get_version = optee_get_version,
+	.open = optee_open,
+	.release = optee_release,
+	.supp_recv = optee_supp_recv,
+	.supp_send = optee_supp_send,
+	.shm_register = optee_shm_register_supp,
+	.shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+	.name = DRIVER_NAME "-supp",
+	.ops = &optee_supp_ops,
+	.owner = THIS_MODULE,
+	.flags = TEE_DESC_PRIVILEGED,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+	struct arm_smccc_res res;
+
+	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+		return true;
+	return false;
+}
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+	union {
+		struct arm_smccc_res smccc;
+		struct optee_smc_call_get_os_revision_result result;
+	} res = {
+		.result = {
+			.build_id = 0
+		}
+	};
+
+	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+		  &res.smccc);
+
+	if (res.result.build_id)
+		pr_info("revision %lu.%lu (%08lx)", res.result.major,
+			res.result.minor, res.result.build_id);
+	else
+		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+	union {
+		struct arm_smccc_res smccc;
+		struct optee_smc_calls_revision_result result;
+	} res;
+
+	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+		return true;
+	return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+					    u32 *sec_caps)
+{
+	union {
+		struct arm_smccc_res smccc;
+		struct optee_smc_exchange_capabilities_result result;
+	} res;
+	u32 a1 = 0;
+
+	/*
+	 * TODO This isn't enough to tell if it's a UP system (from the
+	 * kernel's point of view) or not; is_smp() returns the information
+	 * needed, but can't be called directly from here.
+	 */
+	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+		  &res.smccc);
+
+	if (res.result.status != OPTEE_SMC_RETURN_OK)
+		return false;
+
+	*sec_caps = res.result.capabilities;
+	return true;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+			  u32 sec_caps)
+{
+	union {
+		struct arm_smccc_res smccc;
+		struct optee_smc_get_shm_config_result result;
+	} res;
+	unsigned long vaddr;
+	phys_addr_t paddr;
+	size_t size;
+	phys_addr_t begin;
+	phys_addr_t end;
+	void *va;
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
+
+	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+	if (res.result.status != OPTEE_SMC_RETURN_OK) {
+		pr_info("shm service not available\n");
+		return ERR_PTR(-ENOENT);
+	}
+
+	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+		pr_err("only normal cached shared memory supported\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	begin = roundup(res.result.start, PAGE_SIZE);
+	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+	paddr = begin;
+	size = end - begin;
+
+	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+		pr_err("too small shared memory area\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	va = memremap(paddr, size, MEMREMAP_WB);
+	if (!va) {
+		pr_err("shared memory ioremap failed\n");
+		return ERR_PTR(-EINVAL);
+	}
+	vaddr = (unsigned long)va;
+
+	/*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm.
+	 */
+	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+		rc = optee_shm_pool_alloc_pages();
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+	} else {
+		const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+		rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+						    3 /* 8 bytes aligned */);
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+
+		vaddr += sz;
+		paddr += sz;
+		size -= sz;
+	}
+
+	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	*memremaped_shm = va;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+	memunmap(va);
+	return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+			    unsigned long a2, unsigned long a3,
+			    unsigned long a4, unsigned long a5,
+			    unsigned long a6, unsigned long a7,
+			    struct arm_smccc_res *res)
+{
+	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+			    unsigned long a2, unsigned long a3,
+			    unsigned long a4, unsigned long a5,
+			    unsigned long a6, unsigned long a7,
+			    struct arm_smccc_res *res)
+{
+	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return ERR_PTR(-ENXIO);
+	}
+
+	if (!strcmp("hvc", method))
+		return optee_smccc_hvc;
+	else if (!strcmp("smc", method))
+		return optee_smccc_smc;
+
+	pr_warn("invalid \"method\" property: %s\n", method);
+	return ERR_PTR(-EINVAL);
+}
+
+static struct optee *optee_probe(struct device_node *np)
+{
+	optee_invoke_fn *invoke_fn;
+	struct tee_shm_pool *pool;
+	struct optee *optee = NULL;
+	void *memremaped_shm = NULL;
+	struct tee_device *teedev;
+	u32 sec_caps;
+	int rc;
+
+	invoke_fn = get_invoke_func(np);
+	if (IS_ERR(invoke_fn))
+		return (void *)invoke_fn;
+
+	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+		pr_warn("api uid mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	optee_msg_get_os_revision(invoke_fn);
+
+	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+		pr_warn("api revision mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+		pr_warn("capabilities mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * We have no other option for shared memory: if secure world
+	 * doesn't have any reserved memory we can use, we can't continue.
+	 */
+	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+		return ERR_PTR(-EINVAL);
+
+	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
+	if (IS_ERR(pool))
+		return (void *)pool;
+
+	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+	if (!optee) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	optee->invoke_fn = invoke_fn;
+	optee->sec_caps = sec_caps;
+
+	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
+	if (IS_ERR(teedev)) {
+		rc = PTR_ERR(teedev);
+		goto err;
+	}
+	optee->teedev = teedev;
+
+	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+	if (IS_ERR(teedev)) {
+		rc = PTR_ERR(teedev);
+		goto err;
+	}
+	optee->supp_teedev = teedev;
+
+	rc = tee_device_register(optee->teedev);
+	if (rc)
+		goto err;
+
+	rc = tee_device_register(optee->supp_teedev);
+	if (rc)
+		goto err;
+
+	mutex_init(&optee->call_queue.mutex);
+	INIT_LIST_HEAD(&optee->call_queue.waiters);
+	optee_wait_queue_init(&optee->wait_queue);
+	optee_supp_init(&optee->supp);
+	optee->memremaped_shm = memremaped_shm;
+	optee->pool = pool;
+
+	optee_enable_shm_cache(optee);
+
+	pr_info("initialized driver\n");
+	return optee;
+err:
+	if (optee) {
+		/*
+		 * tee_device_unregister() is safe to call even if the
+		 * devices haven't been registered with
+		 * tee_device_register() yet.
+		 */
+		tee_device_unregister(optee->supp_teedev);
+		tee_device_unregister(optee->teedev);
+		kfree(optee);
+	}
+	if (pool)
+		tee_shm_pool_free(pool);
+	if (memremaped_shm)
+		memunmap(memremaped_shm);
+	return ERR_PTR(rc);
+}
+
+static void optee_remove(struct optee *optee)
+{
+	/*
+	 * Ask OP-TEE to free all cached shared memory objects to decrease
+	 * reference counters and also avoid wild pointers in secure world
+	 * into the old shared memory range.
+	 */
+	optee_disable_shm_cache(optee);
+
+	/*
+	 * The two devices have to be unregistered before we can free the
+	 * other resources.
+	 */
+	tee_device_unregister(optee->supp_teedev);
+	tee_device_unregister(optee->teedev);
+
+	tee_shm_pool_free(optee->pool);
+	if (optee->memremaped_shm)
+		memunmap(optee->memremaped_shm);
+	optee_wait_queue_exit(&optee->wait_queue);
+	optee_supp_uninit(&optee->supp);
+	mutex_destroy(&optee->call_queue.mutex);
+
+	kfree(optee);
+}
+
+static const struct of_device_id optee_match[] = {
+	{ .compatible = "linaro,optee-tz" },
+	{},
+};
+
+static struct optee *optee_svc;
+
+static int __init optee_driver_init(void)
+{
+	struct device_node *fw_np;
+	struct device_node *np;
+	struct optee *optee;
+
+	/* Node is supposed to be below /firmware */
+	fw_np = of_find_node_by_name(NULL, "firmware");
+	if (!fw_np)
+		return -ENODEV;
+
+	np = of_find_matching_node(fw_np, optee_match);
+	if (!np || !of_device_is_available(np)) {
+		of_node_put(np);
+		return -ENODEV;
+	}
+
+	optee = optee_probe(np);
+	of_node_put(np);
+
+	if (IS_ERR(optee))
+		return PTR_ERR(optee);
+
+	optee_svc = optee;
+
+	return 0;
+}
+module_init(optee_driver_init);
+
+static void __exit optee_driver_exit(void)
+{
+	struct optee *optee = optee_svc;
+
+	optee_svc = NULL;
+	if (optee)
+		optee_remove(optee);
+}
+module_exit(optee_driver_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("OP-TEE driver");
+MODULE_SUPPORTED_DEVICE("");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/optee_msg.h b/src/kernel/linux/v4.19/drivers/tee/optee/optee_msg.h
new file mode 100644
index 0000000..3050490
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _OPTEE_MSG_H
+#define _OPTEE_MSG_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * This file defines the OP-TEE message protocol used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into three sections.
+ * 1. Formatting of messages.
+ * 2. Requests from normal world
+ * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
+ *    tee-supplicant.
+ */
+
+/*****************************************************************************
+ * Part 1 - formatting of messages
+ *****************************************************************************/
+
+#define OPTEE_MSG_ATTR_TYPE_NONE		0x0
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT		0x1
+#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT	0x2
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT		0x3
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT		0x5
+#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT		0x6
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT		0x7
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT		0x9
+#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT		0xa
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT		0xb
+
+#define OPTEE_MSG_ATTR_TYPE_MASK		GENMASK(7, 0)
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
+ */
+#define OPTEE_MSG_ATTR_META			BIT(8)
+
+/*
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer, which contains the
+ * list of page addresses. The OP-TEE core can reconstruct a contiguous
+ * buffer from that list of page addresses. Page addresses are stored as
+ * 64-bit values. The last entry on a page should point to the next page of
+ * the buffer. Every entry in the buffer should point to the beginning of a
+ * 4k page (the 12 least significant bits must be zero).
+ *
+ * The 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should
+ * hold the page offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *   uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *   uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If the REE uses larger pages, it should divide them into
+ * 4KB ones.
+ */
+#define OPTEE_MSG_ATTR_NONCONTIG		BIT(9)
+
+/*
+ * Memory attributes for caching passed with temp memrefs. The actual value
+ * used is defined outside the message protocol with the exception of
+ * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
+ * defined for the memory range should be used. If optee_smc.h is used as
+ * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
+ */
+#define OPTEE_MSG_ATTR_CACHE_SHIFT		16
+#define OPTEE_MSG_ATTR_CACHE_MASK		GENMASK(2, 0)
+#define OPTEE_MSG_ATTR_CACHE_PREDEFINED		0
+
+/*
+ * Same values as TEE_LOGIN_* from TEE Internal API
+ */
+#define OPTEE_MSG_LOGIN_PUBLIC			0x00000000
+#define OPTEE_MSG_LOGIN_USER			0x00000001
+#define OPTEE_MSG_LOGIN_GROUP			0x00000002
+#define OPTEE_MSG_LOGIN_APPLICATION		0x00000004
+#define OPTEE_MSG_LOGIN_APPLICATION_USER	0x00000005
+#define OPTEE_MSG_LOGIN_APPLICATION_GROUP	0x00000006
+
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE		4096
+
+/**
+ * struct optee_msg_param_tmem - temporary memory reference parameter
+ * @buf_ptr:	Address of the buffer
+ * @size:	Size of the buffer
+ * @shm_ref:	Temporary shared memory reference, pointer to a struct tee_shm
+ *
+ * Secure and normal world communicate pointers as physical addresses
+ * instead of virtual addresses. This is because secure and normal world
+ * have completely independent memory mappings. Normal world can even have
+ * a hypervisor which needs to translate the guest physical address (AKA
+ * IPA in ARM documentation) to a real physical address before passing the
+ * structure to secure world.
+ */
+struct optee_msg_param_tmem {
+	u64 buf_ptr;
+	u64 size;
+	u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_rmem - registered memory reference parameter
+ * @offs:	Offset into shared memory reference
+ * @size:	Size of the buffer
+ * @shm_ref:	Shared memory reference, pointer to a struct tee_shm
+ */
+struct optee_msg_param_rmem {
+	u64 offs;
+	u64 size;
+	u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_value - opaque value parameter
+ *
+ * Value parameters are passed unchecked between normal and secure world.
+ */
+struct optee_msg_param_value {
+	u64 a;
+	u64 b;
+	u64 c;
+};
+
+/**
+ * struct optee_msg_param - parameter used together with struct optee_msg_arg
+ * @attr:	attributes
+ * @tmem:	parameter by temporary memory reference
+ * @rmem:	parameter by registered memory reference
+ * @value:	parameter by opaque value
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+ */
+struct optee_msg_param {
+	u64 attr;
+	union {
+		struct optee_msg_param_tmem tmem;
+		struct optee_msg_param_rmem rmem;
+		struct optee_msg_param_value value;
+	} u;
+};
+
+/**
+ * struct optee_msg_arg - call argument
+ * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
+ * @func: Trusted Application function, specific to the Trusted Application,
+ *	     used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
+ * @session: In parameter for all OPTEE_MSG_CMD_* except
+ *	     OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
+ * @cancel_id: Cancellation id, a unique value to identify this request
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal calls to the Trusted OS use this struct. If cmd requires more
+ * information than these fields hold, it can be passed as a parameter
+ * tagged as meta (by setting the OPTEE_MSG_ATTR_META bit in the
+ * corresponding attrs field). All parameters tagged as meta have to come
+ * first.
+ *
+ * Temp memref parameters can be fragmented if supported by the Trusted OS
+ * (when optee_smc.h is the bearer of this protocol this is indicated with
+ * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
+ * fragmented, all but the last fragment have the OPTEE_MSG_ATTR_FRAGMENT
+ * bit set in attrs. Even if a memref is fragmented it will still be
+ * presented as a single logical memref to the Trusted Application.
+ */
+struct optee_msg_arg {
+	u32 cmd;
+	u32 func;
+	u32 session;
+	u32 cancel_id;
+	u32 pad;
+	u32 ret;
+	u32 ret_origin;
+	u32 num_params;
+
+	/* num_params tells the actual number of element in params */
+	struct optee_msg_param params[0];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+	(sizeof(struct optee_msg_arg) + \
+	 sizeof(struct optee_msg_param) * (num_params))
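A hedged sketch of how a caller might size and populate a one-parameter argument block using this macro. The function name and payload are made up for illustration; the driver itself obtains such blocks from shared memory (see the get_msg_arg() usage in call.c above) and is assumed to have the usual kernel string helpers available:

/* Illustrative only: fill a single VALUE_INPUT parameter for an invoke. */
static struct optee_msg_arg *example_build_arg(void *buf, size_t buf_len,
					       u32 session, u32 func)
{
	struct optee_msg_arg *arg = buf;

	if (buf_len < OPTEE_MSG_GET_ARG_SIZE(1))
		return NULL;

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	arg->func = func;
	arg->session = session;
	arg->num_params = 1;
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = 42;	/* arbitrary example payload */
	return arg;
}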
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ */
+#define OPTEE_MSG_UID_0			0x384fb3e0
+#define OPTEE_MSG_UID_1			0xe7f811e3
+#define OPTEE_MSG_UID_2			0xaf630002
+#define OPTEE_MSG_UID_3			0xa5d5c51b
+#define OPTEE_MSG_FUNCID_CALLS_UID	0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR	2
+#define OPTEE_MSG_REVISION_MINOR	0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION	0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0	0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1	0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2	0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3	0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID	0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION	0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened
+ * session to a Trusted Application.  struct optee_msg_arg::func is Trusted
+ * Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ *					[| OPTEE_MSG_ATTR_FRAGMENT]
+ * [in] param[0].u.tmem.buf_ptr		physical address (of first fragment)
+ * [in] param[0].u.tmem.size		size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref		holds shared memory reference
+ * ...
+ * The shared memory can optionally be fragmented, temp memrefs can follow
+ * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref		holds shared memory reference
+ * [in] param[0].u.rmem.offs		0
+ * [in] param[0].u.rmem.size		0
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION	0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND	1
+#define OPTEE_MSG_CMD_CLOSE_SESSION	2
+#define OPTEE_MSG_CMD_CANCEL		3
+#define OPTEE_MSG_CMD_REGISTER_SHM	4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM	5
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG	0x0004
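A hedged sketch of how the two leading meta parameters of an OPTEE_MSG_CMD_OPEN_SESSION message could be filled per the description above. The function name is made up, and the UUID words and login class are placeholders; the real driver fills these fields in its open-session path:

/* Illustrative only: OPEN_SESSION carries TA and client identity as meta. */
static void example_fill_open_session_meta(struct optee_msg_arg *arg,
					   u64 ta_uuid_hi, u64 ta_uuid_lo)
{
	arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
			      OPTEE_MSG_ATTR_META;
	arg->params[0].u.value.a = ta_uuid_hi;	/* uuid of Trusted Application */
	arg->params[0].u.value.b = ta_uuid_lo;

	arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
			      OPTEE_MSG_ATTR_META;
	/* params[1].u.value.a-b would hold the client uuid (left zero here) */
	arg->params[1].u.value.c = OPTEE_MSG_LOGIN_PUBLIC;
}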
+
+/*****************************************************************************
+ * Part 3 - Requests from secure world, RPC
+ *****************************************************************************/
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::cmd holds values defined by OPTEE_MSG_RPC_CMD_* below
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Load a TA into memory, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_LOAD_TA	0
+
+/*
+ * Reserved
+ */
+#define OPTEE_MSG_RPC_CMD_RPMB		1
+
+/*
+ * File system access, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_FS		2
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nano seconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] param[0].u.value.a	Number of seconds
+ * [out] param[0].u.value.b	Number of nano seconds.
+ */
+#define OPTEE_MSG_RPC_CMD_GET_TIME	3
+
+/*
+ * Wait queue primitive, helper for secure world to implement a wait queue.
+ *
+ * If secure world needs to wait for a secure world mutex it issues a sleep
+ * request instead of spinning in secure world. Conversely, a wakeup
+ * request is issued when a secure world mutex with a waiting thread is
+ * unlocked.
+ *
+ * Waiting on a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
+ * [in] param[0].u.value.b wait key
+ *
+ * Waking up a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
+ * [in] param[0].u.value.b wakeup key
+ */
+#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE	4
+#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP	0
+#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP	1
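A hedged sketch of how normal world might dispatch a wait-queue RPC based on param[0].u.value.a. The function name is made up; the actual handling lives in the driver's RPC code, which is not shown in this section:

/* Illustrative only: sleep/wakeup requests are keyed by u.value.b. */
static void example_handle_wait_queue(struct optee_msg_param *param)
{
	switch (param->u.value.a) {
	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
		/* block the calling thread until the key in u.value.b is woken */
		break;
	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
		/* wake any thread currently sleeping on the key in u.value.b */
		break;
	default:
		break;
	}
}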
+
+/*
+ * Suspend execution
+ *
+ * [in] param[0].u.value.a	number of milliseconds to suspend
+ */
+#define OPTEE_MSG_RPC_CMD_SUSPEND	5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * Shared memory can optionally be fragmented; to support that, additional
+ * spare param entries are allocated to make room for eventual fragments.
+ * The spare param entries have .attr = OPTEE_MSG_ATTR_TYPE_NONE when
+ * unused. All returned temp memrefs except the last should have the
+ * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
+ *
+ * [in]  param[0].u.value.a		type of memory one of
+ *					OPTEE_MSG_RPC_SHM_TYPE_* below
+ * [in]  param[0].u.value.b		requested size
+ * [in]  param[0].u.value.c		required alignment
+ *
+ * [out] param[0].u.tmem.buf_ptr	physical address (of first fragment)
+ * [out] param[0].u.tmem.size		size (of first fragment)
+ * [out] param[0].u.tmem.shm_ref	shared memory reference
+ * ...
+ * [out] param[n].u.tmem.buf_ptr	physical address
+ * [out] param[n].u.tmem.size		size
+ * [out] param[n].u.tmem.shm_ref	shared memory reference (same value
+ *					as in param[n-1].u.tmem.shm_ref)
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_ALLOC	6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_MSG_RPC_SHM_TYPE_APPL	0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL	1
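Sketch (illustrative only, made-up name) of answering OPTEE_MSG_RPC_CMD_SHM_ALLOC with a single, non-fragmented buffer; the real handling, including fragmentation and cookie management, lives in the driver's RPC code:

/* Illustrative only: one temp memref, no OPTEE_MSG_ATTR_FRAGMENT needed. */
static void example_answer_shm_alloc(struct optee_msg_arg *arg,
				     u64 pa, u64 size, u64 cookie)
{
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
	arg->params[0].u.tmem.buf_ptr = pa;
	arg->params[0].u.tmem.size = size;
	arg->params[0].u.tmem.shm_ref = cookie;
	arg->ret = 0;	/* TEEC_SUCCESS */
}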
+
+/*
+ * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
+ *
+ * [in]  param[0].u.value.a		type of memory one of
+ *					OPTEE_MSG_RPC_SHM_TYPE_* above
+ * [in]  param[0].u.value.b		value of shared memory reference
+ *					returned in param[0].u.tmem.shm_ref
+ *					above
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_FREE	7
+
+#endif /* _OPTEE_MSG_H */
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/optee_private.h b/src/kernel/linux/v4.19/drivers/tee/optee/optee_private.h
new file mode 100644
index 0000000..35e7938
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/optee_private.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef OPTEE_PRIVATE_H
+#define OPTEE_PRIVATE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/semaphore.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_msg.h"
+
+#define OPTEE_MAX_ARG_SIZE	1024
+
+/* Some Global Platform error codes used in this driver */
+#define TEEC_SUCCESS			0x00000000
+#define TEEC_ERROR_BAD_PARAMETERS	0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION	0xFFFF000E
+#define TEEC_ERROR_OUT_OF_MEMORY	0xFFFF000C
+
+#define TEEC_ORIGIN_COMMS		0x00000002
+
+typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
+				unsigned long, unsigned long, unsigned long,
+				unsigned long, unsigned long,
+				struct arm_smccc_res *);
+
+struct optee_call_queue {
+	/* Serializes access to this struct */
+	struct mutex mutex;
+	struct list_head waiters;
+};
+
+struct optee_wait_queue {
+	/* Serializes access to this struct */
+	struct mutex mu;
+	struct list_head db;
+};
+
+/**
+ * struct optee_supp - supplicant synchronization struct
+ * @ctx:		the context of the currently connected supplicant.
+ *			If !NULL the supplicant device is available for use,
+ *			else busy
+ * @mutex:		held while accessing content of this struct
+ * @req_id:		current request id if supplicant is doing synchronous
+ *			communication, else -1
+ * @reqs:		queued requests not yet retrieved by the supplicant
+ * @idr:		IDR holding all requests currently being processed
+ *			by supplicant
+ * @reqs_c:		completion used by supplicant when waiting for a
+ *			request to be queued.
+ */
+struct optee_supp {
+	/* Serializes access to this struct */
+	struct mutex mutex;
+	struct tee_context *ctx;
+
+	int req_id;
+	struct list_head reqs;
+	struct idr idr;
+	struct completion reqs_c;
+};
+
+/**
+ * struct optee - main service struct
+ * @supp_teedev:	supplicant device
+ * @teedev:		client device
+ * @invoke_fn:		function to issue smc or hvc
+ * @call_queue:		queue of threads waiting to call @invoke_fn
+ * @wait_queue:		queue of threads from secure world waiting for a
+ *			secure world sync object
+ * @supp:		supplicant synchronization struct for RPC to supplicant
+ * @pool:		shared memory pool
+ * @memremaped_shm:	virtual address of memory in shared memory pool
+ * @sec_caps:		secure world capabilities defined by
+ *			OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ */
+struct optee {
+	struct tee_device *supp_teedev;
+	struct tee_device *teedev;
+	optee_invoke_fn *invoke_fn;
+	struct optee_call_queue call_queue;
+	struct optee_wait_queue wait_queue;
+	struct optee_supp supp;
+	struct tee_shm_pool *pool;
+	void *memremaped_shm;
+	u32 sec_caps;
+};
+
+struct optee_session {
+	struct list_head list_node;
+	u32 session_id;
+};
+
+struct optee_context_data {
+	/* Serializes access to this struct */
+	struct mutex mutex;
+	struct list_head sess_list;
+};
+
+struct optee_rpc_param {
+	u32	a0;
+	u32	a1;
+	u32	a2;
+	u32	a3;
+	u32	a4;
+	u32	a5;
+	u32	a6;
+	u32	a7;
+};
+
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+	/* information about pages list used in last allocation */
+	void *pages_list;
+	size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
+
+void optee_wait_queue_init(struct optee_wait_queue *wq);
+void optee_wait_queue_exit(struct optee_wait_queue *wq);
+
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+			struct tee_param *param);
+
+int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
+int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
+void optee_supp_init(struct optee_supp *supp);
+void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
+
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+		    struct tee_param *param);
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+		    struct tee_param *param);
+
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
+int optee_open_session(struct tee_context *ctx,
+		       struct tee_ioctl_open_session_arg *arg,
+		       struct tee_param *param);
+int optee_close_session(struct tee_context *ctx, u32 session);
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+		      struct tee_param *param);
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+void optee_enable_shm_cache(struct optee *optee);
+void optee_disable_shm_cache(struct optee *optee);
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages,
+		       unsigned long start);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages,
+			    unsigned long start);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+			 const struct optee_msg_param *msg_params);
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+		       const struct tee_param *params);
+
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset);
+
+/*
+ * Small helpers
+ */
+
+static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
+{
+	return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
+}
+
+static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
+{
+	*reg0 = val >> 32;
+	*reg1 = val;
+}
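A hedged round-trip example for the two helpers above, as used when shared memory cookies are split across a pair of 32-bit registers (for instance in optee_disable_shm_cache()); the function name and cookie value are made up:

/* Illustrative only: split a 64-bit cookie and reassemble the pointer. */
static inline bool example_reg_pair_roundtrip(void)
{
	void *cookie = (void *)(unsigned long)0x1234abcd;	/* arbitrary */
	u32 hi, lo;

	reg_pair_from_64(&hi, &lo, (u64)(unsigned long)cookie);
	return reg_pair_to_ptr(hi, lo) == cookie;
}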
+
+#endif /*OPTEE_PRIVATE_H*/
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/optee_smc.h b/src/kernel/linux/v4.19/drivers/tee/optee/optee_smc.h
new file mode 100644
index 0000000..bbf0cf0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT	0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+			   OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED		1
+
+/*
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7. In both cases they
+ * are 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return one of the following UIDs if using API specified in this file
+ * without further extensions:
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f8
+ * see also OPTEE_SMC_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+			   OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+			   OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+struct optee_smc_calls_revision_result {
+	unsigned long major;
+	unsigned long minor;
+	unsigned long reserved0;
+	unsigned long reserved1;
+};
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above. May optionally return a 32-bit build identifier in a2,
+ * with zero meaning unspecified.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+struct optee_smc_call_get_os_revision_result {
+	unsigned long major;
+	unsigned long minor;
+	unsigned long build_id;
+	unsigned long reserved1;
+};
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * a1	Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a2	Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a3	Cache settings, not used if physical pointer is in a predefined shared
+ *	memory area else per OPTEE_SMC_SHM_*
+ * a4-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	Return value, OPTEE_SMC_RETURN_*
+ * a1-3	Not used
+ * a4-7	Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0	Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3	Preserved
+ * a4-7	Preserved
+ *
+ * RPC return register usage:
+ * a0	Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2	RPC parameters
+ * a3-7	Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
+ *					function.
+ * OPTEE_SMC_RETURN_OK			Call completed, result updated in
+ *					the previously supplied struct
+ *					optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT	Number of Trusted OS threads exceeded,
+ *					try again later.
+ * OPTEE_SMC_RETURN_EBADADDR		Bad physical pointer to struct
+ *					optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD		Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC()		Call suspended by RPC call to normal
+ *					world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+	OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1	Physical address of start of SHM
+ * a2	Size of SHM
+ * a3	Cache settings of memory, as defined by the
+ *	OPTEE_SMC_SHM_* values above
+ * a4-7	Preserved
+ *
+ * Not available register usage:
+ * a0	OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7	Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG	7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+struct optee_smc_get_shm_config_result {
+	unsigned long status;
+	unsigned long start;
+	unsigned long size;
+	unsigned long settings;
+};
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1	bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7	Preserved
+ *
+ * Error return register usage:
+ * a0	OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR		BIT(0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
+
+/*
+ * Secure world supports the "register/unregister shared memory" commands,
+ * and accepts command buffers located in any part of non-secure RAM.
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM		BIT(2)
+
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+struct optee_smc_exchange_capabilities_result {
+	unsigned long status;
+	unsigned long capabilities;
+	unsigned long reserved0;
+	unsigned long reserved1;
+};
+
+/*
+ * Disable and empty the cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1	Upper 32bit of a 64bit Shared memory cookie
+ * a2	Lower 32bit of a 64bit Shared memory cookie
+ * a3-7	Preserved
+ *
+ * Cache empty return register usage:
+ * a0	OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7	Preserved
+ *
+ * Not idle return register usage:
+ * a0	OPTEE_SMC_RETURN_EBUSY
+ * a1-7	Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE	10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+struct optee_smc_disable_shm_cache_result {
+	unsigned long status;
+	unsigned long shm_upper32;
+	unsigned long shm_lower32;
+	unsigned long reserved0;
+};
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-7	Preserved
+ *
+ * Not idle return register usage:
+ * a0	OPTEE_SMC_RETURN_EBUSY
+ * a1-7	Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE	11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Resume from RPC (for example after processing a foreign interrupt)
+ *
+ * Call register usage:
+ * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3	Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ *	OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
+ *					function.
+ * OPTEE_SMC_RETURN_OK			Original call completed, result
+ *					updated in the previously supplied
+ *					struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC			Call suspended by RPC call to normal
+ *					world.
+ * OPTEE_SMC_RETURN_ERESUME		Resume failed, the opaque resume
+ *					information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC	3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+	OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK	0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX		0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK		0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+	((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func)		((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
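+/*
+ * For example (purely illustrative), OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+ * evaluates to 0xFFFF0000: the upper 16 bits carry the RPC prefix and the
+ * lower 16 bits the RPC function number, which normal world recovers again
+ * with OPTEE_SMC_RETURN_GET_RPC_FUNC().
+ */
+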
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0	This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1	Size in bytes of required argument memory
+ * a2	Not used
+ * a3	Resume information, must be preserved
+ * a4-5	Not used
+ * a6-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1	Upper 32bits of 64bit physical pointer to allocated
+ *	memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ *	be allocated.
+ * a2	Lower 32bits of 64bit physical pointer to allocated
+ *	memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ *	be allocated
+ * a3	Preserved
+ * a4	Upper 32bits of 64bit Shared memory cookie used when freeing
+ *	the memory or doing an RPC
+ * a5	Lower 32bits of 64bit Shared memory cookie used when freeing
+ *	the memory or doing an RPC
+ * a6-7	Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC	0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0	This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1	Upper 32bits of 64bit shared memory cookie belonging to this
+ *	argument memory
+ * a2	Lower 32bits of 64bit shared memory cookie belonging to this
+ *	argument memory
+ * a3-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2	Not used
+ * a3-7	Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE		2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver foreign interrupt to normal world.
+ *
+ * "Call" register usage:
+ * a0	OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
+ * a1-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7	Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR		4
+#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
+	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd		the Request ID
+ * - ret		return value of the request, filled in by normal world
+ * - num_params		number of parameters for the request
+ * - params		the parameters
+ * - param_attrs	attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0	OPTEE_SMC_RETURN_RPC_CMD
+ * a1	Upper 32bit of a 64bit Shared memory cookie holding a
+ *	struct optee_msg_arg, must be preserved, only the data should
+ *	be updated
+ * a2	Lower 32bit of a 64bit Shared memory cookie holding a
+ *	struct optee_msg_arg, must be preserved, only the data should
+ *	be updated
+ * a3-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2	Not used
+ * a3-7	Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD		5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK		0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT	0x1
+#define OPTEE_SMC_RETURN_EBUSY		0x2
+#define OPTEE_SMC_RETURN_ERESUME	0x3
+#define OPTEE_SMC_RETURN_EBADADDR	0x4
+#define OPTEE_SMC_RETURN_EBADCMD	0x5
+#define OPTEE_SMC_RETURN_ENOMEM		0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL	0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret)	__optee_smc_return_is_rpc((ret))
+
+static inline bool __optee_smc_return_is_rpc(u32 ret)
+{
+	return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
+	       (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
+			OPTEE_SMC_RETURN_RPC_PREFIX;
+}
+
+#endif /* OPTEE_SMC_H */
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/rpc.c b/src/kernel/linux/v4.19/drivers/tee/optee/rpc.c
new file mode 100644
index 0000000..b45c73d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/rpc.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+struct wq_entry {
+	struct list_head link;
+	struct completion c;
+	u32 key;
+};
+
+void optee_wait_queue_init(struct optee_wait_queue *priv)
+{
+	mutex_init(&priv->mu);
+	INIT_LIST_HEAD(&priv->db);
+}
+
+void optee_wait_queue_exit(struct optee_wait_queue *priv)
+{
+	mutex_destroy(&priv->mu);
+}
+
+static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
+{
+	struct timespec64 ts;
+
+	if (arg->num_params != 1)
+		goto bad;
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
+		goto bad;
+
+	ktime_get_real_ts64(&ts);
+	arg->params[0].u.value.a = ts.tv_sec;
+	arg->params[0].u.value.b = ts.tv_nsec;
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w;
+
+	mutex_lock(&wq->mu);
+
+	list_for_each_entry(w, &wq->db, link)
+		if (w->key == key)
+			goto out;
+
+	w = kmalloc(sizeof(*w), GFP_KERNEL);
+	if (w) {
+		init_completion(&w->c);
+		w->key = key;
+		list_add_tail(&w->link, &wq->db);
+	}
+out:
+	mutex_unlock(&wq->mu);
+	return w;
+}
+
+static void wq_sleep(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w = wq_entry_get(wq, key);
+
+	if (w) {
+		wait_for_completion(&w->c);
+		mutex_lock(&wq->mu);
+		list_del(&w->link);
+		mutex_unlock(&wq->mu);
+		kfree(w);
+	}
+}
+
+static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w = wq_entry_get(wq, key);
+
+	if (w)
+		complete(&w->c);
+}
+
+static void handle_rpc_func_cmd_wq(struct optee *optee,
+				   struct optee_msg_arg *arg)
+{
+	if (arg->num_params != 1)
+		goto bad;
+
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
+		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+		break;
+	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
+		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+		break;
+	default:
+		goto bad;
+	}
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
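+
+/*
+ * Note (informational, describing the code above): SLEEP and WAKEUP requests
+ * are paired purely by the 32-bit key supplied by secure world. A sketch of
+ * the expected sequence, with "wq" being the driver's optee_wait_queue:
+ *
+ *	wq_sleep(wq, key);	// thread A blocks on the key's completion
+ *	wq_wakeup(wq, key);	// thread B completes it, thread A resumes
+ *
+ * If WAKEUP arrives first, wq_entry_get() creates the entry, so the later
+ * SLEEP returns immediately.
+ */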
+
+static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
+{
+	u32 msec_to_wait;
+
+	if (arg->num_params != 1)
+		goto bad;
+
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+
+	msec_to_wait = arg->params[0].u.value.a;
+
+	/* Go to interruptible sleep */
+	msleep_interruptible(msec_to_wait);
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_supp_cmd(struct tee_context *ctx,
+				struct optee_msg_arg *arg)
+{
+	struct tee_param *params;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+			       GFP_KERNEL);
+	if (!params) {
+		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+		return;
+	}
+
+	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		goto out;
+	}
+
+	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
+
+	if (optee_to_msg_param(arg->params, arg->num_params, params))
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+out:
+	kfree(params);
+}
+
+static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+{
+	u32 ret;
+	struct tee_param param;
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct tee_shm *shm;
+
+	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+	param.u.value.b = sz;
+	param.u.value.c = 0;
+
+	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
+	if (ret)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&optee->supp.mutex);
+	/* Increases count as secure world doesn't have a reference */
+	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
+	mutex_unlock(&optee->supp.mutex);
+	return shm;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+					  struct optee_msg_arg *arg,
+					  struct optee_call_ctx *call_ctx)
+{
+	phys_addr_t pa;
+	struct tee_shm *shm;
+	size_t sz;
+	size_t n;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	if (!arg->num_params ||
+	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	for (n = 1; n < arg->num_params; n++) {
+		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+			return;
+		}
+	}
+
+	sz = arg->params[0].u.value.b;
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+		shm = cmd_alloc_suppl(ctx, sz);
+		break;
+	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+		break;
+	default:
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	if (IS_ERR(shm)) {
+		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+		return;
+	}
+
+	if (tee_shm_get_pa(shm, 0, &pa)) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		goto bad;
+	}
+
+	sz = tee_shm_get_size(shm);
+
+	if (tee_shm_is_registered(shm)) {
+		struct page **pages;
+		u64 *pages_list;
+		size_t page_num;
+
+		pages = tee_shm_get_pages(shm, &page_num);
+		if (!pages || !page_num) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		pages_list = optee_allocate_pages_list(page_num);
+		if (!pages_list) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		call_ctx->pages_list = pages_list;
+		call_ctx->num_entries = page_num;
+
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				      OPTEE_MSG_ATTR_NONCONTIG;
+		/*
+		 * In the lowest bits of u.tmem.buf_ptr we store the buffer
+		 * offset from the 4k page, as described in the OP-TEE ABI.
+		 */
+		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+			(tee_shm_get_page_offset(shm) &
+			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+		optee_fill_pages_list(pages_list, pages, page_num,
+				      tee_shm_get_page_offset(shm));
+	} else {
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+		arg->params[0].u.tmem.buf_ptr = pa;
+		arg->params[0].u.tmem.size = sz;
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	}
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	tee_shm_free(shm);
+}
+
+static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_param param;
+
+	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+	param.u.value.b = tee_shm_get_id(shm);
+	param.u.value.c = 0;
+
+	/*
+	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
+	 * world has released its reference.
+	 *
+	 * It's better to do this before sending the request to the
+	 * supplicant, as we'd like the process that did the initial
+	 * allocation to also release the last reference, to avoid stacking
+	 * up many pending fput() calls on the client process. This could
+	 * otherwise happen if secure world does many allocations and frees
+	 * in a single invoke.
+	 */
+	tee_shm_put(shm);
+
+	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
+}
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+					 struct optee_msg_arg *arg)
+{
+	struct tee_shm *shm;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	if (arg->num_params != 1 ||
+	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+		cmd_free_suppl(ctx, shm);
+		break;
+	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+		tee_shm_free(shm);
+		break;
+	default:
+		/* Return early so the error isn't overwritten below */
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+	arg->ret = TEEC_SUCCESS;
+}
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+	if (call_ctx->pages_list) {
+		optee_free_pages_list(call_ctx->pages_list,
+				      call_ctx->num_entries);
+		call_ctx->pages_list = NULL;
+		call_ctx->num_entries = 0;
+	}
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+	free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+				struct tee_shm *shm,
+				struct optee_call_ctx *call_ctx)
+{
+	struct optee_msg_arg *arg;
+
+	arg = tee_shm_get_va(shm, 0);
+	if (IS_ERR(arg)) {
+		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+		return;
+	}
+
+	switch (arg->cmd) {
+	case OPTEE_MSG_RPC_CMD_GET_TIME:
+		handle_rpc_func_cmd_get_time(arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+		handle_rpc_func_cmd_wq(optee, arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_SUSPEND:
+		handle_rpc_func_cmd_wait(arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+		free_pages_list(call_ctx);
+		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+		break;
+	case OPTEE_MSG_RPC_CMD_SHM_FREE:
+		handle_rpc_func_cmd_shm_free(ctx, arg);
+		break;
+	default:
+		handle_rpc_supp_cmd(ctx, arg);
+	}
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx:	context doing the RPC
+ * @param:	value of registers for the RPC
+ * @call_ctx:	call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct tee_shm *shm;
+	phys_addr_t pa;
+
+	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+	case OPTEE_SMC_RPC_FUNC_ALLOC:
+		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+			reg_pair_from_64(&param->a1, &param->a2, pa);
+			reg_pair_from_64(&param->a4, &param->a5,
+					 (unsigned long)shm);
+		} else {
+			param->a1 = 0;
+			param->a2 = 0;
+			param->a4 = 0;
+			param->a5 = 0;
+		}
+		break;
+	case OPTEE_SMC_RPC_FUNC_FREE:
+		shm = reg_pair_to_ptr(param->a1, param->a2);
+		tee_shm_free(shm);
+		break;
+	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+		/*
+		 * A foreign interrupt was raised while secure world was
+		 * executing. Since such interrupts are handled in Linux, a
+		 * dummy RPC is performed to let Linux take the interrupt
+		 * through the normal vector.
+		 */
+		break;
+	case OPTEE_SMC_RPC_FUNC_CMD:
+		shm = reg_pair_to_ptr(param->a1, param->a2);
+		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+		break;
+	default:
+		pr_warn("Unknown RPC func 0x%x\n",
+			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+		break;
+	}
+
+	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.c b/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.c
new file mode 100644
index 0000000..4939781
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm, size_t size)
+{
+	unsigned int order = get_order(size);
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
+		return -ENOMEM;
+
+	shm->kaddr = page_address(page);
+	shm->paddr = page_to_phys(page);
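+	/*
+	 * get_order() rounds the request up to a whole power-of-two number
+	 * of pages, so the recorded size may be larger than what was asked
+	 * for.
+	 */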
+	shm->size = PAGE_SIZE << order;
+
+	return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm)
+{
+	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+	shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+	.alloc = pool_op_alloc,
+	.free = pool_op_free,
+	.destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->ops = &pool_ops;
+
+	return mgr;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.h b/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.h
new file mode 100644
index 0000000..4e753c3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/shm_pool.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/optee/supp.c b/src/kernel/linux/v4.19/drivers/tee/optee/supp.c
new file mode 100644
index 0000000..43626e1
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/optee/supp.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+
+struct optee_supp_req {
+	struct list_head link;
+
+	bool in_queue;
+	u32 func;
+	u32 ret;
+	size_t num_params;
+	struct tee_param *param;
+
+	struct completion c;
+};
+
+void optee_supp_init(struct optee_supp *supp)
+{
+	memset(supp, 0, sizeof(*supp));
+	mutex_init(&supp->mutex);
+	init_completion(&supp->reqs_c);
+	idr_init(&supp->idr);
+	INIT_LIST_HEAD(&supp->reqs);
+	supp->req_id = -1;
+}
+
+void optee_supp_uninit(struct optee_supp *supp)
+{
+	mutex_destroy(&supp->mutex);
+	idr_destroy(&supp->idr);
+}
+
+void optee_supp_release(struct optee_supp *supp)
+{
+	int id;
+	struct optee_supp_req *req;
+	struct optee_supp_req *req_tmp;
+
+	mutex_lock(&supp->mutex);
+
+	/* Abort all requests retrieved by the supplicant */
+	idr_for_each_entry(&supp->idr, req, id) {
+		idr_remove(&supp->idr, id);
+		req->ret = TEEC_ERROR_COMMUNICATION;
+		complete(&req->c);
+	}
+
+	/* Abort all queued requests */
+	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+		list_del(&req->link);
+		req->in_queue = false;
+		req->ret = TEEC_ERROR_COMMUNICATION;
+		complete(&req->c);
+	}
+
+	supp->ctx = NULL;
+	supp->req_id = -1;
+
+	mutex_unlock(&supp->mutex);
+}
+
+/**
+ * optee_supp_thrd_req() - request service from supplicant
+ * @ctx:	context doing the request
+ * @func:	function requested
+ * @num_params:	number of elements in @param array
+ * @param:	parameters for function
+ *
+ * Returns result of operation to be passed to secure world
+ */
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+			struct tee_param *param)
+
+{
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct optee_supp *supp = &optee->supp;
+	struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+	bool interruptable;
+	u32 ret;
+
+	if (!req)
+		return TEEC_ERROR_OUT_OF_MEMORY;
+
+	init_completion(&req->c);
+	req->func = func;
+	req->num_params = num_params;
+	req->param = param;
+
+	/* Insert the request in the request list */
+	mutex_lock(&supp->mutex);
+	list_add_tail(&req->link, &supp->reqs);
+	req->in_queue = true;
+	mutex_unlock(&supp->mutex);
+
+	/* Tell any waiter that there's a new request */
+	complete(&supp->reqs_c);
+
+	/*
+	 * Wait for the supplicant to process the request and return the
+	 * result. Once we've returned successfully from the wait on &req->c
+	 * we have exclusive access to the request again.
+	 */
+	while (wait_for_completion_interruptible(&req->c)) {
+		mutex_lock(&supp->mutex);
+		interruptable = !supp->ctx;
+		if (interruptable) {
+			/*
+			 * There's no supplicant available and since
+			 * supp->mutex is currently held, none can
+			 * become available until the mutex is released
+			 * again.
+			 *
+			 * Interrupting an RPC to the supplicant is only
+			 * allowed as a way of slightly improving the user
+			 * experience in case the supplicant hasn't been
+			 * started yet. During normal operation the supplicant
+			 * will serve all requests in a timely manner and
+			 * interrupting them wouldn't make sense.
+			 */
+			if (req->in_queue) {
+				list_del(&req->link);
+				req->in_queue = false;
+			}
+		}
+		mutex_unlock(&supp->mutex);
+
+		if (interruptable) {
+			req->ret = TEEC_ERROR_COMMUNICATION;
+			break;
+		}
+	}
+
+	ret = req->ret;
+	kfree(req);
+
+	return ret;
+}
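+
+/*
+ * Informal summary of the request life cycle implemented in this file (a
+ * description of the existing code, not an additional interface): a request
+ * created by optee_supp_thrd_req() above sits on supp->reqs until
+ * tee-supplicant picks it up through optee_supp_recv(), which moves it into
+ * supp->idr. The answer delivered through optee_supp_send() removes it from
+ * the idr, copies back the output parameters and completes req->c so the
+ * waiting thread can return the result to secure world.
+ */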
+
+static struct optee_supp_req  *supp_pop_entry(struct optee_supp *supp,
+					      int num_params, int *id)
+{
+	struct optee_supp_req *req;
+
+	if (supp->req_id != -1) {
+		/*
+		 * Supplicant should not mix synchronous and asynchronous
+		 * requests.
+		 */
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (list_empty(&supp->reqs))
+		return NULL;
+
+	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+	if (num_params < req->num_params) {
+		/* Not enough room for parameters */
+		return ERR_PTR(-EINVAL);
+	}
+
+	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+	if (*id < 0)
+		return ERR_PTR(-ENOMEM);
+
+	list_del(&req->link);
+	req->in_queue = false;
+
+	return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+				  size_t *num_meta)
+{
+	size_t n;
+
+	if (!num_params)
+		return -EINVAL;
+
+	/*
+	 * If there are memrefs we need to decrease those references, as they
+	 * were increased earlier, and the checks below refuse to accept any
+	 * memrefs anyway.
+	 */
+	for (n = 0; n < num_params; n++)
+		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+			tee_shm_put(params[n].u.memref.shm);
+
+	/*
+	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+	 */
+	for (n = 0; n < num_params; n++)
+		if (params[n].attr &&
+		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+			return -EINVAL;
+
+	/* At most we'll need one meta parameter so no need to check for more */
+	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+		*num_meta = 1;
+	else
+		*num_meta = 0;
+
+	return 0;
+}
+
+/**
+ * optee_supp_recv() - receive request for supplicant
+ * @ctx:	context receiving the request
+ * @func:	requested function in supplicant
+ * @num_params:	number of elements allocated in @param, updated with number
+ *		used elements
+ * @param:	space for parameters for @func
+ *
+ * Returns 0 on success or <0 on failure
+ */
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+		    struct tee_param *param)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct optee_supp *supp = &optee->supp;
+	struct optee_supp_req *req = NULL;
+	int id;
+	size_t num_meta;
+	int rc;
+
+	rc = supp_check_recv_params(*num_params, param, &num_meta);
+	if (rc)
+		return rc;
+
+	while (true) {
+		mutex_lock(&supp->mutex);
+		req = supp_pop_entry(supp, *num_params - num_meta, &id);
+		mutex_unlock(&supp->mutex);
+
+		if (req) {
+			if (IS_ERR(req))
+				return PTR_ERR(req);
+			break;
+		}
+
+		/*
+		 * If we didn't get a request we'll block on the completion
+		 * below to avoid needless spinning.
+		 *
+		 * This is where the supplicant will be hanging most of
+		 * the time, let's make this wait interruptible so we
+		 * can easily restart the supplicant if needed.
+		 */
+		if (wait_for_completion_interruptible(&supp->reqs_c))
+			return -ERESTARTSYS;
+	}
+
+	if (num_meta) {
+		/*
+		 * tee-supplicant supports meta parameters -> requests can be
+		 * processed asynchronously.
+		 */
+		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+			      TEE_IOCTL_PARAM_ATTR_META;
+		param->u.value.a = id;
+		param->u.value.b = 0;
+		param->u.value.c = 0;
+	} else {
+		mutex_lock(&supp->mutex);
+		supp->req_id = id;
+		mutex_unlock(&supp->mutex);
+	}
+
+	*func = req->func;
+	*num_params = req->num_params + num_meta;
+	memcpy(param + num_meta, req->param,
+	       sizeof(struct tee_param) * req->num_params);
+
+	return 0;
+}
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+					   size_t num_params,
+					   struct tee_param *param,
+					   size_t *num_meta)
+{
+	struct optee_supp_req *req;
+	int id;
+	size_t nm;
+	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+			 TEE_IOCTL_PARAM_ATTR_META;
+
+	if (!num_params)
+		return ERR_PTR(-EINVAL);
+
+	if (supp->req_id == -1) {
+		if (param->attr != attr)
+			return ERR_PTR(-EINVAL);
+		id = param->u.value.a;
+		nm = 1;
+	} else {
+		id = supp->req_id;
+		nm = 0;
+	}
+
+	req = idr_find(&supp->idr, id);
+	if (!req)
+		return ERR_PTR(-ENOENT);
+
+	if ((num_params - nm) != req->num_params)
+		return ERR_PTR(-EINVAL);
+
+	idr_remove(&supp->idr, id);
+	supp->req_id = -1;
+	*num_meta = nm;
+
+	return req;
+}
+
+/**
+ * optee_supp_send() - send result of request from supplicant
+ * @ctx:	context sending result
+ * @ret:	return value of request
+ * @num_params:	number of parameters returned
+ * @param:	returned parameters
+ *
+ * Returns 0 on success or <0 on failure.
+ */
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+		    struct tee_param *param)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct optee_supp *supp = &optee->supp;
+	struct optee_supp_req *req;
+	size_t n;
+	size_t num_meta;
+
+	mutex_lock(&supp->mutex);
+	req = supp_pop_req(supp, num_params, param, &num_meta);
+	mutex_unlock(&supp->mutex);
+
+	if (IS_ERR(req)) {
+		/* Something is wrong, let supplicant restart. */
+		return PTR_ERR(req);
+	}
+
+	/* Update out and in/out parameters */
+	for (n = 0; n < req->num_params; n++) {
+		struct tee_param *p = req->param + n;
+
+		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			p->u.value.a = param[n + num_meta].u.value.a;
+			p->u.value.b = param[n + num_meta].u.value.b;
+			p->u.value.c = param[n + num_meta].u.value.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			p->u.memref.size = param[n + num_meta].u.memref.size;
+			break;
+		default:
+			break;
+		}
+	}
+	req->ret = ret;
+
+	/* Let the requesting thread continue */
+	complete(&req->c);
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tee_core.c b/src/kernel/linux/v4.19/drivers/tee/tee_core.c
new file mode 100644
index 0000000..dd46b75
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tee_core.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
+#include "tee_private.h"
+
+#define TEE_NUM_DEVICES	32
+
+#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+
+/*
+ * Unprivileged devices in the lower half range and privileged devices in
+ * the upper half range.
+ */
+static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
+static DEFINE_SPINLOCK(driver_lock);
+
+static struct class *tee_class;
+static dev_t tee_devt;
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+	int rc;
+	struct tee_device *teedev;
+	struct tee_context *ctx;
+
+	teedev = container_of(inode->i_cdev, struct tee_device, cdev);
+	if (!tee_device_get(teedev))
+		return -EINVAL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	kref_init(&ctx->refcount);
+	ctx->teedev = teedev;
+	INIT_LIST_HEAD(&ctx->list_shm);
+	filp->private_data = ctx;
+	rc = teedev->desc->ops->open(ctx);
+	if (rc)
+		goto err;
+
+	return 0;
+err:
+	kfree(ctx);
+	tee_device_put(teedev);
+	return rc;
+}
+
+void teedev_ctx_get(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+	struct tee_context *ctx = container_of(ref, struct tee_context,
+					       refcount);
+	ctx->releasing = true;
+	ctx->teedev->desc->ops->release(ctx);
+	kfree(ctx);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+	tee_device_put(ctx->teedev);
+	teedev_ctx_put(ctx);
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+	teedev_close_context(filp->private_data);
+	return 0;
+}
+
+static int tee_ioctl_version(struct tee_context *ctx,
+			     struct tee_ioctl_version_data __user *uvers)
+{
+	struct tee_ioctl_version_data vers;
+
+	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
+
+	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
+		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
+
+	if (copy_to_user(uvers, &vers, sizeof(vers)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int tee_ioctl_shm_alloc(struct tee_context *ctx,
+			       struct tee_ioctl_shm_alloc_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_alloc_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.size = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * is freed; if tee_shm_get_fd() failed it is freed
+	 * immediately instead.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+		       struct tee_ioctl_shm_register_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_register_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_register(ctx, data.addr, data.length,
+			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.length = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * is freed; if tee_shm_get_fd() failed it is freed
+	 * immediately instead.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
+static int params_from_user(struct tee_context *ctx, struct tee_param *params,
+			    size_t num_params,
+			    struct tee_ioctl_param __user *uparams)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_shm *shm;
+		struct tee_ioctl_param ip;
+
+		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+			return -EFAULT;
+
+		/* All unused attribute bits have to be zero */
+		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
+			return -EINVAL;
+
+		params[n].attr = ip.attr;
+		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			params[n].u.value.a = ip.a;
+			params[n].u.value.b = ip.b;
+			params[n].u.value.c = ip.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			/*
+			 * If we fail to get a pointer to a shared memory
+			 * object (and increase the ref count) from an
+			 * identifier we return an error. All pointers that
+			 * have been added to params have an increased ref
+			 * count. It's the caller's responsibility to do
+			 * tee_shm_put() on all resolved pointers.
+			 */
+			shm = tee_shm_get_from_id(ctx, ip.c);
+			if (IS_ERR(shm))
+				return PTR_ERR(shm);
+
+			/*
+			 * Ensure offset + size does not overflow, and does
+			 * not exceed the size of the referred shared memory
+			 * object.
+			 */
+			if ((ip.a + ip.b) < ip.a ||
+			    (ip.a + ip.b) > shm->size) {
+				tee_shm_put(shm);
+				return -EINVAL;
+			}
+
+			params[n].u.memref.shm_offs = ip.a;
+			params[n].u.memref.size = ip.b;
+			params[n].u.memref.shm = shm;
+			break;
+		default:
+			/* Unknown attribute */
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int params_to_user(struct tee_ioctl_param __user *uparams,
+			  size_t num_params, struct tee_param *params)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_ioctl_param __user *up = uparams + n;
+		struct tee_param *p = params + n;
+
+		switch (p->attr) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			if (put_user(p->u.value.a, &up->a) ||
+			    put_user(p->u.value.b, &up->b) ||
+			    put_user(p->u.value.c, &up->c))
+				return -EFAULT;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			if (put_user((u64)p->u.memref.size, &up->b))
+				return -EFAULT;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int tee_ioctl_open_session(struct tee_context *ctx,
+				  struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	size_t n;
+	struct tee_ioctl_buf_data buf;
+	struct tee_ioctl_open_session_arg __user *uarg;
+	struct tee_ioctl_open_session_arg arg;
+	struct tee_ioctl_param __user *uparams = NULL;
+	struct tee_param *params = NULL;
+	bool have_session = false;
+
+	if (!ctx->teedev->desc->ops->open_session)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+		return -EINVAL;
+
+	if (arg.num_params) {
+		params = kcalloc(arg.num_params, sizeof(struct tee_param),
+				 GFP_KERNEL);
+		if (!params)
+			return -ENOMEM;
+		uparams = uarg->params;
+		rc = params_from_user(ctx, params, arg.num_params, uparams);
+		if (rc)
+			goto out;
+	}
+
+	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
+	if (rc)
+		goto out;
+	have_session = true;
+
+	if (put_user(arg.session, &uarg->session) ||
+	    put_user(arg.ret, &uarg->ret) ||
+	    put_user(arg.ret_origin, &uarg->ret_origin)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = params_to_user(uparams, arg.num_params, params);
+out:
+	/*
+	 * If we've succeeded to open the session but failed to communicate
+	 * it back to user space, close the session again to avoid leakage.
+	 */
+	if (rc && have_session && ctx->teedev->desc->ops->close_session)
+		ctx->teedev->desc->ops->close_session(ctx, arg.session);
+
+	if (params) {
+		/* Decrease ref count for all valid shared memory pointers */
+		for (n = 0; n < arg.num_params; n++)
+			if (tee_param_is_memref(params + n) &&
+			    params[n].u.memref.shm)
+				tee_shm_put(params[n].u.memref.shm);
+		kfree(params);
+	}
+
+	return rc;
+}
+
+static int tee_ioctl_invoke(struct tee_context *ctx,
+			    struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	size_t n;
+	struct tee_ioctl_buf_data buf;
+	struct tee_ioctl_invoke_arg __user *uarg;
+	struct tee_ioctl_invoke_arg arg;
+	struct tee_ioctl_param __user *uparams = NULL;
+	struct tee_param *params = NULL;
+
+	if (!ctx->teedev->desc->ops->invoke_func)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+		return -EINVAL;
+
+	if (arg.num_params) {
+		params = kcalloc(arg.num_params, sizeof(struct tee_param),
+				 GFP_KERNEL);
+		if (!params)
+			return -ENOMEM;
+		uparams = uarg->params;
+		rc = params_from_user(ctx, params, arg.num_params, uparams);
+		if (rc)
+			goto out;
+	}
+
+	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
+	if (rc)
+		goto out;
+
+	if (put_user(arg.ret, &uarg->ret) ||
+	    put_user(arg.ret_origin, &uarg->ret_origin)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = params_to_user(uparams, arg.num_params, params);
+out:
+	if (params) {
+		/* Decrease ref count for all valid shared memory pointers */
+		for (n = 0; n < arg.num_params; n++)
+			if (tee_param_is_memref(params + n) &&
+			    params[n].u.memref.shm)
+				tee_shm_put(params[n].u.memref.shm);
+		kfree(params);
+	}
+	return rc;
+}
+
+static int tee_ioctl_cancel(struct tee_context *ctx,
+			    struct tee_ioctl_cancel_arg __user *uarg)
+{
+	struct tee_ioctl_cancel_arg arg;
+
+	if (!ctx->teedev->desc->ops->cancel_req)
+		return -EINVAL;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
+						  arg.session);
+}
+
+static int
+tee_ioctl_close_session(struct tee_context *ctx,
+			struct tee_ioctl_close_session_arg __user *uarg)
+{
+	struct tee_ioctl_close_session_arg arg;
+
+	if (!ctx->teedev->desc->ops->close_session)
+		return -EINVAL;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
+}
+
+static int params_to_supp(struct tee_context *ctx,
+			  struct tee_ioctl_param __user *uparams,
+			  size_t num_params, struct tee_param *params)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_ioctl_param ip;
+		struct tee_param *p = params + n;
+
+		ip.attr = p->attr;
+		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			ip.a = p->u.value.a;
+			ip.b = p->u.value.b;
+			ip.c = p->u.value.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			ip.b = p->u.memref.size;
+			if (!p->u.memref.shm) {
+				ip.a = 0;
+				ip.c = (u64)-1; /* invalid shm id */
+				break;
+			}
+			ip.a = p->u.memref.shm_offs;
+			ip.c = p->u.memref.shm->id;
+			break;
+		default:
+			ip.a = 0;
+			ip.b = 0;
+			ip.c = 0;
+			break;
+		}
+
+		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int tee_ioctl_supp_recv(struct tee_context *ctx,
+			       struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	struct tee_ioctl_buf_data buf;
+	struct tee_iocl_supp_recv_arg __user *uarg;
+	struct tee_param *params;
+	u32 num_params;
+	u32 func;
+
+	if (!ctx->teedev->desc->ops->supp_recv)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (get_user(num_params, &uarg->num_params))
+		return -EFAULT;
+
+	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+		return -EINVAL;
+
+	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	rc = params_from_user(ctx, params, num_params, uarg->params);
+	if (rc)
+		goto out;
+
+	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
+	if (rc)
+		goto out;
+
+	if (put_user(func, &uarg->func) ||
+	    put_user(num_params, &uarg->num_params)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	rc = params_to_supp(ctx, uarg->params, num_params, params);
+out:
+	kfree(params);
+	return rc;
+}
+
+static int params_from_supp(struct tee_param *params, size_t num_params,
+			    struct tee_ioctl_param __user *uparams)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_param *p = params + n;
+		struct tee_ioctl_param ip;
+
+		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+			return -EFAULT;
+
+		/* All unused attribute bits have to be zero */
+		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
+			return -EINVAL;
+
+		p->attr = ip.attr;
+		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			/* Only out and in/out values can be updated */
+			p->u.value.a = ip.a;
+			p->u.value.b = ip.b;
+			p->u.value.c = ip.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			/*
+			 * Only the size of the memref can be updated.
+			 * Since we don't have access to the original
+			 * parameters here, only store the supplied size.
+			 * The driver will copy the updated size into the
+			 * original parameters.
+			 */
+			p->u.memref.shm = NULL;
+			p->u.memref.shm_offs = 0;
+			p->u.memref.size = ip.b;
+			break;
+		default:
+			memset(&p->u, 0, sizeof(p->u));
+			break;
+		}
+	}
+	return 0;
+}
+
+static int tee_ioctl_supp_send(struct tee_context *ctx,
+			       struct tee_ioctl_buf_data __user *ubuf)
+{
+	long rc;
+	struct tee_ioctl_buf_data buf;
+	struct tee_iocl_supp_send_arg __user *uarg;
+	struct tee_param *params;
+	u32 num_params;
+	u32 ret;
+
+	/* Not valid for this driver */
+	if (!ctx->teedev->desc->ops->supp_send)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (get_user(ret, &uarg->ret) ||
+	    get_user(num_params, &uarg->num_params))
+		return -EFAULT;
+
+	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+		return -EINVAL;
+
+	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	rc = params_from_supp(params, num_params, uarg->params);
+	if (rc)
+		goto out;
+
+	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
+out:
+	kfree(params);
+	return rc;
+}
+
+static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct tee_context *ctx = filp->private_data;
+	void __user *uarg = (void __user *)arg;
+
+	switch (cmd) {
+	case TEE_IOC_VERSION:
+		return tee_ioctl_version(ctx, uarg);
+	case TEE_IOC_SHM_ALLOC:
+		return tee_ioctl_shm_alloc(ctx, uarg);
+	case TEE_IOC_SHM_REGISTER:
+		return tee_ioctl_shm_register(ctx, uarg);
+	case TEE_IOC_OPEN_SESSION:
+		return tee_ioctl_open_session(ctx, uarg);
+	case TEE_IOC_INVOKE:
+		return tee_ioctl_invoke(ctx, uarg);
+	case TEE_IOC_CANCEL:
+		return tee_ioctl_cancel(ctx, uarg);
+	case TEE_IOC_CLOSE_SESSION:
+		return tee_ioctl_close_session(ctx, uarg);
+	case TEE_IOC_SUPPL_RECV:
+		return tee_ioctl_supp_recv(ctx, uarg);
+	case TEE_IOC_SUPPL_SEND:
+		return tee_ioctl_supp_send(ctx, uarg);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct file_operations tee_fops = {
+	.owner = THIS_MODULE,
+	.open = tee_open,
+	.release = tee_release,
+	.unlocked_ioctl = tee_ioctl,
+	.compat_ioctl = tee_ioctl,
+};
+
+static void tee_release_device(struct device *dev)
+{
+	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+
+	spin_lock(&driver_lock);
+	clear_bit(teedev->id, dev_mask);
+	spin_unlock(&driver_lock);
+	mutex_destroy(&teedev->mutex);
+	idr_destroy(&teedev->idr);
+	kfree(teedev);
+}
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc:	Descriptor for this driver
+ * @dev:	Parent device for this device
+ * @pool:	Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+				    struct device *dev,
+				    struct tee_shm_pool *pool,
+				    void *driver_data)
+{
+	struct tee_device *teedev;
+	void *ret;
+	int rc, max_id;
+	int offs = 0;
+
+	if (!teedesc || !teedesc->name || !teedesc->ops ||
+	    !teedesc->ops->get_version || !teedesc->ops->open ||
+	    !teedesc->ops->release || !pool)
+		return ERR_PTR(-EINVAL);
+
+	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
+	if (!teedev) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	max_id = TEE_NUM_DEVICES / 2;
+
+	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
+		offs = TEE_NUM_DEVICES / 2;
+		max_id = TEE_NUM_DEVICES;
+	}
+
+	spin_lock(&driver_lock);
+	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
+	if (teedev->id < max_id)
+		set_bit(teedev->id, dev_mask);
+	spin_unlock(&driver_lock);
+
+	if (teedev->id >= max_id) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
+		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
+		 teedev->id - offs);
+
+	teedev->dev.class = tee_class;
+	teedev->dev.release = tee_release_device;
+	teedev->dev.parent = dev;
+
+	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
+
+	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err_devt;
+	}
+
+	cdev_init(&teedev->cdev, &tee_fops);
+	teedev->cdev.owner = teedesc->owner;
+	teedev->cdev.kobj.parent = &teedev->dev.kobj;
+
+	dev_set_drvdata(&teedev->dev, driver_data);
+	device_initialize(&teedev->dev);
+
+	/* 1 as tee_device_unregister() does one final tee_device_put() */
+	teedev->num_users = 1;
+	init_completion(&teedev->c_no_users);
+	mutex_init(&teedev->mutex);
+	idr_init(&teedev->idr);
+
+	teedev->desc = teedesc;
+	teedev->pool = pool;
+
+	return teedev;
+err_devt:
+	unregister_chrdev_region(teedev->dev.devt, 1);
+err:
+	pr_err("could not register %s driver\n",
+	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
+	if (teedev && teedev->id < TEE_NUM_DEVICES) {
+		spin_lock(&driver_lock);
+		clear_bit(teedev->id, dev_mask);
+		spin_unlock(&driver_lock);
+	}
+	kfree(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_device_alloc);
+
+static ssize_t implementation_id_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+	struct tee_ioctl_version_data vers;
+
+	teedev->desc->ops->get_version(teedev, &vers);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+}
+static DEVICE_ATTR_RO(implementation_id);
+
+static struct attribute *tee_dev_attrs[] = {
+	&dev_attr_implementation_id.attr,
+	NULL
+};
+
+static const struct attribute_group tee_dev_group = {
+	.attrs = tee_dev_attrs,
+};
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev:	Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev)
+{
+	int rc;
+
+	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+		dev_err(&teedev->dev, "attempt to register twice\n");
+		return -EINVAL;
+	}
+
+	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
+	if (rc) {
+		dev_err(&teedev->dev,
+			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+			teedev->name, MAJOR(teedev->dev.devt),
+			MINOR(teedev->dev.devt), rc);
+		return rc;
+	}
+
+	rc = device_add(&teedev->dev);
+	if (rc) {
+		dev_err(&teedev->dev,
+			"unable to device_add() %s, major %d, minor %d, err=%d\n",
+			teedev->name, MAJOR(teedev->dev.devt),
+			MINOR(teedev->dev.devt), rc);
+		goto err_device_add;
+	}
+
+	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
+	if (rc) {
+		dev_err(&teedev->dev,
+			"failed to create sysfs attributes, err=%d\n", rc);
+		goto err_sysfs_create_group;
+	}
+
+	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
+	return 0;
+
+err_sysfs_create_group:
+	device_del(&teedev->dev);
+err_device_add:
+	cdev_del(&teedev->cdev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tee_device_register);
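+
+/*
+ * Minimal usage sketch (illustrative only; error handling is trimmed and the
+ * "mydrv" names are placeholders, not part of this file): a TEE driver is
+ * expected to pair these calls roughly as follows:
+ *
+ *	teedev = tee_device_alloc(&mydrv_desc, dev, pool, drvdata);
+ *	if (IS_ERR(teedev))
+ *		return PTR_ERR(teedev);
+ *	rc = tee_device_register(teedev);
+ *	if (rc) {
+ *		tee_device_unregister(teedev);
+ *		return rc;
+ *	}
+ */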
+
+void tee_device_put(struct tee_device *teedev)
+{
+	mutex_lock(&teedev->mutex);
+	/* Shouldn't put in this state */
+	if (!WARN_ON(!teedev->desc)) {
+		teedev->num_users--;
+		if (!teedev->num_users) {
+			teedev->desc = NULL;
+			complete(&teedev->c_no_users);
+		}
+	}
+	mutex_unlock(&teedev->mutex);
+}
+
+bool tee_device_get(struct tee_device *teedev)
+{
+	mutex_lock(&teedev->mutex);
+	if (!teedev->desc) {
+		mutex_unlock(&teedev->mutex);
+		return false;
+	}
+	teedev->num_users++;
+	mutex_unlock(&teedev->mutex);
+	return true;
+}
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev:	Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev)
+{
+	if (!teedev)
+		return;
+
+	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
+		cdev_del(&teedev->cdev);
+		device_del(&teedev->dev);
+	}
+
+	tee_device_put(teedev);
+	wait_for_completion(&teedev->c_no_users);
+
+	/*
+	 * No need to take a mutex any longer now since teedev->desc was
+	 * set to NULL before teedev->c_no_users was completed.
+	 */
+
+	teedev->pool = NULL;
+
+	put_device(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_device_unregister);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @teedev:	Device containing the driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev)
+{
+	return dev_get_drvdata(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_get_drvdata);
+
+static int __init tee_init(void)
+{
+	int rc;
+
+	tee_class = class_create(THIS_MODULE, "tee");
+	if (IS_ERR(tee_class)) {
+		pr_err("couldn't create class\n");
+		return PTR_ERR(tee_class);
+	}
+
+	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
+	if (rc) {
+		pr_err("failed to allocate char dev region\n");
+		class_destroy(tee_class);
+		tee_class = NULL;
+	}
+
+	return rc;
+}
+
+static void __exit tee_exit(void)
+{
+	class_destroy(tee_class);
+	tee_class = NULL;
+	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("TEE Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/tee/tee_private.h b/src/kernel/linux/v4.19/drivers/tee/tee_private.h
new file mode 100644
index 0000000..85d99d6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tee_private.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef TEE_PRIVATE_H
+#define TEE_PRIVATE_H
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct tee_shm_pool - shared memory pool
+ * @private_mgr:	pool manager for shared memory only between kernel
+ *			and secure world
+ * @dma_buf_mgr:	pool manager for shared memory exported to user space
+ */
+struct tee_shm_pool {
+	struct tee_shm_pool_mgr *private_mgr;
+	struct tee_shm_pool_mgr *dma_buf_mgr;
+};
+
+#define TEE_DEVICE_FLAG_REGISTERED	0x1
+#define TEE_MAX_DEV_NAME_LEN		32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name:	name of device
+ * @desc:	description of device
+ * @id:		unique id of device
+ * @flags:	represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev:	embedded basic device structure
+ * @cdev:	embedded cdev
+ * @num_users:	number of active users of this device
+ * @c_no_users:	completion used when unregistering the device
+ * @mutex:	mutex protecting @num_users and @idr
+ * @idr:	register of shared memory objects allocated on this device
+ * @pool:	shared memory pool
+ */
+struct tee_device {
+	char name[TEE_MAX_DEV_NAME_LEN];
+	const struct tee_desc *desc;
+	int id;
+	unsigned int flags;
+
+	struct device dev;
+	struct cdev cdev;
+
+	size_t num_users;
+	struct completion c_no_users;
+	struct mutex mutex;	/* protects num_users and idr */
+
+	struct idr idr;
+	struct tee_shm_pool *pool;
+};
+
+int tee_shm_init(void);
+
+int tee_shm_get_fd(struct tee_shm *shm);
+
+bool tee_device_get(struct tee_device *teedev);
+void tee_device_put(struct tee_device *teedev);
+
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
+#endif /*TEE_PRIVATE_H*/
diff --git a/src/kernel/linux/v4.19/drivers/tee/tee_shm.c b/src/kernel/linux/v4.19/drivers/tee/tee_shm.c
new file mode 100644
index 0000000..8e7b52a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tee_shm.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/fdtable.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+static void tee_shm_release(struct tee_shm *shm)
+{
+	struct tee_device *teedev = shm->teedev;
+
+	mutex_lock(&teedev->mutex);
+	idr_remove(&teedev->idr, shm->id);
+	if (shm->ctx)
+		list_del(&shm->link);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->flags & TEE_SHM_POOL) {
+		struct tee_shm_pool_mgr *poolm;
+
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			poolm = teedev->pool->dma_buf_mgr;
+		else
+			poolm = teedev->pool->private_mgr;
+
+		poolm->ops->free(poolm, shm);
+	} else if (shm->flags & TEE_SHM_REGISTER) {
+		size_t n;
+		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+		if (rc)
+			dev_err(teedev->dev.parent,
+				"unregister shm %p failed: %d", shm, rc);
+
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+
+		kfree(shm->pages);
+	}
+
+	if (shm->ctx)
+		teedev_ctx_put(shm->ctx);
+
+	kfree(shm);
+
+	tee_device_put(teedev);
+}
+
+static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
+			*attach, enum dma_data_direction dir)
+{
+	return NULL;
+}
+
+static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
+				     struct sg_table *table,
+				     enum dma_data_direction dir)
+{
+}
+
+static void tee_shm_op_release(struct dma_buf *dmabuf)
+{
+	struct tee_shm *shm = dmabuf->priv;
+
+	tee_shm_release(shm);
+}
+
+static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+	return NULL;
+}
+
+static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct tee_shm *shm = dmabuf->priv;
+	size_t size = vma->vm_end - vma->vm_start;
+
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_REGISTER)
+		return -EINVAL;
+
+	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+			       size, vma->vm_page_prot);
+}
+
+static const struct dma_buf_ops tee_shm_dma_buf_ops = {
+	.map_dma_buf = tee_shm_op_map_dma_buf,
+	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
+	.release = tee_shm_op_release,
+	.map = tee_shm_op_map,
+	.mmap = tee_shm_op_mmap,
+};
+
+static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+				       struct tee_device *teedev,
+				       size_t size, u32 flags)
+{
+	struct tee_shm_pool_mgr *poolm = NULL;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+
+	if (ctx && ctx->teedev != teedev) {
+		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!(flags & TEE_SHM_MAPPED)) {
+		dev_err(teedev->dev.parent,
+			"only mapped allocations supported\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->pool) {
+		/* teedev has been detached from driver */
+		ret = ERR_PTR(-EINVAL);
+		goto err_dev_put;
+	}
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_dev_put;
+	}
+
+	shm->flags = flags | TEE_SHM_POOL;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	if (flags & TEE_SHM_DMA_BUF)
+		poolm = teedev->pool->dma_buf_mgr;
+	else
+		poolm = teedev->pool->private_mgr;
+
+	rc = poolm->ops->alloc(poolm, shm, size);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err_kfree;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err_pool_free;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			goto err_rem;
+		}
+	}
+
+	if (ctx) {
+		teedev_ctx_get(ctx);
+		mutex_lock(&teedev->mutex);
+		list_add_tail(&shm->link, &ctx->list_shm);
+		mutex_unlock(&teedev->mutex);
+	}
+
+	return shm;
+err_rem:
+	mutex_lock(&teedev->mutex);
+	idr_remove(&teedev->idr, shm->id);
+	mutex_unlock(&teedev->mutex);
+err_pool_free:
+	poolm->ops->free(poolm, shm);
+err_kfree:
+	kfree(shm);
+err_dev_put:
+	tee_device_put(teedev);
+	return ret;
+}
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF is set, global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc);
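+
+/*
+ * A minimal usage sketch (illustrative only; assumes a valid
+ * struct tee_context *ctx obtained elsewhere):
+ *
+ *	struct tee_shm *shm;
+ *	void *va;
+ *
+ *	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ *	if (IS_ERR(shm))
+ *		return PTR_ERR(shm);
+ *	va = tee_shm_get_va(shm, 0);
+ *	if (!IS_ERR(va))
+ *		memset(va, 0, 4096);
+ *	...
+ *	tee_shm_free(shm);
+ */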
+
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags)
+{
+	struct tee_device *teedev = ctx->teedev;
+	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+	int num_pages;
+	unsigned long start;
+
+	if (flags != req_flags)
+		return ERR_PTR(-ENOTSUPP);
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->desc->ops->shm_register ||
+	    !teedev->desc->ops->shm_unregister) {
+		tee_device_put(teedev);
+		return ERR_PTR(-ENOTSUPP);
+	}
+
+	teedev_ctx_get(ctx);
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	shm->id = -1;
+	addr = untagged_addr(addr);
+	start = rounddown(addr, PAGE_SIZE);
+	shm->offset = addr - start;
+	shm->size = length;
+	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+	if (!shm->pages) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	if (rc > 0)
+		shm->num_pages = rc;
+	if (rc != num_pages) {
+		if (rc >= 0)
+			rc = -ENOMEM;
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err;
+	}
+
+	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+					     shm->num_pages, start);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			teedev->desc->ops->shm_unregister(ctx, shm);
+			goto err;
+		}
+	}
+
+	mutex_lock(&teedev->mutex);
+	list_add_tail(&shm->link, &ctx->list_shm);
+	mutex_unlock(&teedev->mutex);
+
+	return shm;
+err:
+	if (shm) {
+		size_t n;
+
+		if (shm->id >= 0) {
+			mutex_lock(&teedev->mutex);
+			idr_remove(&teedev->idr, shm->id);
+			mutex_unlock(&teedev->mutex);
+		}
+		if (shm->pages) {
+			for (n = 0; n < shm->num_pages; n++)
+				put_page(shm->pages[n]);
+			kfree(shm->pages);
+		}
+	}
+	kfree(shm);
+	teedev_ctx_put(ctx);
+	tee_device_put(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
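+
+/*
+ * A minimal sketch of registering user memory (illustrative; uaddr and
+ * len are assumed to describe a userspace buffer passed in via ioctl):
+ *
+ *	struct tee_shm *shm;
+ *
+ *	shm = tee_shm_register(ctx, uaddr, len,
+ *			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+ *	if (IS_ERR(shm))
+ *		return PTR_ERR(shm);
+ *	...
+ *	tee_shm_free(shm);
+ */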
+
+/**
+ * tee_shm_get_fd() - Increase reference count and return file descriptor
+ * @shm:	Shared memory handle
+ * @returns user space file descriptor to shared memory
+ */
+int tee_shm_get_fd(struct tee_shm *shm)
+{
+	int fd;
+
+	if (!(shm->flags & TEE_SHM_DMA_BUF))
+		return -EINVAL;
+
+	get_dma_buf(shm->dmabuf);
+	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+	if (fd < 0)
+		dma_buf_put(shm->dmabuf);
+	return fd;
+}
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm:	Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm)
+{
+	/*
+	 * dma_buf_put() decreases the dmabuf reference counter and will
+	 * call tee_shm_release() when the last reference is gone.
+	 *
+	 * In the case of driver private memory we call tee_shm_release
+	 * directly instead as it doesn't have a reference counter.
+	 */
+	if (shm->flags & TEE_SHM_DMA_BUF)
+		dma_buf_put(shm->dmabuf);
+	else
+		tee_shm_release(shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_free);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm:	Shared memory handle
+ * @va:		Virtual address to translate
+ * @pa:		Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
+{
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
+	/* Check that we're in the range of the shm */
+	if ((char *)va < (char *)shm->kaddr)
+		return -EINVAL;
+	if ((char *)va >= ((char *)shm->kaddr + shm->size))
+		return -EINVAL;
+
+	return tee_shm_get_pa(
+			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
+}
+EXPORT_SYMBOL_GPL(tee_shm_va2pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm:	Shared memory handle
+ * @pa:		Physical address to translate
+ * @va:		Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
+{
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
+	/* Check that we're in the range of the shm */
+	if (pa < shm->paddr)
+		return -EINVAL;
+	if (pa >= (shm->paddr + shm->size))
+		return -EINVAL;
+
+	if (va) {
+		void *v = tee_shm_get_va(shm, pa - shm->paddr);
+
+		if (IS_ERR(v))
+			return PTR_ERR(v);
+		*va = v;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pa2va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm:	Shared memory handle
+ * @offs:	Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ *	the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
+{
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return ERR_PTR(-EINVAL);
+	if (offs >= shm->size)
+		return ERR_PTR(-EINVAL);
+	return (char *)shm->kaddr + offs;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_va);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm:	Shared memory handle
+ * @offs:	Offset from start of this shared memory
+ * @pa:		Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ *	error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
+{
+	if (offs >= shm->size)
+		return -EINVAL;
+	if (pa)
+		*pa = shm->paddr + offs;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_pa);
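+
+/*
+ * For example (a sketch, assuming @shm was allocated with TEE_SHM_MAPPED
+ * and @offs is an offset into it), both translations can be combined:
+ *
+ *	phys_addr_t pa;
+ *	void *va = tee_shm_get_va(shm, offs);
+ *
+ *	if (IS_ERR(va) || tee_shm_get_pa(shm, offs, &pa))
+ *		return -EINVAL;
+ */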
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx:	Context owning the shared memory
+ * @id:		Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
+{
+	struct tee_device *teedev;
+	struct tee_shm *shm;
+
+	if (!ctx)
+		return ERR_PTR(-EINVAL);
+
+	teedev = ctx->teedev;
+	mutex_lock(&teedev->mutex);
+	shm = idr_find(&teedev->idr, id);
+	if (!shm || shm->ctx != ctx)
+		shm = ERR_PTR(-EINVAL);
+	else if (shm->flags & TEE_SHM_DMA_BUF)
+		get_dma_buf(shm->dmabuf);
+	mutex_unlock(&teedev->mutex);
+	return shm;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm:	Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm)
+{
+	if (shm->flags & TEE_SHM_DMA_BUF)
+		dma_buf_put(shm->dmabuf);
+}
+EXPORT_SYMBOL_GPL(tee_shm_put);
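+
+/*
+ * Typical lookup pattern (a sketch; @id is assumed to be a shared memory
+ * reference received from secure world or user space):
+ *
+ *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
+ *
+ *	if (IS_ERR(shm))
+ *		return PTR_ERR(shm);
+ *	... use the buffer ...
+ *	tee_shm_put(shm);
+ */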
diff --git a/src/kernel/linux/v4.19/drivers/tee/tee_shm_pool.c b/src/kernel/linux/v4.19/drivers/tee/tee_shm_pool.c
new file mode 100644
index 0000000..e6d4b9e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tee_shm_pool.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
+			     struct tee_shm *shm, size_t size)
+{
+	unsigned long va;
+	struct gen_pool *genpool = poolm->private_data;
+	size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+	va = gen_pool_alloc(genpool, s);
+	if (!va)
+		return -ENOMEM;
+
+	memset((void *)va, 0, s);
+	shm->kaddr = (void *)va;
+	shm->paddr = gen_pool_virt_to_phys(genpool, va);
+	shm->size = s;
+	return 0;
+}
+
+static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
+			     struct tee_shm *shm)
+{
+	gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
+		      shm->size);
+	shm->kaddr = NULL;
+}
+
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	gen_pool_destroy(poolm->private_data);
+	kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
+	.alloc = pool_op_gen_alloc,
+	.free = pool_op_gen_free,
+	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
+};
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info:	Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of the pools must be page aligned.
+ *
+ * Allocations with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_info, others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+			   struct tee_shm_pool_mem_info *dmabuf_info)
+{
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
+
+	/*
+	 * Create the pool for driver private shared memory
+	 */
+	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+					    priv_info->size,
+					    3 /* 8 byte aligned */);
+	if (IS_ERR(rc))
+		return rc;
+	priv_mgr = rc;
+
+	/*
+	 * Create the pool for dma_buf shared memory
+	 */
+	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+					    dmabuf_info->paddr,
+					    dmabuf_info->size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
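+
+/*
+ * A minimal setup sketch (illustrative; va, pa and the SZ_2M split are
+ * placeholder values for a driver's reserved, contiguous memory region):
+ *
+ *	struct tee_shm_pool_mem_info priv_info = {
+ *		.vaddr = (unsigned long)va,
+ *		.paddr = pa,
+ *		.size = SZ_2M,
+ *	};
+ *	struct tee_shm_pool_mem_info dmabuf_info = {
+ *		.vaddr = (unsigned long)va + SZ_2M,
+ *		.paddr = pa + SZ_2M,
+ *		.size = SZ_2M,
+ *	};
+ *	struct tee_shm_pool *pool =
+ *		tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
+ *
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ */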
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order)
+{
+	const size_t page_mask = PAGE_SIZE - 1;
+	struct tee_shm_pool_mgr *mgr;
+	int rc;
+
+	/* Start and end must be page aligned */
+	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+		return ERR_PTR(-EINVAL);
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->private_data = gen_pool_create(min_alloc_order, -1);
+	if (!mgr->private_data) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+	if (rc) {
+		gen_pool_destroy(mgr->private_data);
+		goto err;
+	}
+
+	mgr->ops = &pool_ops_generic;
+
+	return mgr;
+err:
+	kfree(mgr);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+		mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+	struct tee_shm_pool *pool;
+
+	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+		return ERR_PTR(-EINVAL);
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->private_mgr = priv_mgr;
+	pool->dma_buf_mgr = dmabuf_mgr;
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool:	The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+	if (pool->private_mgr)
+		tee_shm_pool_mgr_destroy(pool->private_mgr);
+	if (pool->dma_buf_mgr)
+		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
+	kfree(pool);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_free);
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/Kconfig b/src/kernel/linux/v4.19/drivers/tee/tkcore/Kconfig
new file mode 100644
index 0000000..bc64008
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2019 TrustKernel Incorporated
+
+config TRUSTKERNEL_TEE_SUPPORT
+	bool "TrustKernel Trusted Execution Environment Support"
+	default n
+	help
+	  Enable support for the TrustKernel TEE drivers.
+	  All TrustKernel-related functions depend on this option.
+	  Say Y if you would like to enable this feature.
+	  If unsure, say N.
+
+config TRUSTKERNEL_TEE_FP_SUPPORT
+	bool "TrustKernel Trusted Execution Environment Finger Print Support"
+	depends on TRUSTKERNEL_TEE_SUPPORT
+	default n
+	help
+	  Enable the TrustKernel TEE clock-management mechanism.
+	  This feature allows the TEE OS to manage clocks already
+	  used by Linux.
+	  Say Y if you would like to enable this feature.
+	  If unsure, say Y.
+
+config TRUSTKERNEL_TEE_RPMB_SUPPORT
+	bool "TrustKernel Trusted Execution Environment RPMB Support"
+	depends on TRUSTKERNEL_TEE_SUPPORT
+	default n
+	help
+	  Enable access to the Replay Protected Memory Block (RPMB)
+	  region for TrustKernel TEE.
+	  Say Y if you would like to enable this feature.
+	  If unsure, say Y.
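+
+# An illustrative defconfig fragment enabling TrustKernel TEE with RPMB
+# support (not taken from this change) would contain:
+#   CONFIG_TEE=y
+#   CONFIG_TRUSTKERNEL_TEE_SUPPORT=y
+#   CONFIG_TRUSTKERNEL_TEE_RPMB_SUPPORT=y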
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/Makefile b/src/kernel/linux/v4.19/drivers/tee/tkcore/Makefile
new file mode 100644
index 0000000..6e645cf
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2019 TrustKernel Incorporated
+
+obj-$(CONFIG_TRUSTKERNEL_TEE_SUPPORT) += core/
+obj-$(CONFIG_TRUSTKERNEL_TEE_SUPPORT) += armtz/
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/Makefile b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/Makefile
new file mode 100644
index 0000000..d3cb173
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2019 TrustKernel Incorporated
+
+ifneq ($(strip $(M)),)
+	obj-m += tkcore_drv.o
+else
+	obj-y += tkcore_drv.o
+endif
+
+M ?= $(srctree)/drivers/tee/tkcore
+
+ccflags-y += -I$(M)/include/arm_common
+ccflags-y += -I$(M)/include/linux
+ccflags-y += -I$(M)/include
+ccflags-y += -I$(M)/core
+
+ifeq ($(CONFIG_TRUSTKERNEL_TEE_RPMB_SUPPORT),y)
+ccflags-y += -DRPMB_SUPPORT
+endif
+
+tkcore_drv-objs := \
+		tee_tz_drv.o \
+		tee_smc_xfer.o \
+		tee_mem.o \
+		handle.o
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.c
new file mode 100644
index 0000000..2c029dc
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "handle.h"
+
+/*
+ * Define the initial capacity of the database. It should be a low number
+ * multiple of 2 since some databases are likely to only use a few handles.
+ * Since the algorithm doubles the capacity when growing, it shouldn't cause
+ * a noticeable overhead on large databases.
+ */
+#define HANDLE_DB_INITIAL_MAX_PTRS	4
+
+void handle_db_destroy(struct handle_db *db)
+{
+	if (db) {
+		kfree(db->ptrs);
+		db->ptrs = NULL;
+		db->max_ptrs = 0;
+	}
+}
+
+int handle_get(struct handle_db *db, void *ptr)
+{
+	unsigned int n;
+	void *p;
+	unsigned int new_max_ptrs;
+
+	if (!db || !ptr)
+		return -1;
+
+	/* Try to find an empty location */
+	for (n = 0; n < db->max_ptrs; n++) {
+		if (!db->ptrs[n]) {
+			db->ptrs[n] = ptr;
+			return n;
+		}
+	}
+
+	/* No location available, grow the ptrs array */
+	if (db->max_ptrs)
+		new_max_ptrs = db->max_ptrs * 2;
+	else
+		new_max_ptrs = HANDLE_DB_INITIAL_MAX_PTRS;
+	p = krealloc(db->ptrs, new_max_ptrs * sizeof(void *), GFP_KERNEL);
+	if (!p)
+		return -1;
+	db->ptrs = p;
+	memset(db->ptrs + db->max_ptrs, 0,
+	       (new_max_ptrs - db->max_ptrs) * sizeof(void *));
+	db->max_ptrs = new_max_ptrs;
+
+	/* Since n stopped at db->max_ptrs there is an empty location there */
+	db->ptrs[n] = ptr;
+	return n;
+}
+
+void *handle_put(struct handle_db *db, int handle)
+{
+	void *p;
+
+	if (!db || handle < 0 || (unsigned int) handle >= db->max_ptrs)
+		return NULL;
+
+	p = db->ptrs[handle];
+	db->ptrs[handle] = NULL;
+	return p;
+}
+
+void *handle_lookup(struct handle_db *db, int handle)
+{
+	if (!db || handle < 0 || (unsigned int) handle >= db->max_ptrs)
+		return NULL;
+
+	return db->ptrs[handle];
+}
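+
+/*
+ * A minimal usage sketch (illustrative; 'priv' stands for any pointer a
+ * caller wants to map to a small integer cookie):
+ *
+ *	static struct handle_db db = HANDLE_DB_INITIALIZER;
+ *
+ *	int cookie = handle_get(&db, priv);
+ *
+ *	if (cookie < 0)
+ *		return -ENOMEM;
+ *	... hand 'cookie' to the other side ...
+ *	priv = handle_put(&db, cookie);
+ */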
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.h
new file mode 100644
index 0000000..cdf8715
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/handle.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_HANDLE_H
+#define TEE_HANDLE_H
+
+struct handle_db {
+	void **ptrs;
+	unsigned int max_ptrs;
+};
+
+#define HANDLE_DB_INITIALIZER { NULL, 0 }
+
+/*
+ * Frees all internal data structures of the database, but does not free
+ * the db pointer. The database is safe to reuse after it's destroyed; it
+ * will just be empty again.
+ */
+void handle_db_destroy(struct handle_db *db);
+
+/*
+ * Allocates a new handle and assigns the supplied pointer to it,
+ * ptr must not be NULL.
+ * The function returns
+ * >= 0 on success and
+ * -1 on failure
+ */
+int handle_get(struct handle_db *db, void *ptr);
+
+/*
+ * Deallocates a handle. Returns the associated pointer of the handle
+ * if the handle was valid, or NULL if it's invalid.
+ */
+void *handle_put(struct handle_db *db, int handle);
+
+/*
+ * Returns the associated pointer of the handle if the handle is a valid
+ * handle.
+ * Returns NULL on failure.
+ */
+void *handle_lookup(struct handle_db *db, int handle);
+
+#endif /*TEE_HANDLE_H*/
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.c
new file mode 100644
index 0000000..0a14d45
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/slab.h>
+
+#include "tee_mem.h"
+
+#define _DUMP_INFO_ALLOCATOR 0
+#define USE_DEVM_ALLOC 1
+
+#ifndef USE_DEVM_ALLOC
+#define _KMALLOC(s, f) kmalloc(s, f)
+#define _KFREE(a)	kfree(a)
+#else
+#define _KMALLOC(s, f)	devm_kzalloc(dev, s, f)
+#define _KFREE(a)		devm_kfree(dev, a)
+#endif
+
+/**
+ * \struct mem_chunk
+ * \brief Elementary descriptor of an allocated memory block
+ *
+ * \param node		Node for linked list
+ * \param counter   Reference counter
+ *   (0 indicates that the block is not used/freed)
+ * \param size		Total size in bytes
+ * \param paddr		Physical base address
+ *
+ * Elementary memory block definition
+ */
+struct mem_chunk {
+	struct list_head node;
+	uint32_t counter;
+	size_t size;
+	unsigned long paddr;
+};
+
+/**
+ * \struct shm_pool
+ * \brief Main structure to describe a shared memory pool
+ *
+ * \param size		Total size in bytes of the associated memory region
+ * \param vaddr		Logical base address
+ * \param paddr		Physical base address
+ * \param used		Total size in bytes of the used memory
+ * \param mchunks	List head for handle the elementary memory blocks
+ *
+ * Shared memory pool structure definition
+ */
+struct shm_pool {
+	struct mutex lock;
+	size_t size;		/* Size of pool/heap memory segment */
+	size_t used;		/* Number of bytes allocated */
+	void *vaddr;		/* Associated Virtual address */
+	unsigned long paddr;	/* Associated Physical address */
+	bool cached;		/* true if pool is cacheable */
+	struct list_head mchunks;	/* Head of memory chunk/block list */
+};
+
+#define __CALCULATE_RATIO_MEM_USED(a) (((a->used)*100)/(a->size))
+
+/**
+ * \brief Dumps the information of the shared memory pool
+ *
+ * \param pool           Pointer on the pool
+ * \param forced         Flag to force the log of the detailed information
+ *
+ * Dump/log the metadata of the shared memory pool to the kernel log.
+ *
+ */
+void tee_shm_pool_dump(struct device *dev, struct shm_pool *pool, bool forced)
+{
+	struct mem_chunk *chunk;
+
+	if (WARN_ON(!dev || !pool))
+		return;
+
+	pr_info(
+		"%s() poolH(0x%p) pAddr=0x%p vAddr=0x%p size=%zu used=%zu(%zu%%)\n",
+		__func__,
+		(void *) pool,
+		(void *) pool->paddr,
+		(void *) pool->vaddr,
+		pool->size, pool->used, __CALCULATE_RATIO_MEM_USED(pool));
+
+	if ((pool->used != 0) || (forced == true)) {
+		pr_info("  \\ HEAD next:[0x%p] prev:[0x%p]\n",
+			 (void *)pool->mchunks.next,
+			 (void *)pool->mchunks.prev);
+
+		pr_info(
+			 "  |-[@]        next       prev       pAddr      size     refCount\n");
+
+		list_for_each_entry(chunk, &pool->mchunks, node) {
+			pr_info("  | [0x%p] 0x%p 0x%p 0x%p %08zu %d\n",
+				 (void *)chunk,
+				 (void *)chunk->node.next,
+				 (void *)chunk->node.prev,
+				 (void *)chunk->paddr,
+				 chunk->size, chunk->counter);
+		}
+	}
+}
+
+bool tee_shm_pool_is_cached(struct shm_pool *pool)
+{
+	return pool->cached;
+}
+
+void tee_shm_pool_set_cached(struct shm_pool *pool)
+{
+	pool->cached = true;
+}
+
+/**
+ * \brief Creates and returns a new shared memory pool manager structure
+ *
+ * \param shm_size      Size of the associated memory chunk
+ * \param shm_vaddr     Virtual/logical base address
+ * \param shm_paddr     Physical base address
+ *
+ * \return Reference of the created shared memory pool manager
+ *
+ * Create and initialize a shared memory pool manager.
+ * The description of the memory region (shm_size, shm_vaddr, shm_paddr)
+ * which is passed should describe a physically AND virtually contiguous
+ * region (no check is performed by the function).
+ * If an error is detected, the returned pool is NULL.
+ */
+struct shm_pool *tee_shm_pool_create(struct device *dev, size_t shm_size,
+				     void *shm_vaddr, unsigned long shm_paddr)
+{
+	struct mem_chunk *chunk = NULL;
+	struct shm_pool *pool = NULL;
+
+	if (WARN_ON(!dev))
+		goto alloc_failed;
+
+	/* Alloc and initialize the shm_pool structure */
+	pool = _KMALLOC(sizeof(struct shm_pool), GFP_KERNEL);
+	if (!pool) {
+		pr_err("kmalloc <struct shm_pool> failed\n");
+		goto alloc_failed;
+	}
+	memset(pool, 0, sizeof(*pool));
+	mutex_init(&pool->lock);
+	mutex_lock(&pool->lock);
+
+	INIT_LIST_HEAD(&(pool->mchunks));
+	pool->size = shm_size;
+	pool->vaddr = shm_vaddr;
+	pool->paddr = shm_paddr;
+
+	/* Create the initial elementary memory chunk	*/
+	/* which handles the whole memory region		*/
+	chunk = _KMALLOC(sizeof(struct mem_chunk), GFP_KERNEL);
+	if (!chunk) {
+		pr_err("kmalloc <struct MemChunk> failed\n");
+		goto alloc_failed;
+	}
+	memset(chunk, 0, sizeof(*chunk));
+	chunk->paddr = shm_paddr;
+	chunk->size = shm_size;
+
+	/* Adds the new entry immediately after the list head */
+	list_add(&(chunk->node), &(pool->mchunks));
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 0)
+	tee_shm_pool_dump(dev, pool, true);
+#endif
+
+	mutex_unlock(&pool->lock);
+	return pool;
+
+alloc_failed:
+	if (chunk)
+		_KFREE(chunk);
+
+	if (pool)
+		_KFREE(pool);
+
+	return NULL;
+}
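+
+/*
+ * A minimal lifecycle sketch for this allocator (illustrative; dev, size,
+ * vaddr and paddr are assumed to describe one contiguous shared memory
+ * region):
+ *
+ *	struct shm_pool *pool;
+ *	unsigned long pa;
+ *
+ *	pool = tee_shm_pool_create(dev, size, vaddr, paddr);
+ *	if (!pool)
+ *		return -ENOMEM;
+ *	pa = tkcore_shm_pool_alloc(dev, pool, 256, 32);
+ *	...
+ *	tkcore_shm_pool_free(dev, pool, pa, NULL);
+ *	tee_shm_pool_destroy(dev, pool);
+ */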
+
+/**
+ * Local helper function to check that the physical address is valid
+ */
+static inline int is_valid_paddr(struct shm_pool *pool, unsigned long paddr)
+{
+	return (paddr >= pool->paddr && paddr < (pool->paddr + pool->size));
+}
+
+/**
+ * Local helper function to check that the virtual address is valid
+ */
+static inline int is_valid_vaddr(struct shm_pool *pool, void *vaddr)
+{
+	return (vaddr >= pool->vaddr && vaddr < (pool->vaddr + pool->size));
+}
+
+/**
+ * \brief Destroy the shared memory pool manager
+ *
+ * \param pool	Pointer on the pool
+ *
+ * Destroy a memory pool manager
+ *
+ */
+void tee_shm_pool_destroy(struct device *dev, struct shm_pool *pool)
+{
+	struct mem_chunk *chunk;
+
+	if (WARN_ON(!dev || !pool))
+		return;
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 0)
+	tee_shm_pool_dump(dev, pool, true);
+#endif
+
+	tee_shm_pool_reset(dev, pool);
+
+	chunk = list_first_entry(&pool->mchunks, struct mem_chunk, node);
+
+	_KFREE(chunk);
+	_KFREE(pool);
+
+}
+
+/**
+ * \brief Free all reserved chunks, if any, and reset the pool to its initial state
+ *
+ * \param pool  Pointer on the pool
+ *
+ */
+void tee_shm_pool_reset(struct device *dev, struct shm_pool *pool)
+{
+	struct mem_chunk *chunk;
+	struct mem_chunk *tmp;
+	struct mem_chunk *first = NULL;
+
+	if (WARN_ON(!dev || !pool))
+		return;
+
+	mutex_lock(&pool->lock);
+
+	list_for_each_entry_safe(chunk, tmp, &pool->mchunks, node) {
+		if (first != NULL) {
+			pr_err("Free lost chunkH=0x%p\n", (void *)chunk);
+			list_del(&chunk->node);
+			_KFREE(chunk);
+		} else
+			first = chunk;
+	}
+
+	first->counter = 0;
+	first->paddr = pool->paddr;
+	first->size = pool->size;
+	pool->used = 0;
+
+	mutex_unlock(&pool->lock);
+}
+
+/**
+ * \brief Return the logical address
+ *
+ * \param pool          Pointer on the pool
+ * \param paddr         Physical address
+ *
+ * \return Virtual/logical address
+ *
+ * Return the associated virtual/logical address. The address should be inside
+ * the range of addresses managed by the shm pool.
+ *
+ */
+void *tee_shm_pool_p2v(struct device *dev, struct shm_pool *pool,
+		       unsigned long paddr)
+{
+	unsigned long offset;
+	void *p;
+
+	if (WARN_ON(!dev || !pool))
+		return NULL;
+
+	mutex_lock(&pool->lock);
+	if (!is_valid_paddr(pool, paddr)) {
+		mutex_unlock(&pool->lock);
+		pr_err("%s() paddr=0x%p not in the shm pool\n",
+			__func__,
+			(void *) paddr);
+		return NULL;
+	}
+
+	offset = paddr - pool->paddr;
+	p = (void *)((unsigned long)pool->vaddr + offset);
+
+	mutex_unlock(&pool->lock);
+
+	return p;
+}
+
+/**
+ * \brief Return the physical address
+ *
+ * \param pool          Pointer on the pool
+ * \param vaddr         Logical/Virtual address
+ *
+ * \return Physical address
+ *
+ * Return the associated physical address. The address should be inside
+ * the range of addresses managed by the pool.
+ *
+ */
+unsigned long tee_shm_pool_v2p(struct device *dev, struct shm_pool *pool,
+			       void *vaddr)
+{
+	unsigned long offset, p;
+
+	if (WARN_ON(!dev || !pool))
+		return 0UL;
+
+	mutex_lock(&pool->lock);
+	if (!is_valid_vaddr(pool, vaddr)) {
+		pr_err("%s() vaddr=0x%p not in shm pool\n",
+			__func__, (void *) vaddr);
+		mutex_unlock(&pool->lock);
+		return 0UL;
+	}
+
+	offset = vaddr - pool->vaddr;
+	p = pool->paddr + offset;
+
+	mutex_unlock(&pool->lock);
+	return p;
+}
+
+/**
+ * \brief Allocate a new block of memory
+ *
+ * \param pool          Pointer on the pool
+ * \param size          Expected size (in byte)
+ * \param alignment     Alignment constraint (in byte)
+ *
+ * \return Physical base address of the allocated block
+ *
+ * Allocate a memory chunk inside the memory region managed by the pool.
+ *
+ */
+unsigned long tkcore_shm_pool_alloc(struct device *dev,
+				 struct shm_pool *pool,
+				 size_t size, size_t alignment)
+{
+	struct mem_chunk *chunk;
+	struct mem_chunk *betterchunk = NULL;
+	struct mem_chunk *prev_chunk = NULL;
+	struct mem_chunk *next_chunk = NULL;
+	unsigned long begAddr;
+	unsigned long endAddr;
+
+	if (WARN_ON(!dev || !pool))
+		return 0UL;
+
+	/* Align on cache line of the target */
+	/* \todo(jmd) Should be defined by a global target specific parameter */
+	/*  size = (size + (32-1)) & ~(32-1) */
+
+	if (ALIGN(size, 0x20) < size)
+		goto failed_out;
+
+	if (alignment == 0)
+		alignment = 1;
+
+	size = ALIGN(size, 0x20);
+
+	alignment = ALIGN(alignment, 0x20);
+
+	if (size > (pool->size - pool->used))
+		goto failed_out;
+
+	mutex_lock(&pool->lock);
+
+	/**
+	 * Algorithm: Smallest waste (best fit):
+	 * We choose the block that has the
+	 * smallest waste. In other words we choose
+	 * the block so that
+	 * size(b) - size is as small as possible.
+	 */
+	list_for_each_entry(chunk, &pool->mchunks, node) {
+		if (chunk->counter == 0) {	/* Free chunk */
+			begAddr = ALIGN(chunk->paddr, alignment);
+			endAddr = begAddr + size;
+
+			if (begAddr >= chunk->paddr
+			    && endAddr <= (chunk->paddr + chunk->size)
+			    && (betterchunk == NULL
+				/* Always split smaller block */
+				|| chunk->size < betterchunk->size))
+				betterchunk = chunk;
+		}
+	}
+
+	/**
+	 * Update the linked list
+	 */
+	if (betterchunk != NULL) {
+		prev_chunk = _KMALLOC(sizeof(struct mem_chunk), GFP_KERNEL);
+		next_chunk = _KMALLOC(sizeof(struct mem_chunk), GFP_KERNEL);
+
+		if ((!prev_chunk) || (!next_chunk))
+			goto failed_out_unlock;
+
+		begAddr = ALIGN(betterchunk->paddr, alignment);
+		endAddr = begAddr + size;
+
+		if (betterchunk->paddr < begAddr) {
+			/* memory between begin of chunk and begin
+			 * of created memory => create a free chunk
+			 */
+			prev_chunk->counter = 0;
+			prev_chunk->paddr = betterchunk->paddr;
+			prev_chunk->size = begAddr - betterchunk->paddr;
+
+			betterchunk->paddr = begAddr;
+			betterchunk->size -= prev_chunk->size;
+
+			list_add_tail(&(prev_chunk->node),
+				&(betterchunk->node));
+			prev_chunk = NULL;
+		} else
+			_KFREE(prev_chunk);
+
+		if (betterchunk->paddr + betterchunk->size > endAddr) {
+			/* memory between end of chunk and end of
+			 * created memory => create a free chunk
+			 */
+			next_chunk->counter = 0;
+			next_chunk->paddr = endAddr;
+			next_chunk->size = betterchunk->size - size;
+
+			betterchunk->size = size;
+
+			list_add(&(next_chunk->node), &(betterchunk->node));
+			next_chunk = NULL;
+		} else
+			_KFREE(next_chunk);
+
+		betterchunk->counter = 1;
+		pool->used += size;
+
+		mutex_unlock(&pool->lock);
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 1)
+		tee_shm_pool_dump(dev, pool, false);
+#endif
+
+		return betterchunk->paddr;
+	}
+
+failed_out_unlock:
+	mutex_unlock(&pool->lock);
+failed_out:
+	if (prev_chunk)
+		_KFREE(prev_chunk);
+	if (next_chunk)
+		_KFREE(next_chunk);
+
+	pr_err(
+		"%s() FAILED, size=0x%zx, align=0x%zx free=%zu\n",
+		__func__, size, alignment, pool->size - pool->used);
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 1)
+	tee_shm_pool_dump(dev, pool, true);
+#endif
+
+	return 0UL;
+}
+
+/**
+ * \brief Release an allocated block of memory
+ *
+ * \param pool          Pointer on the pool
+ * \param paddr         Physical address of the block which must be released
+ * \param size          Reference to return the size of the block
+ *
+ * Free an allocated memory block inside
+ * the memory region managed by the pool.
+ *
+ */
+int tkcore_shm_pool_free(struct device *dev, struct shm_pool *pool,
+		      unsigned long paddr, size_t *size)
+{
+	struct mem_chunk *chunk;
+
+	if (WARN_ON(!dev || !pool))
+		return -EINVAL;
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 1)
+	tee_shm_pool_dump(dev, pool, false);
+#endif
+
+	mutex_lock(&pool->lock);
+
+	if (!is_valid_paddr(pool, paddr))
+		goto out_failed;
+
+	list_for_each_entry(chunk, &pool->mchunks, node) {
+		if (chunk->paddr == paddr) {
+			if (size != NULL)
+				*size = chunk->size;
+
+			if (chunk->counter == 0) {
+				pr_warn(
+					 "< %s() WARNING, paddr=0x%p already released\n",
+					 __func__, (void *) paddr);
+				/* don't leak the lock on this error path */
+				mutex_unlock(&pool->lock);
+				return -EINVAL;
+			} else if (--chunk->counter == 0) {
+
+				pool->used -= chunk->size;
+
+				/* Merge with previous */
+				if (chunk->node.prev != &pool->mchunks) {
+					struct mem_chunk *prev =
+						list_entry(chunk->node.prev,
+							struct mem_chunk, node);
+					if (prev->counter == 0) {
+						prev->size += chunk->size;
+						list_del(&chunk->node);
+						_KFREE(chunk);
+						chunk = prev;
+					}
+				}
+				/* Merge with next */
+				if (chunk->node.next != &pool->mchunks) {
+					struct mem_chunk *next =
+						list_entry(chunk->node.next,
+							struct mem_chunk, node);
+					if (next->counter == 0) {
+						chunk->size += next->size;
+						list_del(&next->node);
+						_KFREE(next);
+					}
+				}
+				mutex_unlock(&pool->lock);
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 1)
+				tee_shm_pool_dump(dev, pool, false);
+#endif
+				return 0;
+
+			} else {
+				mutex_unlock(&pool->lock);
+				return 1;
+			}
+		}
+	}
+
+out_failed:
+	mutex_unlock(&pool->lock);
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 1)
+	tee_shm_pool_dump(dev, pool, false);
+#endif
+	pr_err(
+		"< %s() FAILED, pAddr=0x%p not found\n",
+		__func__, (void *) paddr);
+	return -EINVAL;
+}
+
+/**
+ * \brief Increase the reference count of the memory chunk
+ *
+ * \param pool          Pointer on the pool
+ * \param paddr         Physical address
+ *
+ * \return true if successful (false otherwise)
+ *
+ * Increment the reference count of the allocated block of memory.
+ * paddr should be a valid address returned by tkcore_shm_pool_alloc().
+ *
+ */
+bool tee_shm_pool_incref(struct device *dev, struct shm_pool *pool,
+			 unsigned long paddr)
+{
+	struct mem_chunk *chunk;
+
+	if (WARN_ON(!dev || !pool))
+		return false;
+
+	mutex_lock(&pool->lock);
+
+	if (!is_valid_paddr(pool, paddr))
+		goto out_failed;
+
+	list_for_each_entry(chunk, &pool->mchunks, node) {
+		if (chunk->paddr == paddr) {
+			chunk->counter++;
+
+#if defined(_DUMP_INFO_ALLOCATOR) && (_DUMP_INFO_ALLOCATOR > 0)
+			tee_shm_pool_dump(dev, pool, false);
+#endif
+			mutex_unlock(&pool->lock);
+			return true;
+		}
+	}
+
+out_failed:
+	mutex_unlock(&pool->lock);
+
+	pr_err(
+		"%s() FAILED, pAddr=%p is not a valid @\n",
+		__func__, (void *) paddr);
+
+	return false;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.h
new file mode 100644
index 0000000..5924c9d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_mem.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_MEM_H
+#define TEE_MEM_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct shm_pool;
+
+struct shm_pool *tee_shm_pool_create(struct device *dev, size_t shm_size,
+	void *shm_vaddr, unsigned long shm_paddr);
+
+void tee_shm_pool_destroy(struct device *dev, struct shm_pool *pool);
+
+void *tee_shm_pool_p2v(struct device *dev, struct shm_pool *pool,
+	unsigned long paddr);
+
+unsigned long tee_shm_pool_v2p(struct device *dev, struct shm_pool *pool,
+	void *vaddr);
+
+unsigned long tkcore_shm_pool_alloc(struct device *dev,
+	struct shm_pool *pool, size_t size, size_t alignment);
+
+int tkcore_shm_pool_free(struct device *dev, struct shm_pool *pool,
+	unsigned long paddr, size_t *size);
+
+bool tee_shm_pool_incref(struct device *dev, struct shm_pool *pool,
+	unsigned long paddr);
+
+void tee_shm_pool_dump(struct device *dev, struct shm_pool *pool, bool forced);
+
+void tee_shm_pool_reset(struct device *dev, struct shm_pool *pool);
+
+bool tee_shm_pool_is_cached(struct shm_pool *pool);
+
+void tee_shm_pool_set_cached(struct shm_pool *pool);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.c
new file mode 100644
index 0000000..2b99773
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+
+#include <linux/version.h>
+
+#include <linux/tee_clkmgr.h>
+
+#include <tee_kernel_lowlevel_api.h>
+#include <arm_common/teesmc.h>
+
+struct tee_smc_xfer_ctl {
+	/* number of cmds that are not
+	 * processed by `tee_smc_daemon` kthreads
+	 */
+	atomic_t nr_unbound_cmds;
+	/* guarantee the mutual exclusiveness of SMC calls */
+	struct mutex xfer_lock;
+
+	/* cmds that wait for
+	 * available TEE thread slots
+	 */
+	atomic_t nr_waiting_cmds;
+	struct completion smc_comp;
+
+	/* statistics information */
+	s64 max_smc_time;
+	s64 max_cmd_time;
+
+};
+
+static struct tee_smc_xfer_ctl tee_smc_xfer_ctl;
+
+static inline void trace_tee_smc(struct tee_smc_xfer_ctl *ctl, int rv,
+				 s64 time_start, s64 time_end)
+{
+	s64 duration = time_end - time_start;
+
+	if (duration > 1000000LL) {
+		pr_warn("WARNING SMC[0x%x] %sDURATION %lld us\n", rv,
+			rv == TEESMC_RPC_FUNC_IRQ ? "IRQ " : "", duration);
+	}
+
+	/* we needn't handle concurrency here. */
+	if (duration > ctl->max_smc_time)
+		ctl->max_smc_time = duration;
+}
+
+static inline void trace_tee_smc_done(struct tee_smc_xfer_ctl *ctl,
+				      s64 time_start,
+				      s64 time_end)
+{
+	s64 duration = time_end - time_start;
+
+	if (duration > ctl->max_cmd_time)
+		ctl->max_cmd_time = duration;
+}
+
+/* return 0 for nonpreempt rpc, 1 for others */
+static int handle_nonpreempt_rpc(struct smc_param *p)
+{
+	uint32_t func_id = TEESMC_RETURN_GET_RPC_FUNC(p->a0);
+
+	/* for compatibility with legacy tee-os which
+	 * does not support clkmgr
+	 */
+	if (func_id == T6SMC_RPC_CLKMGR_LEGACY_CMD) {
+		p->a1 = tee_clkmgr_handle(p->a1, p->a2);
+		return 0;
+	}
+
+	if (func_id != T6SMC_RPC_NONPREEMPT_CMD)
+		return 1;
+
+	switch (T6SMC_RPC_NONPREEMPT_GET_FUNC(p->a0)) {
+	case T6SMC_RPC_CLKMGR_CMD:
+		/* compatible with old interface */
+		p->a1 = tee_clkmgr_handle(p->a1,
+			(p->a1 & TEE_CLKMGR_TOKEN_NOT_LEGACY) ?
+				p->a2 : (p->a2 | TEE_CLKMGR_OP_ENABLE));
+		break;
+
+	default:
+		pr_err("Unknown non-preempt rpc cmd: 0x%llx\n",
+			(unsigned long long) p->a0);
+	}
+
+	return 0;
+}
+
+static void tee_smc_work(struct tee_smc_xfer_ctl *ctl, struct smc_param *p)
+{
+	s64 start, end;
+
+
+	u64 rv = p->a0 == TEESMC32_FASTCALL_WITH_ARG ?
+		 TEESMC32_FASTCALL_RETURN_FROM_RPC :
+		 TEESMC32_CALL_RETURN_FROM_RPC;
+
+	/* we need to place atomic_inc ahead of xfer_lock
+	 * so that an smc-execution thread can
+	 * see other pending commands without releasing
+	 * xfer_lock
+	 */
+	atomic_inc(&ctl->nr_unbound_cmds);
+
+	mutex_lock(&ctl->xfer_lock);
+
+	start = ktime_to_us(ktime_get());
+
+	while (1) {
+
+		s64 a = ktime_to_us(ktime_get()), b;
+
+		tee_smc_call(p);
+
+		b = ktime_to_us(ktime_get());
+		trace_tee_smc(ctl, TEESMC_RETURN_GET_RPC_FUNC(p->a0), a, b);
+
+		if (!TEESMC_RETURN_IS_RPC(p->a0))
+			goto smc_return;
+
+		if (handle_nonpreempt_rpc(p)) {
+			if (TEESMC_RETURN_GET_RPC_FUNC(p->a0)
+					!= TEESMC_RPC_FUNC_IRQ)
+				goto smc_return;
+		}
+		p->a0 = rv;
+	}
+
+smc_return:
+
+	atomic_dec(&ctl->nr_unbound_cmds);
+
+	mutex_unlock(&ctl->xfer_lock);
+
+	end = ktime_to_us(ktime_get());
+
+	trace_tee_smc_done(ctl, start, end);
+}
+
+
+static inline void __smc_xfer(struct tee_smc_xfer_ctl *ctl, struct smc_param *p)
+{
+	tee_smc_work(ctl, p);
+}
+
+static int platform_bl_init(struct tee_smc_xfer_ctl *ctl)
+{
+	return 0;
+}
+
+static void platform_bl_deinit(struct tee_smc_xfer_ctl *ctl) { }
+
+static void xfer_enqueue_waiters(struct tee_smc_xfer_ctl *ctl)
+{
+	/* TODO: handle excessively long waits */
+	atomic_inc(&ctl->nr_waiting_cmds);
+	wait_for_completion(&ctl->smc_comp);
+}
+
+static void xfer_dequeue_waiters(struct tee_smc_xfer_ctl *ctl)
+{
+	if (atomic_dec_if_positive(&ctl->nr_waiting_cmds) >= 0)
+		complete(&ctl->smc_comp);
+}
+
+void __call_tee(struct smc_param *p)
+{
+	/* NOTE!!! we remove the e_lock_teez(ptee) here !!!! */
+#ifdef ARM64
+	uint64_t orig_a0 = p->a0;
+#else
+	uint32_t orig_a0 = p->a0;
+#endif
+	for (;;) {
+		__smc_xfer(&tee_smc_xfer_ctl, p);
+		if (p->a0 == TEESMC_RETURN_ETHREAD_LIMIT) {
+			xfer_enqueue_waiters(&tee_smc_xfer_ctl);
+			p->a0 = orig_a0;
+		} else {
+			if (!TEESMC_RETURN_IS_RPC(p->a0))
+				xfer_dequeue_waiters(&tee_smc_xfer_ctl);
+			break;
+		}
+	}
+}
+
+inline void smc_xfer(struct smc_param *p)
+{
+	__smc_xfer(&tee_smc_xfer_ctl, p);
+}
+
+int tee_init_smc_xfer(void)
+{
+	int r;
+	struct tee_smc_xfer_ctl *ctl = &tee_smc_xfer_ctl;
+
+	atomic_set(&ctl->nr_unbound_cmds, 0);
+	mutex_init(&ctl->xfer_lock);
+
+	atomic_set(&ctl->nr_waiting_cmds, 0);
+	init_completion(&ctl->smc_comp);
+
+	ctl->max_smc_time = 0LL;
+	ctl->max_cmd_time = 0LL;
+
+	r = platform_bl_init(ctl);
+	if (r < 0)
+		goto err;
+
+	return 0;
+
+err:
+	return r;
+}
+
+void tee_exit_smc_xfer(void)
+{
+	struct tee_smc_xfer_ctl *ctl = &tee_smc_xfer_ctl;
+
+	platform_bl_deinit(ctl);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.h
new file mode 100644
index 0000000..30a2d00
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_smc_xfer.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_SMC_XFER_H
+#define TEE_SMC_XFER_H
+
+struct smc_param;
+
+void smc_xfer(struct smc_param *p);
+void __call_tee(struct smc_param *p);
+
+int tee_init_smc_xfer(void);
+void tee_exit_smc_xfer(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_drv.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_drv.c
new file mode 100644
index 0000000..8230ae9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_drv.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/init.h>
+
+#include <linux/tee_core.h>
+#include <linux/tee_ioc.h>
+
+#include <tee_shm.h>
+#include <tee_supp_com.h>
+#include <tee_wait_queue.h>
+
+#include <arm_common/teesmc.h>
+#include <arm_common/teesmc_st.h>
+
+#include "asm/io.h"
+#include "tee_mem.h"
+#include "tee_tz_op.h"
+#include "tee_tz_priv.h"
+#include "handle.h"
+#include "tee_smc_xfer.h"
+
+#include "tee_procfs.h"
+#include "pm.h"
+
+#define _TEE_TZ_NAME "tkcoredrv"
+#define DEV (ptee->tee->dev)
+
+/* #define TEE_STRESS_OUTERCACHE_FLUSH */
+
+/* magic config: if bit 1 is set, the secure TEE shall handle non-secure IRQs */
+#define SEC_ROM_NO_FLAG_MASK	0x0000
+#define SEC_ROM_IRQ_ENABLE_MASK	0x0001
+#define SEC_ROM_DEFAULT		SEC_ROM_IRQ_ENABLE_MASK
+#define TEE_RETURN_BUSY		0x3
+#define ALLOC_ALIGN		SZ_4K
+
+#define CAPABLE(tee) !(tee->conf & TEE_CONF_FW_NOT_CAPABLE)
+
+static struct tee_tz *tee_tz;
+
+static struct handle_db shm_handle_db = HANDLE_DB_INITIALIZER;
+
+/*******************************************************************
+ * Calling TEE
+ *******************************************************************/
+
+static void handle_rpc_func_cmd_wait_queue(struct tee_tz *ptee,
+		struct teesmc32_arg *arg32)
+{
+	struct teesmc32_param *params;
+
+	if (arg32->num_params != 2)
+		goto bad;
+
+	params = TEESMC32_GET_PARAMS(arg32);
+
+	if ((params[0].attr & TEESMC_ATTR_TYPE_MASK) !=
+		TEESMC_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+	if ((params[1].attr & TEESMC_ATTR_TYPE_MASK) !=
+		TEESMC_ATTR_TYPE_NONE)
+		goto bad;
+
+	switch (arg32->cmd) {
+	case TEE_RPC_WAIT_QUEUE_SLEEP:
+		tee_wait_queue_sleep(DEV, &ptee->wait_queue,
+					params[0].u.value.a);
+		break;
+	case TEE_RPC_WAIT_QUEUE_WAKEUP:
+		tee_wait_queue_wakeup(DEV, &ptee->wait_queue,
+					params[0].u.value.a);
+		break;
+	default:
+		goto bad;
+	}
+
+	arg32->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg32->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+
+
+static void handle_rpc_func_cmd_wait(struct teesmc32_arg *arg32)
+{
+	struct teesmc32_param *params;
+	u32 msec_to_wait;
+
+	if (arg32->num_params != 1)
+		goto bad;
+
+	params = TEESMC32_GET_PARAMS(arg32);
+	msec_to_wait = params[0].u.value.a;
+
+	/* set task's state to interruptible sleep */
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	/* take a nap */
+	schedule_timeout(msecs_to_jiffies(msec_to_wait));
+
+	arg32->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg32->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_func_cmd_to_supplicant(struct tee_tz *ptee,
+		struct teesmc32_arg *arg32)
+{
+	struct teesmc32_param *params;
+	struct tee_rpc_invoke inv;
+	size_t n;
+	uint32_t ret;
+
+	if (arg32->num_params > TEE_RPC_BUFFER_NUMBER) {
+		arg32->ret = TEEC_ERROR_GENERIC;
+		return;
+	}
+
+	params = TEESMC32_GET_PARAMS(arg32);
+
+	memset(&inv, 0, sizeof(inv));
+	inv.cmd = arg32->cmd;
+	/*
+	 * Set a suitable error code in case teed
+	 * ignores the request.
+	 */
+	inv.res = TEEC_ERROR_NOT_IMPLEMENTED;
+	inv.nbr_bf = arg32->num_params;
+	for (n = 0; n < arg32->num_params; n++) {
+		switch (params[n].attr & TEESMC_ATTR_TYPE_MASK) {
+		case TEESMC_ATTR_TYPE_VALUE_INPUT:
+		case TEESMC_ATTR_TYPE_VALUE_INOUT:
+			inv.cmds[n].fd = (int)params[n].u.value.a;
+		/* Fall through */
+		case TEESMC_ATTR_TYPE_VALUE_OUTPUT:
+			inv.cmds[n].type = TEE_RPC_VALUE;
+			break;
+		case TEESMC_ATTR_TYPE_MEMREF_INPUT:
+		case TEESMC_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEESMC_ATTR_TYPE_MEMREF_INOUT:
+			inv.cmds[n].buffer =
+				(void *)(uintptr_t)params[n].u.memref.buf_ptr;
+			inv.cmds[n].size = params[n].u.memref.size;
+			inv.cmds[n].type = TEE_RPC_BUFFER;
+			break;
+		default:
+			arg32->ret = TEEC_ERROR_GENERIC;
+			return;
+		}
+	}
+
+	ret = tee_supp_cmd(ptee->tee, TEE_RPC_ICMD_INVOKE,
+			   &inv, sizeof(inv));
+	if (ret == TEEC_RPC_OK)
+		arg32->ret = inv.res;
+
+	for (n = 0; n < arg32->num_params; n++) {
+		switch (params[n].attr & TEESMC_ATTR_TYPE_MASK) {
+		case TEESMC_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEESMC_ATTR_TYPE_MEMREF_INOUT:
+			/*
+			 * Allow teed to assign a new pointer
+			 * to an out-buffer. Needed when the
+			 * teed allocates a new buffer, for
+			 * instance when loading a TA.
+			 */
+			params[n].u.memref.buf_ptr =
+				(uint32_t)(uintptr_t)inv.cmds[n].buffer;
+			params[n].u.memref.size = inv.cmds[n].size;
+			break;
+		case TEESMC_ATTR_TYPE_VALUE_OUTPUT:
+		case TEESMC_ATTR_TYPE_VALUE_INOUT:
+			params[n].u.value.a = inv.cmds[n].fd;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#ifdef RPMB_SUPPORT
+
+#include "linux/tee_rpmb.h"
+
+/*
+ * Needs to be consistent with
+ * struct rpmb_req {...} defined in
+ * tee/core/arch/arm/tee/tee_rpmb.c
+ */
+struct tee_rpmb_cmd {
+	uint16_t cmd;
+	uint16_t dev_id;
+	uint32_t req_nr;
+	uint32_t resp_nr;
+};
+
+int rpmb_exec(void *req)
+{
+	return tkcore_emmc_rpmb_execute((struct tkcore_rpmb_request *) req);
+}
+
+#endif
+
+static void handle_rpmb_cmd(struct tee_tz *ptee,
+				struct teesmc32_arg *arg32)
+{
+#ifdef RPMB_SUPPORT
+	uint32_t req_size, resp_size;
+	struct teesmc32_param *params;
+
+	uint8_t *data_frame;
+	struct tee_rpmb_cmd *rpmb_req;
+	struct tkcore_rpmb_request teec_rpmb_req;
+	void *resp;
+
+	if (arg32->num_params != TEE_RPMB_BUFFER_NUMBER) {
+		arg32->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	params = TEESMC32_GET_PARAMS(arg32);
+
+	if (((params[0].attr & TEESMC_ATTR_TYPE_MASK)
+			!= TEESMC_ATTR_TYPE_MEMREF_INPUT) ||
+		((params[1].attr & TEESMC_ATTR_TYPE_MASK)
+			!= TEESMC_ATTR_TYPE_MEMREF_OUTPUT)) {
+		arg32->ret = TEEC_ERROR_GENERIC;
+		return;
+	}
+
+
+	rpmb_req = (struct tee_rpmb_cmd *) tee_shm_pool_p2v(
+			   ptee->tee->dev,
+			   ptee->shm_pool,
+			   params[0].u.memref.buf_ptr);
+
+	if (rpmb_req == NULL)  {
+		pr_err("Bad RPC request buffer 0x%x.\n",
+			params[0].u.memref.buf_ptr);
+		arg32->ret = TEEC_ERROR_GENERIC;
+		return;
+	}
+
+	resp = tee_shm_pool_p2v(
+			ptee->tee->dev,
+			ptee->shm_pool,
+			params[1].u.memref.buf_ptr);
+
+	if (resp == NULL) {
+		pr_err("Bad RPC response buffer 0x%x.\n",
+			params[1].u.memref.buf_ptr);
+		arg32->ret = TEEC_ERROR_GENERIC;
+		return;
+	}
+
+	if (rpmb_req->cmd != TEE_RPMB_GET_DEV_INFO) {
+		int r;
+		uint32_t frm_size;
+
+		req_size = rpmb_req->req_nr * 512;
+		resp_size = rpmb_req->resp_nr * 512;
+		frm_size = req_size > resp_size ? req_size : resp_size;
+
+		if (frm_size & (511)) {
+			pr_err("bad RPMB frame size 0x%x\n", frm_size);
+			arg32->ret = TEEC_ERROR_BAD_PARAMETERS;
+			return;
+		}
+
+		teec_rpmb_req.type = rpmb_req->cmd;
+		teec_rpmb_req.blk_cnt = frm_size / 512;
+		/* not used by emmc_rpmb driver */
+		teec_rpmb_req.addr = (uint16_t) 0;
+
+		teec_rpmb_req.data_frame = data_frame = kmalloc(
+				frm_size,
+				GFP_KERNEL);
+
+		if (teec_rpmb_req.data_frame == NULL) {
+			arg32->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			return;
+		}
+
+		memcpy(data_frame,
+			((uint8_t *) rpmb_req) + sizeof(struct tee_rpmb_cmd),
+			req_size);
+		r = tkcore_stay_awake(rpmb_exec, (void *) &teec_rpmb_req);
+
+		if (r != 0) {
+			pr_err("Bad rpmb_exec: %d\n", r);
+			arg32->ret = (uint32_t) r;
+		} else {
+			arg32->ret = TEEC_SUCCESS;
+			memcpy(resp, data_frame, resp_size);
+		}
+
+		kfree(data_frame);
+	} else {
+		struct tee_rpmb_dev_info *dev_info;
+
+		teec_rpmb_req.type = rpmb_req->cmd;
+		teec_rpmb_req.blk_cnt = 1;
+		/* not used by emmc_rpmb driver */
+		teec_rpmb_req.addr = (uint16_t) 0;
+		teec_rpmb_req.data_frame = (uint8_t *) resp;
+
+		dev_info = (struct tee_rpmb_dev_info *) resp;
+
+		dev_info->ret_code =
+			(uint8_t) tkcore_emmc_rpmb_execute(&teec_rpmb_req);
+
+		arg32->ret = TEEC_SUCCESS;
+	}
+
+#else
+	arg32->ret = TEEC_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+
+static void handle_rpc_func_cmd(struct tee_tz *ptee, u32 parg32)
+{
+	struct teesmc32_arg *arg32;
+
+	arg32 = tee_shm_pool_p2v(DEV, ptee->shm_pool, parg32);
+	if (!arg32)
+		return;
+
+	switch (arg32->cmd) {
+	case TEE_RPC_WAIT_QUEUE_SLEEP:
+	case TEE_RPC_WAIT_QUEUE_WAKEUP:
+		handle_rpc_func_cmd_wait_queue(ptee, arg32);
+		break;
+	case TEE_RPC_WAIT:
+		handle_rpc_func_cmd_wait(arg32);
+		break;
+	case TEE_RPC_RPMB_CMD:
+		handle_rpmb_cmd(ptee, arg32);
+		break;
+	default:
+		handle_rpc_func_cmd_to_supplicant(ptee, arg32);
+	}
+}
+
+static struct tee_shm *handle_rpc_alloc(struct tee_tz *ptee, size_t size)
+{
+	struct tee_rpc_alloc rpc_alloc;
+
+	memset((void *) &rpc_alloc, 0, sizeof(struct tee_rpc_alloc));
+
+	rpc_alloc.size = size;
+	tee_supp_cmd(ptee->tee, TEE_RPC_ICMD_ALLOCATE,
+		&rpc_alloc, sizeof(rpc_alloc));
+	return rpc_alloc.shm;
+}
+
+static void handle_rpc_free(struct tee_tz *ptee, struct tee_shm *shm)
+{
+	struct tee_rpc_free rpc_free;
+
+	if (!shm)
+		return;
+	rpc_free.shm = shm;
+	tee_supp_cmd(ptee->tee, TEE_RPC_ICMD_FREE, &rpc_free, sizeof(rpc_free));
+}
+
+static u32 handle_rpc(struct tee_tz *ptee, struct smc_param *param)
+{
+	struct tee_shm *shm;
+	int cookie;
+
+	switch (TEESMC_RETURN_GET_RPC_FUNC(param->a0)) {
+	case TEESMC_RPC_FUNC_ALLOC_ARG:
+		param->a1 =
+			tkcore_shm_pool_alloc(DEV,
+			ptee->shm_pool, param->a1, 4);
+
+		break;
+	case TEESMC_RPC_FUNC_ALLOC_PAYLOAD:
+		/* Can't support payload shared memory with this interface */
+		param->a2 = 0;
+		break;
+	case TEESMC_RPC_FUNC_FREE_ARG:
+		tkcore_shm_pool_free(DEV, ptee->shm_pool, param->a1, 0);
+		break;
+	case TEESMC_RPC_FUNC_FREE_PAYLOAD:
+		/* Can't support payload shared memory with this interface */
+		break;
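+	/* Payload buffers are allocated through the user-space
+	 * supplicant; the cookie from the handle database lets
+	 * the free path look the buffer up again.
+	 */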
+	case TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD:
+		shm = handle_rpc_alloc(ptee, param->a1);
+		if (IS_ERR_OR_NULL(shm)) {
+			param->a1 = 0;
+			break;
+		}
+		cookie = handle_get(&shm_handle_db, shm);
+		if (cookie < 0) {
+			handle_rpc_free(ptee, shm);
+			param->a1 = 0;
+			break;
+		}
+		param->a1 = shm->resv.paddr;
+		param->a2 = cookie;
+		break;
+	case TEESMC_ST_RPC_FUNC_FREE_PAYLOAD:
+		shm = handle_put(&shm_handle_db, param->a1);
+		handle_rpc_free(ptee, shm);
+		break;
+	case TEESMC_RPC_FUNC_CMD:
+		handle_rpc_func_cmd(ptee, param->a1);
+		break;
+	default:
+		pr_warn("Unknown RPC func 0x%x\n",
+			 (u32) TEESMC_RETURN_GET_RPC_FUNC(param->a0));
+		break;
+	}
+
+	/* TODO: refine this logic. The IRQ status can no
+	 * longer be used to determine whether this is a
+	 * fastcall or not.
+	 */
+	if (irqs_disabled())
+		return TEESMC32_FASTCALL_RETURN_FROM_RPC;
+	else
+		return TEESMC32_CALL_RETURN_FROM_RPC;
+}
+
+static void call_tee(struct tee_tz *ptee, uintptr_t parg32,
+			struct teesmc32_arg *arg32, u32 funcid)
+{
+	u32 ret;
+
+	struct smc_param param = {
+		.a1 = parg32,
+	};
+
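+	/* Issue the SMC and service any RPC requests from the
+	 * TEE until a final return code is produced.
+	 */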
+	for (;;) {
+		param.a0 = funcid;
+		__call_tee(&param);
+
+		if (!TEESMC_RETURN_IS_RPC(param.a0))
+			break;
+
+		funcid = handle_rpc(ptee, &param);
+	}
+
+	ret = param.a0;
+
+	if (unlikely(ret != TEESMC_RETURN_OK &&
+			ret != TEESMC_RETURN_UNKNOWN_FUNCTION)) {
+		arg32->ret = TEEC_ERROR_COMMUNICATION;
+		arg32->ret_origin = TEEC_ORIGIN_COMMS;
+	}
+}
+
+static inline void stdcall_tee(struct tee_tz *ptee, uintptr_t parg32,
+				struct teesmc32_arg *arg32)
+{
+	call_tee(ptee, parg32, arg32, TEESMC32_CALL_WITH_ARG);
+}
+
+static inline void fastcall_tee(struct tee_tz *ptee, uintptr_t parg32,
+				struct teesmc32_arg *arg32)
+{
+	call_tee(ptee, parg32, arg32, TEESMC32_FASTCALL_WITH_ARG);
+}
+
+/*******************************************************************
+ * TEE service invoke formatting
+ *******************************************************************/
+
+/* allocate tee service argument buffer and return virtual address */
+static void *alloc_tee_arg(struct tee_tz *ptee, unsigned long *p, size_t l)
+{
+	void *vaddr;
+
+	WARN_ON(!CAPABLE(ptee->tee));
+
+	if ((p == NULL) || (l == 0))
+		return NULL;
+
+	/* assume 4-byte alignment is sufficient */
+	*p = tkcore_shm_pool_alloc(DEV, ptee->shm_pool, l, ALLOC_ALIGN);
+	if (*p == 0)
+		return NULL;
+
+	vaddr = tee_shm_pool_p2v(DEV, ptee->shm_pool, *p);
+
+
+	return vaddr;
+}
+
+/* free tee service argument buffer (from its physical address) */
+static void free_tee_arg(struct tee_tz *ptee, unsigned long p)
+{
+	WARN_ON(!CAPABLE(ptee->tee));
+
+	if (p)
+		tkcore_shm_pool_free(DEV, ptee->shm_pool, p, 0);
+
+}
+
+static uint32_t get_cache_attrs(struct tee_tz *ptee)
+{
+	if (tee_shm_pool_is_cached(ptee->shm_pool))
+		return TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT;
+	else
+		return TEESMC_ATTR_CACHE_NONCACHE << TEESMC_ATTR_CACHE_SHIFT;
+}
+
+static uint32_t param_type_teec2teesmc(uint8_t type)
+{
+	switch (type) {
+	case TEEC_NONE:
+		return TEESMC_ATTR_TYPE_NONE;
+	case TEEC_VALUE_INPUT:
+		return TEESMC_ATTR_TYPE_VALUE_INPUT;
+	case TEEC_VALUE_OUTPUT:
+		return TEESMC_ATTR_TYPE_VALUE_OUTPUT;
+	case TEEC_VALUE_INOUT:
+		return TEESMC_ATTR_TYPE_VALUE_INOUT;
+	case TEEC_MEMREF_TEMP_INPUT:
+	case TEEC_MEMREF_PARTIAL_INPUT:
+		return TEESMC_ATTR_TYPE_MEMREF_INPUT;
+	case TEEC_MEMREF_TEMP_OUTPUT:
+	case TEEC_MEMREF_PARTIAL_OUTPUT:
+		return TEESMC_ATTR_TYPE_MEMREF_OUTPUT;
+	case TEEC_MEMREF_WHOLE:
+	case TEEC_MEMREF_TEMP_INOUT:
+	case TEEC_MEMREF_PARTIAL_INOUT:
+		return TEESMC_ATTR_TYPE_MEMREF_INOUT;
+	default:
+		WARN_ON(true);
+		return 0;
+	}
+}
+
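+/* Translate GP client parameters into TEESMC32 parameters; memrefs
+ * are passed as the physical address and size of their backing
+ * shared memory.
+ */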
+static void set_params(struct tee_tz *ptee,
+	struct teesmc32_param params32[TEEC_CONFIG_PAYLOAD_REF_COUNT],
+	uint32_t param_types,
+	struct tee_data *data)
+{
+	size_t n;
+	struct tee_shm *shm;
+	struct TEEC_Value *value;
+
+	for (n = 0; n < TEEC_CONFIG_PAYLOAD_REF_COUNT; n++) {
+		uint32_t type = TEEC_PARAM_TYPE_GET(param_types, n);
+
+		params32[n].attr = param_type_teec2teesmc(type);
+		if (params32[n].attr == TEESMC_ATTR_TYPE_NONE)
+			continue;
+		if (params32[n].attr < TEESMC_ATTR_TYPE_MEMREF_INPUT) {
+			value = (struct TEEC_Value *)&data->params[n];
+			params32[n].u.value.a = value->a;
+			params32[n].u.value.b = value->b;
+			continue;
+		}
+		shm = data->params[n].shm;
+		params32[n].attr |= get_cache_attrs(ptee);
+		params32[n].u.memref.buf_ptr = shm->resv.paddr;
+		params32[n].u.memref.size = shm->size_req;
+	}
+}
+
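+/* Copy back value parameters and updated memref sizes returned
+ * by the TEE.
+ */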
+static void get_params(struct tee_data *data,
+	struct teesmc32_param params32[TEEC_CONFIG_PAYLOAD_REF_COUNT])
+{
+	size_t n;
+	struct tee_shm *shm;
+	struct TEEC_Value *value;
+
+	for (n = 0; n < TEEC_CONFIG_PAYLOAD_REF_COUNT; n++) {
+		if (params32[n].attr == TEESMC_ATTR_TYPE_NONE)
+			continue;
+		if (params32[n].attr < TEESMC_ATTR_TYPE_MEMREF_INPUT) {
+			value = &data->params[n].value;
+			value->a = params32[n].u.value.a;
+			value->b = params32[n].u.value.b;
+			continue;
+		}
+		shm = data->params[n].shm;
+		shm->size_req = params32[n].u.memref.size;
+	}
+}
+
+/*
+ * tee_open_session - invoke TEE to open a GP TEE session
+ */
+static int tz_open(struct tee_session *sess, struct tee_cmd *cmd)
+{
+	struct tee *tee;
+	struct tee_tz *ptee;
+	int ret = 0;
+
+	struct teesmc32_arg *arg32;
+	struct teesmc32_param *params32;
+	struct teesmc_meta_open_session *meta;
+	uintptr_t parg32;
+	uintptr_t pmeta;
+	size_t num_meta = 1;
+	uint8_t *ta;
+	struct TEEC_UUID *uuid;
+
+	WARN_ON(!sess->ctx->tee);
+	WARN_ON(!sess->ctx->tee->priv);
+	tee = sess->ctx->tee;
+	ptee = tee->priv;
+
+	if (cmd->uuid)
+		uuid = cmd->uuid->resv.kaddr;
+	else
+		uuid = NULL;
+
+	if (!CAPABLE(ptee->tee)) {
+		pr_err("tkcoredrv: %s: not capable\n", __func__);
+		return -EBUSY;
+	}
+
+	/* case ta binary is inside the open request */
+	ta = NULL;
+	if (cmd->ta)
+		ta = cmd->ta->resv.kaddr;
+	if (ta)
+		num_meta++;
+
+	arg32 = alloc_tee_arg(ptee, &parg32,
+		TEESMC32_GET_ARG_SIZE(
+		TEEC_CONFIG_PAYLOAD_REF_COUNT + num_meta));
+	meta = alloc_tee_arg(ptee, &pmeta, sizeof(*meta));
+
+	if ((arg32 == NULL) || (meta == NULL)) {
+		free_tee_arg(ptee, parg32);
+		free_tee_arg(ptee, pmeta);
+		return -ENOMEM;
+	}
+
+	memset(arg32, 0, sizeof(*arg32));
+	memset(meta, 0, sizeof(*meta));
+	arg32->num_params = TEEC_CONFIG_PAYLOAD_REF_COUNT + num_meta;
+	params32 = TEESMC32_GET_PARAMS(arg32);
+
+	arg32->cmd = TEESMC_CMD_OPEN_SESSION;
+
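+	/* params[0] carries the open-session metadata (UUID and
+	 * client login); a TA binary, when supplied, rides in
+	 * params[1].
+	 */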
+	params32[0].u.memref.buf_ptr = pmeta;
+	params32[0].u.memref.size = sizeof(*meta);
+	params32[0].attr = TEESMC_ATTR_TYPE_MEMREF_INPUT |
+			   TEESMC_ATTR_META | get_cache_attrs(ptee);
+
+	if (ta) {
+		params32[1].u.memref.buf_ptr =
+			tee_shm_pool_v2p(DEV, ptee->shm_pool,
+				cmd->ta->resv.kaddr);
+		params32[1].u.memref.size = cmd->ta->size_req;
+		params32[1].attr = TEESMC_ATTR_TYPE_MEMREF_INPUT |
+				   TEESMC_ATTR_META | get_cache_attrs(ptee);
+	}
+
+	if (uuid != NULL)
+		memcpy(meta->uuid, uuid, TEESMC_UUID_LEN);
+	meta->clnt_login = 0; /* FIXME: is this reliable ? used ? */
+
+	params32 += num_meta;
+	set_params(ptee, params32, cmd->param.type, &cmd->param);
+
+	stdcall_tee(ptee, parg32, arg32);
+
+	get_params(&cmd->param, params32);
+
+	if (arg32->ret != TEEC_ERROR_COMMUNICATION) {
+		sess->sessid = arg32->session;
+		cmd->err = arg32->ret;
+		cmd->origin = arg32->ret_origin;
+	} else
+		ret = -EBUSY;
+
+	free_tee_arg(ptee, parg32);
+	free_tee_arg(ptee, pmeta);
+
+	return ret;
+}
+
+/*
+ * tee_invoke_command - invoke TEE to invoke a GP TEE command
+ */
+static int tz_invoke(struct tee_session *sess, struct tee_cmd *cmd)
+{
+	struct tee *tee;
+	struct tee_tz *ptee;
+	int ret = 0;
+
+	struct teesmc32_arg *arg32;
+	uintptr_t parg32;
+	struct teesmc32_param *params32;
+
+	WARN_ON(!sess->ctx->tee);
+	WARN_ON(!sess->ctx->tee->priv);
+	tee = sess->ctx->tee;
+	ptee = tee->priv;
+
+	if (!CAPABLE(tee)) {
+		pr_err("tkcoredrv: %s: not capable\n", __func__);
+		return -EBUSY;
+	}
+
+	arg32 = (typeof(arg32))alloc_tee_arg(ptee, &parg32,
+		TEESMC32_GET_ARG_SIZE(TEEC_CONFIG_PAYLOAD_REF_COUNT));
+	if (!arg32) {
+		free_tee_arg(ptee, parg32);
+		return TEEC_ERROR_OUT_OF_MEMORY;
+	}
+
+	memset(arg32, 0, sizeof(*arg32));
+	arg32->num_params = TEEC_CONFIG_PAYLOAD_REF_COUNT;
+	params32 = TEESMC32_GET_PARAMS(arg32);
+
+	arg32->cmd = TEESMC_CMD_INVOKE_COMMAND;
+	arg32->session = sess->sessid;
+	arg32->ta_func = cmd->cmd;
+
+	set_params(ptee, params32, cmd->param.type, &cmd->param);
+
+	stdcall_tee(ptee, parg32, arg32);
+
+	get_params(&cmd->param, params32);
+
+	if (arg32->ret != TEEC_ERROR_COMMUNICATION) {
+		cmd->err = arg32->ret;
+		cmd->origin = arg32->ret_origin;
+	} else
+		ret = -EBUSY;
+
+	free_tee_arg(ptee, parg32);
+
+	return ret;
+}
+
+/*
+ * tee_cancel_command - invoke TEE to cancel a GP TEE command
+ */
+static int tz_cancel(struct tee_session *sess, struct tee_cmd *cmd)
+{
+	struct tee *tee;
+	struct tee_tz *ptee;
+	int ret = 0;
+
+	struct teesmc32_arg *arg32;
+	uintptr_t parg32;
+
+	WARN_ON(!sess->ctx->tee);
+	WARN_ON(!sess->ctx->tee->priv);
+	tee = sess->ctx->tee;
+	ptee = tee->priv;
+
+
+	arg32 = alloc_tee_arg(ptee, &parg32, TEESMC32_GET_ARG_SIZE(0));
+	if (arg32 == NULL) {
+		free_tee_arg(ptee, parg32);
+		return TEEC_ERROR_OUT_OF_MEMORY;
+	}
+
+	memset(arg32, 0, sizeof(*arg32));
+	arg32->cmd = TEESMC_CMD_CANCEL;
+	arg32->session = sess->sessid;
+
+	fastcall_tee(ptee, parg32, arg32);
+
+	if (arg32->ret == TEEC_ERROR_COMMUNICATION)
+		ret = -EBUSY;
+
+	free_tee_arg(ptee, parg32);
+
+	return ret;
+}
+
+/*
+ * tee_close_session - invoke TEE to close a GP TEE session
+ */
+static int tz_close(struct tee_session *sess)
+{
+	struct tee *tee;
+	struct tee_tz *ptee;
+	int ret = 0;
+
+	struct teesmc32_arg *arg32;
+	uintptr_t parg32;
+
+	WARN_ON(!sess->ctx->tee);
+	WARN_ON(!sess->ctx->tee->priv);
+	tee = sess->ctx->tee;
+	ptee = tee->priv;
+
+
+	if (!CAPABLE(tee)) {
+		pr_err("tkcoredrv: %s: not capable\n", __func__);
+		return -EBUSY;
+	}
+
+	arg32 = alloc_tee_arg(ptee, &parg32, TEESMC32_GET_ARG_SIZE(0));
+	if (arg32 == NULL) {
+		pr_err("tkcoredrv: failed to allocate tee arg\n");
+		free_tee_arg(ptee, parg32);
+		return TEEC_ERROR_OUT_OF_MEMORY;
+	}
+
+	memset(arg32, 0, sizeof(*arg32));
+	arg32->cmd = TEESMC_CMD_CLOSE_SESSION;
+	arg32->session = sess->sessid;
+
+	stdcall_tee(ptee, parg32, arg32);
+
+	if (arg32->ret == TEEC_ERROR_COMMUNICATION)
+		ret = -EBUSY;
+
+	free_tee_arg(ptee, parg32);
+
+	return ret;
+}
+
+static struct tee_shm *tz_alloc(struct tee *tee, size_t size, uint32_t flags)
+{
+	struct tee_shm *shm = NULL;
+	struct tee_tz *ptee;
+	size_t size_aligned;
+
+	WARN_ON(!tee->priv);
+	ptee = tee->priv;
+
+
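+	/* Round the request up to a whole number of 4 KiB pages;
+	 * a result of zero means the requested size overflowed.
+	 */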
+	size_aligned = ((size / SZ_4K) + 1) * SZ_4K;
+	if (unlikely(size_aligned == 0)) {
+		pr_err("requested size too big\n");
+		return NULL;
+	}
+
+	shm = devm_kzalloc(tee->dev, sizeof(struct tee_shm), GFP_KERNEL);
+	if (!shm)
+		return ERR_PTR(-ENOMEM);
+
+	shm->size_alloc = ((size / SZ_4K) + 1) * SZ_4K;
+	shm->size_req = size;
+
+	shm->resv.paddr = tkcore_shm_pool_alloc(tee->dev, ptee->shm_pool,
+						shm->size_alloc, ALLOC_ALIGN);
+	if (!shm->resv.paddr) {
+		pr_err("tkcoredrv: %s cannot alloc memory, size 0x%lx\n",
+			__func__, (unsigned long) shm->size_alloc);
+		devm_kfree(tee->dev, shm);
+		return ERR_PTR(-ENOMEM);
+	}
+	shm->resv.kaddr =
+		tee_shm_pool_p2v(tee->dev, ptee->shm_pool, shm->resv.paddr);
+	if (!shm->resv.kaddr) {
+		pr_err("tkcoredrv: %s: p2v(%p)=0\n",
+			__func__,
+			(void *) (unsigned long) shm->resv.paddr);
+		tkcore_shm_pool_free(tee->dev,
+			ptee->shm_pool, shm->resv.paddr, NULL);
+		devm_kfree(tee->dev, shm);
+		return ERR_PTR(-EFAULT);
+	}
+	shm->flags = flags;
+	if (ptee->shm_cached)
+		shm->flags |= TEE_SHM_CACHED;
+
+	return shm;
+}
+
+static void tz_free(struct tee_shm *shm)
+{
+	size_t size;
+	int ret;
+	struct tee *tee;
+	struct tee_tz *ptee;
+
+	WARN_ON(!shm->tee);
+	WARN_ON(!shm->tee->priv);
+	tee = shm->tee;
+	ptee = tee->priv;
+
+
+	ret = tkcore_shm_pool_free(tee->dev, ptee->shm_pool,
+		shm->resv.paddr, &size);
+	if (!ret) {
+		devm_kfree(tee->dev, shm);
+		shm = NULL;
+	}
+}
+
+static int tz_shm_inc_ref(struct tee_shm *shm)
+{
+	struct tee *tee;
+	struct tee_tz *ptee;
+
+	WARN_ON(!shm->tee);
+	WARN_ON(!shm->tee->priv);
+	tee = shm->tee;
+	ptee = tee->priv;
+
+	return tee_shm_pool_incref(tee->dev, ptee->shm_pool, shm->resv.paddr);
+}
+
+#ifdef CONFIG_OUTER_CACHE
+/*
+ * Synchronised outer cache maintenance support
+ */
+#ifndef CONFIG_ARM_TZ_SUPPORT
+/* weak outer_tz_mutex in case not supported by kernel */
+bool __weak outer_tz_mutex(unsigned long *p)
+{
+	if (p != NULL)
+		return false;
+	return true;
+}
+#endif
+
+/* register_outercache_mutex - Negotiate/Disable outer cache shared mutex */
+static int register_outercache_mutex(struct tee_tz *ptee, bool reg)
+{
+	unsigned long *vaddr = NULL;
+	int ret = 0;
+	struct smc_param param;
+	uintptr_t paddr = 0;
+
+	WARN_ON(!CAPABLE(ptee->tee));
+
+	if ((reg == true) && (ptee->tz_outer_cache_mutex != NULL)) {
+		pr_err("outer cache shared mutex already registered\n");
+		return -EINVAL;
+	}
+	if ((reg == false) && (ptee->tz_outer_cache_mutex == NULL))
+		return 0;
+
+	if (reg == false) {
+		vaddr = ptee->tz_outer_cache_mutex;
+		ptee->tz_outer_cache_mutex = NULL;
+		goto out;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
+	param.a1 = TEESMC_ST_L2CC_MUTEX_GET_ADDR;
+	smc_xfer(&param);
+
+	if (param.a0 != TEESMC_RETURN_OK) {
+		pr_err("no TZ l2cc mutex service supported\n");
+		goto out;
+	}
+	paddr = param.a2;
+
+	vaddr = tee_map_cached_shm(paddr, sizeof(u32));
+	if (vaddr == NULL) {
+		pr_err("TZ l2cc mutex disabled: ioremap failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (outer_tz_mutex(vaddr) == false) {
+		pr_err("TZ l2cc mutex disabled: outer cache refused\n");
+		goto out;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
+	param.a1 = TEESMC_ST_L2CC_MUTEX_ENABLE;
+	smc_xfer(&param);
+
+	if (param.a0 != TEESMC_RETURN_OK) {
+		pr_warn("TZ l2cc mutex disabled: TZ enable failed\n");
+		goto out;
+	}
+	ptee->tz_outer_cache_mutex = vaddr;
+
+out:
+	if (ptee->tz_outer_cache_mutex == NULL) {
+		memset(&param, 0, sizeof(param));
+		param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
+		param.a1 = TEESMC_ST_L2CC_MUTEX_DISABLE;
+		smc_xfer(&param);
+		outer_tz_mutex(NULL);
+		if (vaddr)
+			iounmap(vaddr);
+
+	}
+
+	return ret;
+}
+#endif
+
+/* configure_shm - Negotiate Shared Memory configuration with teetz. */
+static int configure_shm(struct tee_tz *ptee)
+{
+	struct smc_param param = { 0 };
+	size_t shm_size = -1;
+	int ret = 0;
+
+	WARN_ON(!CAPABLE(ptee->tee));
+
+	param.a0 = TEESMC32_ST_FASTCALL_GET_SHM_CONFIG;
+	smc_xfer(&param);
+
+	if (param.a0 != TEESMC_RETURN_OK) {
+		pr_err("shm service not available: 0x%x",
+			(uint) param.a0);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ptee->shm_paddr = param.a1;
+	shm_size = param.a2;
+	ptee->shm_cached = (bool)param.a3;
+
+	if (ptee->shm_cached)
+		ptee->shm_vaddr = tee_map_cached_shm(ptee->shm_paddr, shm_size);
+	else
+		ptee->shm_vaddr = ioremap_nocache(ptee->shm_paddr, shm_size);
+
+	if (ptee->shm_vaddr == NULL) {
+		pr_err("shm ioremap failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ptee->shm_pool = tee_shm_pool_create(DEV, shm_size,
+		ptee->shm_vaddr, ptee->shm_paddr);
+
+	if (!ptee->shm_pool) {
+		pr_err("shm pool creation failed (%zu)", shm_size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ptee->shm_cached)
+		tee_shm_pool_set_cached(ptee->shm_pool);
+out:
+	return ret;
+}
+
+static int tz_start(struct tee *tee)
+{
+	struct tee_tz *ptee;
+	int ret;
+
+	WARN_ON(!tee || !tee->priv);
+
+	if (!CAPABLE(tee)) {
+		pr_err("not capable\n");
+		return -EBUSY;
+	}
+
+	ptee = tee->priv;
+	WARN_ON(ptee->started);
+	ptee->started = true;
+
+	ret = configure_shm(ptee);
+	if (ret)
+		goto exit;
+
+#ifdef CONFIG_MEDIATEK_SOLUTION
+	{
+#define TKCORE_GET_ROOT_OF_TRUST_INFO 0xBF000202
+		struct smc_param param = { 0 };
+
+		/* Tell the TEE OS to fetch the root of trust.
+		 *
+		 * We are not interested in the return value;
+		 * the availability of the RoT affects the
+		 * behavior of other trusted apps anyway.
+		 */
+
+		param.a0 = TKCORE_GET_ROOT_OF_TRUST_INFO;
+		smc_xfer(&param);
+	}
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+	ret = register_outercache_mutex(ptee, true);
+	if (ret)
+		goto exit;
+#endif
+
+exit:
+	if (ret)
+		ptee->started = false;
+
+	return ret;
+}
+
+static int tz_stop(struct tee *tee)
+{
+	struct tee_tz *ptee;
+
+	WARN_ON(!tee || !tee->priv);
+
+	ptee = tee->priv;
+
+	if (!CAPABLE(tee)) {
+		pr_err("tee: bad state\n");
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_OUTER_CACHE
+	register_outercache_mutex(ptee, false);
+#endif
+	tee_shm_pool_destroy(tee->dev, ptee->shm_pool);
+	iounmap(ptee->shm_vaddr);
+	ptee->started = false;
+
+	return 0;
+}
+
+static void __tee_smc_call(struct smc_param *p)
+{
+	tee_smc_call(p);
+}
+
+/******************************************************************************/
+
+const struct tee_ops tee_tz_fops = {
+	.type = "tz",
+	.owner = THIS_MODULE,
+	.start = tz_start,
+	.stop = tz_stop,
+	.invoke = tz_invoke,
+	.cancel = tz_cancel,
+	.open = tz_open,
+	.close = tz_close,
+	.alloc = tz_alloc,
+	.free = tz_free,
+	.shm_inc_ref = tz_shm_inc_ref,
+
+	.call_tee = smc_xfer,
+	.raw_call_tee = __tee_smc_call,
+};
+
+static int tz_tee_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	struct tee *tee = platform_get_drvdata(pdev);
+	struct tee_tz *ptee = tee->priv;
+
+	tee_tz = ptee;
+
+	ptee->started = false;
+
+	tee_wait_queue_init(&ptee->wait_queue);
+
+	if (ret) {
+		pr_err("dev=%s, Secure failed (%d)\n",
+			tee->name, ret);
+	}
+
+	return ret;
+}
+
+static void tz_tee_deinit(struct platform_device *pdev)
+{
+	struct tee *tee = platform_get_drvdata(pdev);
+	struct tee_tz *ptee = tee->priv;
+
+	if (!CAPABLE(tee))
+		return;
+
+	tee_wait_queue_exit(&ptee->wait_queue);
+}
+
+static int tz_tee_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct device *dev = &pdev->dev;
+	struct tee *tee;
+	struct tee_tz *ptee;
+
+	tee = tee_core_alloc(dev, _TEE_TZ_NAME, pdev->id, &tee_tz_fops,
+				sizeof(struct tee_tz));
+	if (!tee)
+		return -ENOMEM;
+
+	ptee = tee->priv;
+	ptee->tee = tee;
+
+	platform_set_drvdata(pdev, tee);
+
+	ret = tz_tee_init(pdev);
+	if (ret)
+		goto bail0;
+
+	ret = tee_core_add(tee);
+	if (ret)
+		goto bail1;
+
+	ret = __tee_get(tee);
+	if (ret) {
+		tz_tee_deinit(pdev);
+		tee_core_del(tee);
+
+		return ret;
+	}
+
+	return 0;
+
+bail1:
+	tz_tee_deinit(pdev);
+bail0:
+	tee_core_free(tee);
+	return ret;
+}
+
+static int tz_tee_remove(struct platform_device *pdev)
+{
+	struct tee *tee = platform_get_drvdata(pdev);
+
+	tz_tee_deinit(pdev);
+	tee_core_del(tee);
+	return 0;
+}
+
+static const struct of_device_id tz_tee_match[] = {
+	{
+		.compatible = "trustkernel,tzdrv",
+	},
+	{},
+};
+
+static struct platform_driver tz_tee_driver = {
+	.probe = tz_tee_probe,
+	.remove = tz_tee_remove,
+
+	.driver = {
+		.name = "tzdrv",
+		.owner = THIS_MODULE,
+		.of_match_table = tz_tee_match,
+	},
+};
+
+static struct platform_device tz_0_plt_device = {
+	.name = "tzdrv",
+	.id = 0,
+	.dev = {
+		/*	.platform_data = tz_0_tee_data,*/
+	},
+};
+
+static int __init tee_tz_init(void)
+{
+	int rc;
+
+	pr_info("TrustKernel TEE Driver initialization\n");
+
+	rc = tee_init_smc_xfer();
+	if (rc < 0)
+		return rc;
+
+	rc = platform_device_register(&tz_0_plt_device);
+	if (rc < 0) {
+		pr_err(
+			"failed to register the platform devices 0 (rc=%d)\n",
+			rc);
+		goto err0;
+	}
+
+	rc = platform_driver_register(&tz_tee_driver);
+	if (rc < 0) {
+		pr_err("failed to probe the platform driver (rc=%d)\n",
+			rc);
+		goto err1;
+	}
+
+	return 0;
+
+err1:
+	platform_device_unregister(&tz_0_plt_device);
+err0:
+	tee_exit_smc_xfer();
+
+	return rc;
+}
+
+static void __exit tee_tz_exit(void)
+{
+	pr_info("TrustKernel TEE Driver Release\n");
+
+	platform_driver_unregister(&tz_tee_driver);
+	platform_device_unregister(&tz_0_plt_device);
+
+	tee_exit_smc_xfer();
+}
+
+#ifndef MODULE
+rootfs_initcall(tee_tz_init);
+#else
+module_init(tee_tz_init);
+#endif
+module_exit(tee_tz_exit);
+
+MODULE_AUTHOR("TrustKernel");
+MODULE_DESCRIPTION("TrustKernel TKCore TZ driver");
+MODULE_SUPPORTED_DEVICE("");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_op.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_op.h
new file mode 100644
index 0000000..9e38cde
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_op.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_ARMV7_OP_H__
+#define __TEE_ARMV7_OP_H__
+
+enum t_issw_service_id {
+	/*
+	 * ("SSAPI_PRE_INIT_SERV")
+	 */
+	SSAPI_PRE_INIT_SERV = 1,
+
+	/*
+	 * ("SSAPI_POST_SPEEDUP_INIT_SERV")
+	 * Reserved, Not used
+	 */
+	SSAPI_POST_SPEEDUP_INIT_SERV = 2,
+
+	/*
+	 * ("SSAPI_ISSW_IMPORT_SERV")
+	 */
+	SSAPI_ISSW_IMPORT_SERV = 3,
+
+	/*
+	 * ("SSAPI_RET_FROM_INT_SERV")
+	 */
+	SSAPI_RET_FROM_INT_SERV = 4,
+
+	/*
+	 * ("SSAPI_RET_FROM_RPC_SERV")
+	 */
+	SSAPI_RET_FROM_RPC_SERV = 5,
+
+	/*
+	 * "ISSWAPI_ISSW_EXECUTE_SERV" is linked to ROM code
+	 * ("SSAPI_ISSW_EXECUTE_SERV")
+	 */
+	ISSWAPI_ISSW_EXECUTE_SERV = 6,
+	ISSWAPI_PROT_APPL_MSG_SEND = 0x10000000,
+	ISSWAPI_EXTERNAL_CODE_CHECK = 0x10000001,
+	ISSWAPI_SECURE_LOAD = 0x10000002,
+	ISSWAPI_ISSW_REIMPORT_PUB_KEYS = 0x10000003,
+
+	/* Accessible only on request */
+	ISSWAPI_WRITE_L2CC = 0x10000004,
+	ISSWAPI_WRITE_CP15_SCTLR = 0x10000005,
+	ISSWAPI_READ_CP15_SCTLR = 0x10000006,
+	ISSWAPI_WRITE_CP15_ACTLR = 0x10000007,
+	ISSWAPI_READ_CP15_ACTLR = 0x10000008,
+	ISSWAPI_WRITE_CP15_DIAGR = 0x10000009,
+	ISSWAPI_READ_CP15_DIAGR = 0x1000000A,
+
+	ISSWAPI_EXECUTE_TA = 0x11000001,
+	ISSWAPI_CLOSE_TA = 0x11000002,
+	ISSWAPI_FLUSH_BOOT_CODE = 0x11000003,
+	/* Generic, restricted to be used by u-boot */
+	ISSWAPI_VERIFY_SIGNED_HEADER = 0x11000005,
+	ISSWAPI_VERIFY_HASH = 0x11000006,
+	/* 8500 only, restricted to be used by u-boot */
+	ISSWAPI_GET_RT_FLAGS = 0x11000007,
+
+	/* For TEE Client API 1.0 */
+	ISSWAPI_TEEC_OPEN_SESSION = 0x11000008,
+	ISSWAPI_TEEC_CLOSE_SESSION = 0x11000009,
+	ISSWAPI_TEEC_INVOKE_COMMAND = 0x1100000a,
+	ISSWAPI_REGISTER_RPC = 0x1100000b,	/* this is NOT a GP TEE API ! */
+	ISSWAPI_SET_SEC_DDR = 0x1100000c,	/* this is NOT a GP TEE API ! */
+	ISSWAPI_TEEC_CANCEL_COMMAND = 0x1100000d,
+	ISSWAPI_TEEC_REGISTER_MEMORY = 0x1100000e,
+	ISSWAPI_TEEC_UNREGISTER_MEMORY = 0x1100000f,
+
+	/* Internal command */
+	ISSWAPI_TEE_DEINIT_CPU = 0x11000010,
+	ISSWAPI_TEE_CRASH_CPU = 0x11000011,
+	ISSWAPI_TEE_SET_CORE_TRACE_LEVEL = 0x11000012,
+	ISSWAPI_TEE_GET_CORE_TRACE_LEVEL = 0x11000013,
+	ISSWAPI_TEE_SET_TA_TRACE_LEVEL = 0x11000014,
+	ISSWAPI_TEE_GET_TA_TRACE_LEVEL = 0x11000015,
+	ISSWAPI_TEE_GET_CORE_STATUS = 0x11000016,
+	ISSWAPI_TEE_FLUSH_CACHE = 0x11000017,
+
+	ISSWAPI_REGISTER_DEF_SHM = 0x11000020,
+	ISSWAPI_UNREGISTER_DEF_SHM = 0x11000021,
+	ISSWAPI_REGISTER_IRQFWD = 0x11000022,
+	ISSWAPI_UNREGISTER_IRQFWD = 0x11000023,
+	ISSWAPI_GET_SHM_START = 0x11000024,
+	ISSWAPI_GET_SHM_SIZE = 0x11000025,
+	ISSWAPI_GET_SHM_CACHED = 0x11000026,
+
+	ISSWAPI_ENABLE_L2CC_MUTEX = 0x20000000,
+	ISSWAPI_DISABLE_L2CC_MUTEX = 0x20000001,
+	ISSWAPI_GET_L2CC_MUTEX = 0x20000002,
+	ISSWAPI_SET_L2CC_MUTEX = 0x20000003,
+
+	ISSWAPI_LOAD_TEE = 0x20000004,
+
+};
+
+/*
+ * tee_msg_send - generic part of the msg sent to the TEE
+ */
+struct tee_msg_send {
+	unsigned int service;
+};
+
+/*
+ * tee_msg_recv - default structure of TEE service output message
+ */
+struct tee_msg_recv {
+	int duration;
+	uint32_t res;
+	uint32_t origin;
+};
+
+/*
+ * tee_register_irqfwd_xxx - (un)register callback for interrupt forwarding
+ */
+struct tee_register_irqfwd_send {
+	struct tee_msg_send header;
+	struct {
+		unsigned long cb;
+	} data;
+};
+struct tee_register_irqfwd_recv {
+	struct tee_msg_recv header;
+};
+
+/*
+ * tee_get_l2cc_mutex - input/output argument structures
+ */
+struct tee_get_l2cc_mutex_send {
+	struct tee_msg_send header;
+};
+struct tee_get_l2cc_mutex_recv {
+	struct tee_msg_recv header;
+	struct {
+		unsigned long paddr;
+	} data;
+};
+
+/**
+ * struct tee_identity - Represents the identity of the client
+ * @login: Login id
+ * @uuid: UUID as defined above
+ */
+struct tee_identity {
+	uint32_t login;
+	struct TEEC_UUID uuid;
+};
+
+/*
+ * tee_open_session_data - input arg structure for TEE open session service
+ */
+struct tee_open_session_data {
+	struct ta_signed_header_t *ta;
+	struct TEEC_UUID uuid;
+	uint32_t param_types;
+	struct TEEC_Value params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+	struct tee_identity client_id;
+	uint32_t params_flags[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+};
+
+/*
+ * tee_open_session_send - input arg msg for TEE open session service
+ */
+struct tee_open_session_send {
+	struct tee_msg_send header;
+	struct tee_open_session_data data;
+};
+
+/*
+ * tee_open_session_recv - output arg structure for TEE open session service
+ */
+struct tee_open_session_recv {
+	struct tee_msg_recv header;
+	uint32_t sess;
+	struct TEEC_Value params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+};
+
+/*
+ * tee_invoke_command_data - input arg structure for TEE invoke cmd service
+ */
+struct tee_invoke_command_data {
+	uint32_t sess;
+	uint32_t cmd;
+	uint32_t param_types;
+	struct TEEC_Value params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+	uint32_t params_flags[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+};
+
+struct tee_invoke_command_send {
+	struct tee_msg_send header;
+	struct tee_invoke_command_data data;
+};
+
+/*
+ * tee_invoke_command_recv - output arg structure for TEE invoke cmd service
+ */
+struct tee_invoke_command_recv {
+	struct tee_msg_recv header;
+	struct TEEC_Value params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+};
+
+/*
+ * tee_cancel_command_data - input arg structure for TEE cancel service
+ */
+struct tee_cancel_command_data {
+	uint32_t sess;
+};
+
+/*
+ * tee_cancel_command_send - input msg structure for TEE cancel service
+ */
+struct tee_cancel_command_send {
+	struct tee_msg_send header;
+	struct tee_cancel_command_data data;
+};
+
+/*
+ * tee_close_session_data - input arg structure for TEE close session service
+ */
+struct tee_close_session_data {
+	uint32_t sess;
+};
+
+/*
+ * tee_close_session_send - input arg msg for TEE close session service
+ */
+struct tee_close_session_send {
+	struct tee_msg_send header;
+	struct tee_close_session_data data;
+};
+
+/*
+ * tee_register_rpc_send_data - input arg structure for TEE register rpc service
+ */
+struct tee_register_rpc_send_data {
+	uint32_t fnk;
+	uint32_t bf;
+	uint32_t nbr_bf;
+};
+
+/*
+ * tee_register_rpc_send - input msg structure for TEE register rpc service
+ */
+struct tee_register_rpc_send {
+	struct tee_msg_send header;
+	struct tee_register_rpc_send_data data;
+};
+
+/*
+ * tee_core_status_out - output arg structure for TEE status service
+ */
+#define TEEC_STATUS_MSG_SIZE 80
+
+struct tee_core_status_out {
+	struct tee_msg_recv header;
+	char raw[TEEC_STATUS_MSG_SIZE];
+};
+
+#endif /* __TEE_ARMV7_OP_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_priv.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_priv.h
new file mode 100644
index 0000000..d0111fa
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/armtz/tee_tz_priv.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_TZ_PRIV__
+#define __TEE_TZ_PRIV__
+
+#include <linux/tee_kernel_lowlevel_api.h>
+
+struct tee;
+struct shm_pool;
+struct tee_rpc_bf;
+
+struct tee_tz {
+	bool started;
+	struct tee *tee;
+	unsigned long shm_paddr;
+	void *shm_vaddr;
+	struct shm_pool *shm_pool;
+	void *tz_outer_cache_mutex;
+	struct tee_rpc_bf *rpc_buffers;
+	bool shm_cached;
+	struct tee_wait_queue_private wait_queue;
+};
+
+#endif /* __TEE_TZ_PRIV__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/Makefile b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/Makefile
new file mode 100644
index 0000000..50916ce
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015-2019 TrustKernel Incorporated
+
+ifneq ($(strip $(M)),)
+	obj-m += tkcore.o
+else
+	obj-y += tkcore.o
+endif
+
+M ?= $(srctree)/drivers/tee/tkcore
+
+#ccflags-y+=-Werror
+ccflags-y += -I$(M)/include/linux
+ccflags-y += -I$(M)/include
+ccflags-y += -I$(M)
+
+tkcore-objs:= \
+		tee_core.o \
+		tee_context.o \
+		tee_session.o \
+		tee_shm.o \
+		tee_supp_com.o \
+		tee_sysfs.o \
+		tee_kernel_api.o \
+		tee_wait_queue.o \
+		tee_tui.o \
+		tee_tui_hal.o \
+		tee_procfs.o \
+		tee_ta_mgmt.o \
+		tee_clkmgr.o \
+		tee_fp.o \
+		pm.o \
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.c
new file mode 100644
index 0000000..91fdc6e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/wait.h>
+#include <linux/freezer.h>
+#include <linux/suspend.h>
+
+#include <linux/atomic.h>
+
+#include "pm.h"
+
+/* tee_wakeup_cnt == 0 means the system is currently
+ * idle, with no wakeup source or suspend request.
+ * tee_wakeup_cnt > 0 means there are ongoing
+ * commands which keep the system from suspending.
+ * tee_wakeup_cnt == -1 means the system is
+ * preparing to suspend or is suspending.
+ */
+static atomic_t tee_wakeup_cnt = ATOMIC_INIT(0);
+
+static DECLARE_WAIT_QUEUE_HEAD(suspend_done);
+static DECLARE_WAIT_QUEUE_HEAD(awake_done);
+
+static void tee_keep_awake(void)
+{
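+	/* Block while a suspend is in progress (counter parked at -1);
+	 * otherwise take a reference that keeps the system awake.
+	 */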
+	while (atomic_inc_unless_negative(&tee_wakeup_cnt) == 0)
+		wait_event_freezable(suspend_done,
+			atomic_read(&tee_wakeup_cnt) >= 0);
+}
+
+static void tee_cancel_awake(void)
+{
+	if (atomic_dec_and_test(&tee_wakeup_cnt)) {
+		/* wake_up() implies a memory barrier */
+		wake_up(&awake_done);
+	}
+}
+
+static void tee_prepare_suspend(void)
+{
+	int r;
+
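+	/* Wait for outstanding commands to drain to zero, then park
+	 * the counter at -1 so new commands block until resume.
+	 */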
+	while ((r = atomic_read(&tee_wakeup_cnt)) >= 0) {
+		if (atomic_cmpxchg(&tee_wakeup_cnt, 0, -1) == 0)
+			return;
+		/* wait_event() implies a memory barrier */
+		wait_event(awake_done, atomic_read(&tee_wakeup_cnt) == 0);
+	}
+
+	pr_warn("tee_wakeup_cnt unexpected value: %d\n", r);
+}
+
+#ifdef CONFIG_TRUSTKERNEL_TEE_RPMB_SUPPORT
+#include "linux/tee_rpmb.h"
+#endif
+
+static void tee_post_suspend(void)
+{
+	/* We do not need an atomic read-modify-write here
+	 * because there is a single suspend source.
+	 */
+	atomic_set(&tee_wakeup_cnt, atomic_read(&tee_wakeup_cnt) + 1);
+	wake_up(&suspend_done);
+}
+
+static int tee_pm_suspend_notifier(struct notifier_block *nb,
+				   unsigned long event, void *dummy)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		tee_prepare_suspend();
+
+#ifdef CONFIG_TRUSTKERNEL_TEE_RPMB_SUPPORT
+		{
+			struct tkcore_rpmb_request rq = {
+				.type = TEE_RPMB_SWITCH_NORMAL
+			};
+			tkcore_emmc_rpmb_execute(&rq);
+		}
+#endif
+		break;
+	case PM_POST_SUSPEND:
+		tee_post_suspend();
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block tz_pm_notifier = {
+	.notifier_call = tee_pm_suspend_notifier,
+};
+
+int tkcore_stay_awake(void *fn, void *data)
+{
+	int r;
+
+	tee_keep_awake();
+	r = ((int (*) (void *)) fn) (data);
+	tee_cancel_awake();
+
+	return r;
+}
+
+int tkcore_tee_pm_init(void)
+{
+	int r;
+
+	r = register_pm_notifier(&tz_pm_notifier);
+	if (r) {
+		pr_err("failed to register pm notifier: %d\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+void tkcore_tee_pm_exit(void)
+{
+	int r;
+
+	r = unregister_pm_notifier(&tz_pm_notifier);
+	if (r)
+		pr_err("failed to unregister_pm_notifier: %d\n", r);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.h
new file mode 100644
index 0000000..750e5f8
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TKCORE_PM_H
+#define TKCORE_PM_H
+
+int tkcore_stay_awake(void *fn, void *data);
+
+int tkcore_tee_pm_init(void);
+
+void tkcore_tee_pm_exit(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr.c
new file mode 100644
index 0000000..a37d772
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/tee_clkmgr.h>
+#include <linux/tee_client_api.h>
+
+#include "tee_clkmgr_priv.h"
+
+struct clkmgr_handle {
+	uint32_t token;
+	void *e, *d;
+	const void *p0, *p1, *p2;
+	size_t argnum;
+	struct list_head le;
+};
+
+/* sync with tee-os */
+enum tee_clkmgr_type {
+	TEE_CLKMGR_TYPE_SPI = 0,
+	TEE_CLKMGR_TYPE_I2C,
+	TEE_CLKMGR_TYPE_I2C_DMA
+};
+
+static const char * const clkid[] = {
+	[TEE_CLKMGR_TYPE_SPI] = "spi",
+	[TEE_CLKMGR_TYPE_I2C] = "i2c",
+	[TEE_CLKMGR_TYPE_I2C_DMA] = "i2c-dma",
+};
+
+static LIST_HEAD(clk_list);
+static DEFINE_SPINLOCK(clk_list_lock);
+
+/* called inside list_lock */
+static struct clkmgr_handle *get_clkmgr_handle(uint32_t token)
+{
+	struct clkmgr_handle *h;
+
+	list_for_each_entry(h, &clk_list, le) {
+		if (h->token == token)
+			return h;
+	}
+
+	return NULL;
+}
+
+int tee_clkmgr_handle(uint32_t token, uint32_t op)
+{
+	struct clkmgr_handle *ph, h;
+	void *fn;
+
+	spin_lock(&clk_list_lock);
+
+	ph = get_clkmgr_handle(token | TEE_CLKMGR_TOKEN_NOT_LEGACY);
+	if (ph == NULL) {
+		pr_err("invalid token %u\n", token);
+		spin_unlock(&clk_list_lock);
+		return TEEC_ERROR_ITEM_NOT_FOUND;
+	}
+
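+	/* Take a snapshot under the lock so the clock callback can
+	 * be invoked without holding the spinlock.
+	 */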
+	memcpy(&h, ph, sizeof(h));
+
+	spin_unlock(&clk_list_lock);
+
+	fn = (op & TEE_CLKMGR_OP_ENABLE) ? h.e : h.d;
+
+	if (h.argnum == 0) {
+		((void (*)(void)) fn) ();
+	} else if (h.argnum == 1) {
+		((void (*)(const void *)) fn) (h.p0);
+	} else if (h.argnum == 2) {
+		((void (*)(const void *, const void *)) fn) (h.p0, h.p1);
+	} else if (h.argnum == 3) {
+		((void (*) (const void *, const void *, const void *)) fn)
+			(h.p0, h.p1, h.p2);
+	} else {
+		pr_err("unsupported token %u argnum %zu\n",
+			h.token, h.argnum);
+		return TEEC_ERROR_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tee_clkmgr_handle);
+
+int tee_clkmgr_register(const char *clkname, int id, void *e, void *d,
+	void *p0, void *p1, void *p2, size_t argnum)
+{
+	size_t n;
+
+	struct clkmgr_handle *h, *w;
+
+	pr_info("tkcoredrv: clkname=%s id=%d\n",
+		clkname, id);
+
+	if (argnum > 3) {
+		pr_err("does not support argnum %zu\n", argnum);
+		return -EINVAL;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(clkid); n++) {
+		if (clkid[n] && strcmp(clkname, clkid[n]) == 0)
+			break;
+	}
+
+	if (n == ARRAY_SIZE(clkid)) {
+		pr_err("invalid clkname %s\n", clkname);
+		return -EINVAL;
+	}
+
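+	/* Reject ids that would spill into the type field of the
+	 * clkmgr token.
+	 */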
+	if ((id << TEE_CLKMGR_TOKEN_ID_SHIFT) &
+		(TEE_CLKMGR_TOKEN_TYPE_MASK << TEE_CLKMGR_TOKEN_TYPE_SHIFT)) {
+		pr_err("%s-%d: invalid id\n", clkname, id);
+		return -EINVAL;
+	}
+
+	h = kmalloc(sizeof(struct clkmgr_handle), GFP_KERNEL);
+	if (h == NULL)
+		return -ENOMEM;
+
+	h->token = TEE_CLKMGR_TOKEN((uint32_t) n, (uint32_t) id);
+	h->e = e;
+	h->d = d;
+	h->p0 = p0;
+	h->p1 = p1;
+	h->p2 = p2;
+	h->argnum = argnum;
+
+	spin_lock(&clk_list_lock);
+
+	/* check for duplication */
+	list_for_each_entry(w, &clk_list, le) {
+		if (w->token == h->token) {
+			pr_err("clk 0x%x already registered\n",
+				h->token);
+			spin_unlock(&clk_list_lock);
+			return -EINVAL;
+		}
+	}
+
+	list_add(&(h->le), &clk_list);
+	spin_unlock(&clk_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tee_clkmgr_register);
+
+int tee_clkmgr_init(void)
+{
+	return 0;
+}
+
+void tee_clkmgr_exit(void)
+{
+	struct clkmgr_handle *h, *n;
+
+	spin_lock(&clk_list_lock);
+
+	list_for_each_entry_safe(h, n, &clk_list, le) {
+		list_del(&(h->le));
+		kfree(h);
+	}
+
+	spin_unlock(&clk_list_lock);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr_priv.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr_priv.h
new file mode 100644
index 0000000..b6b0f5f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_clkmgr_priv.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_CLKMGR_PRIV_H
+#define TEE_CLKMGR_PRIV_H
+
+int tee_clkmgr_init(void);
+void tee_clkmgr_exit(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_context.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_context.c
new file mode 100644
index 0000000..77eccc7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_context.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include "tee_shm.h"
+#include "tee_core_priv.h"
+
+/**
+ * tee_context_dump -	Dump into a buffer the information (ctx, sess & shm)
+ *			associated with a tee.
+ */
+int tee_context_dump(struct tee *tee, char *buff, size_t len)
+{
+	struct list_head *ptr_ctx, *ptr_sess, *ptr_shm;
+	struct tee_context *ctx;
+	struct tee_session *sess;
+	struct tee_shm *shm;
+	int i = -1;
+	int j = 0;
+
+	int pos = 0;
+
+	WARN_ON(!tee);
+
+	if (len < 80 || list_empty(&tee->list_ctx))
+		return 0;
+
+	mutex_lock(&tee->lock);
+
+	list_for_each(ptr_ctx, &tee->list_ctx) {
+		ctx = list_entry(ptr_ctx, struct tee_context, entry);
+		++i;
+
+		pos += sprintf(buff + pos,
+				"[%02d] ctx=%p (refcount=%d) (usr=%d)",
+				i, ctx,
+				(int)kref_read(&ctx->refcount),
+				ctx->usr_client);
+		pos += sprintf(buff + pos, " name=\"%s\" (tgid=%d)\n",
+				ctx->name,
+				ctx->tgid);
+		if ((len - pos) < 80) {
+			pos = 0;
+			goto out;
+		}
+
+		if (list_empty(&ctx->list_sess))
+			continue;
+
+		j = 0;
+		list_for_each(ptr_sess, &ctx->list_sess) {
+			sess = list_entry(ptr_sess,
+					struct tee_session,
+					entry);
+
+			pos += sprintf(buff + pos,
+					"[%02d.%d] sess=%p sessid=%08x\n",
+					i, j, sess,
+					sess->sessid);
+
+			if ((len - pos) < 80) {
+				pos = 0;
+				goto out;
+			}
+
+			j++;
+		}
+
+		if (list_empty(&ctx->list_shm))
+			continue;
+
+		j = 0;
+		list_for_each(ptr_shm, &ctx->list_shm) {
+			shm = list_entry(ptr_shm, struct tee_shm, entry);
+
+			pos += sprintf(buff + pos,
+					"[%02d.%d] shm=%p paddr=%p kaddr=%p",
+					i, j, shm,
+					&shm->resv.paddr,
+					shm->resv.kaddr);
+			pos += sprintf(buff + pos,
+					" s=%zu(%zu)\n",
+					shm->size_req,
+					shm->size_alloc);
+			if ((len - pos) < 80) {
+				pos = 0;
+				goto out;
+			}
+
+			j++;
+		}
+	}
+
+out:
+	mutex_unlock(&tee->lock);
+	return pos;
+}
+
+/**
+ * tee_context_create - Allocate and create a new context.
+ *			Reference on the back-end is requested.
+ */
+struct tee_context *tee_context_create(struct tee *tee)
+{
+	int ret;
+	struct tee_context *ctx;
+
+
+	ctx = devm_kzalloc(_DEV(tee), sizeof(struct tee_context), GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
+		pr_err("tee_context allocation failed\n");
+		return ctx;
+	}
+
+	kref_init(&ctx->refcount);
+	INIT_LIST_HEAD(&ctx->list_sess);
+	INIT_LIST_HEAD(&ctx->list_shm);
+
+	ctx->tee = tee;
+	snprintf(ctx->name, sizeof(ctx->name), "%s", current->comm);
+	ctx->tgid = current->tgid;
+
+	ret = tee_get(tee);
+	if (ret) {
+		devm_kfree(_DEV(tee), ctx);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&tee->lock);
+	tee_inc_stats(&tee->stats[TEE_STATS_CONTEXT_IDX]);
+	list_add_tail(&ctx->entry, &tee->list_ctx);
+	mutex_unlock(&tee->lock);
+
+
+	return ctx;
+}
+
+/**
+ * _tee_context_do_release - Final function to release
+ *                           and free a context.
+ */
+static void _tee_context_do_release(struct kref *kref)
+{
+	struct tee_context *ctx;
+	struct tee *tee;
+
+	ctx = container_of(kref, struct tee_context, refcount);
+
+	WARN_ON(!ctx || !ctx->tee);
+
+	tee = ctx->tee;
+
+
+	tee_dec_stats(&tee->stats[TEE_STATS_CONTEXT_IDX]);
+	list_del(&ctx->entry);
+
+	devm_kfree(_DEV(tee), ctx);
+	tee_put(tee);
+
+}
+
+/**
+ * tee_context_get - Increase the reference count of
+ *                   the context.
+ */
+void tee_context_get(struct tee_context *ctx)
+{
+	WARN_ON(!ctx || !ctx->tee);
+
+	kref_get(&ctx->refcount);
+}
+
+static int is_in_list(struct tee *tee, struct list_head *entry)
+{
+	int present = 1;
+
+	if ((entry->next == LIST_POISON1) && (entry->prev == LIST_POISON2))
+		present = 0;
+	return present;
+}
+
+/**
+ * tee_context_put - Decreases the reference count of
+ *                   the context. If 0, the final
+ *                   release function is called.
+ */
+void tee_context_put(struct tee_context *ctx)
+{
+	struct tee_context *_ctx = ctx;
+	struct tee *tee;
+
+	(void) _ctx;
+	WARN_ON(!ctx || !ctx->tee);
+	tee = ctx->tee;
+
+	if (!is_in_list(tee, &ctx->entry))
+		return;
+
+	kref_put(&ctx->refcount, _tee_context_do_release);
+}
+
+/**
+ * tee_context_destroy - Request to destroy a context.
+ */
+void tee_context_destroy(struct tee_context *ctx)
+{
+	struct tee *tee;
+
+	if (!ctx || !ctx->tee)
+		return;
+
+	tee = ctx->tee;
+
+
+	mutex_lock(&tee->lock);
+	tee_context_put(ctx);
+	mutex_unlock(&tee->lock);
+}
+
+int tee_context_copy_from_client(const struct tee_context *ctx,
+				 void *dest, const void *src, size_t size)
+{
+	int res = 0;
+
+	if (dest && src && (size > 0)) {
+		if (ctx->usr_client)
+			res = copy_from_user(dest, src, size);
+		else
+			memcpy(dest, src, size);
+	}
+	return res;
+}
+
+struct tee_shm *tee_context_alloc_shm_tmp(struct tee_context *ctx,
+					  size_t size, const void *src,
+					  int type)
+{
+	struct tee_shm *shm;
+
+	type &= (TEEC_MEM_INPUT | TEEC_MEM_OUTPUT);
+
+	shm = tkcore_alloc_shm(ctx->tee, size,
+			TEE_SHM_MAPPED | TEE_SHM_TEMP | type);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("buffer allocation failed (%ld)\n",
+			PTR_ERR(shm));
+		return shm;
+	}
+
+	shm->ctx = ctx;
+
+	if (type & TEEC_MEM_INPUT) {
+		if (tee_context_copy_from_client(ctx,
+			shm->resv.kaddr, src, size)) {
+			pr_err(
+				"tee_context_copy_from_client failed\n");
+			tkcore_shm_free(shm);
+			shm = NULL;
+		}
+	}
+	return shm;
+}
+
+struct tee_shm *tee_context_create_tmpref_buffer(struct tee_context *ctx,
+						 size_t size,
+						 const void *buffer, int type)
+{
+	struct tee_shm *shm = NULL;
+	int flags;
+
+	switch (type) {
+	case TEEC_MEMREF_TEMP_OUTPUT:
+		flags = TEEC_MEM_OUTPUT;
+		break;
+	case TEEC_MEMREF_TEMP_INPUT:
+		flags = TEEC_MEM_INPUT;
+		break;
+	case TEEC_MEMREF_TEMP_INOUT:
+		flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT;
+		break;
+	default:
+		flags = 0;
+		WARN_ON(1);
+	}
+	shm = tee_context_alloc_shm_tmp(ctx, size, buffer, flags);
+	return shm;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core.c
new file mode 100644
index 0000000..1444be4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <asm-generic/ioctl.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+
+#include "linux/tee_core.h"
+#include "linux/tee_ioc.h"
+#include <linux/tee_client_api.h>
+
+#include "tee_core_priv.h"
+#include "tee_sysfs.h"
+#include "tee_shm.h"
+#include "tee_supp_com.h"
+#include "tee_tui.h"
+
+#include "tee_ta_mgmt.h"
+#include "tee_procfs.h"
+
+#include "tee_fp_priv.h"
+#include "tee_clkmgr_priv.h"
+
+#include "pm.h"
+
+static uint32_t nsdrv_feature_flags;
+
+#if defined(CONFIG_ARM)
+
+#include <asm/mach/map.h>
+#include <linux/io.h>
+
+void *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+	phys_addr_t last_addr;
+	unsigned long offset = phys_addr & ~PAGE_MASK;
+	unsigned long pfn = __phys_to_pfn(phys_addr);
+
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
+#endif
+void *tee_map_cached_shm(unsigned long pa, size_t len)
+{
+#if defined(CONFIG_ARM64)
+	return ioremap_cache(pa, len);
+#elif defined(CONFIG_ARM)
+	return __arm_ioremap(pa, len, MT_MEMORY_RW);
+#else
+#error "tee_map_cached_shm() not implemented for this platform"
+#endif
+}
+EXPORT_SYMBOL(tee_map_cached_shm);
+
+void tee_unmap_cached_shm(void *va)
+{
+	iounmap(va);
+}
+EXPORT_SYMBOL(tee_unmap_cached_shm);
+
+#define _TEE_CORE_FW_VER "1:0.1"
+
+static char *_tee_supp_app_name = "teed";
+
+/* Store the class misc reference */
+static struct class *misc_class;
+
+static int device_match(struct device *device, const void *devname)
+{
+	int ret;
+	struct tee *tee = dev_get_drvdata(device);
+
+	WARN_ON(!tee);
+
+	/*
+	 * Always return 0 (no match) if tee is
+	 * a NULL pointer.
+	 */
+	if (tee == NULL)
+		return 0;
+
+	ret = strncmp(devname,
+		tee->name, sizeof(tee->name));
+	return ret == 0;
+}
+
+/*
+ * For the kernel api.
+ * Get a reference on a device tee from the device needed
+ */
+struct tee *tee_get_tee(const char *devname)
+{
+	struct device *device;
+
+	if (!devname)
+		return NULL;
+	device = class_find_device(misc_class, NULL,
+		(void *) devname, device_match);
+	if (!device) {
+		pr_err("can't find device [%s]\n",
+			devname);
+		return NULL;
+	}
+
+	return dev_get_drvdata(device);
+}
+
+void tee_inc_stats(struct tee_stats_entry *entry)
+{
+	entry->count++;
+	if (entry->count > entry->max)
+		entry->max = entry->count;
+}
+
+void tee_dec_stats(struct tee_stats_entry *entry)
+{
+	entry->count--;
+}
+
+int __tee_get(struct tee *tee)
+{
+	int ret = 0;
+	int v;
+
+	WARN_ON(!tee);
+
+	v = atomic_inc_return(&tee->refcount);
+	if (v == 1) {
+		WARN_ON(!try_module_get(tee->ops->owner));
+		get_device(tee->dev);
+
+		if (tee->ops->start)
+			ret = tee->ops->start(tee);
+
+		if (ret) {
+			pr_err("%s::start() failed, err=0x%x\n",
+				tee->name, ret);
+			put_device(tee->dev);
+			module_put(tee->ops->owner);
+			atomic_dec(&tee->refcount);
+		}
+	} else {
+		dev_warn(_DEV(tee), "Unexpected tee->refcount: 0x%x\n", v);
+		return -1;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(__tee_get);
+
+
+/**
+ * tee_get - increases refcount of the tee
+ * @tee:	[in]	tee to increase refcount of
+ *
+ * @note: If the tee.ops.start() callback function is available,
+ * it is called when the refcount is equal to 1.
+ */
+int tee_get(struct tee *tee)
+{
+	int ret = 0;
+
+	WARN_ON(!tee);
+
+	if (atomic_inc_return(&tee->refcount) == 1) {
+		pr_warn("unexpected refcount 1\n");
+	} else {
+		int count = (int) atomic_read(&tee->refcount);
+
+		if (count > tee->max_refcount)
+			tee->max_refcount = count;
+	}
+	return ret;
+}
+
+/**
+ * tee_put - decreases refcount of the tee
+ * @tee:	[in]	tee to reduce refcount of
+ *
+ * @note: If the tee.ops.stop() callback function is available,
+ * it is called when the refcount is equal to 0.
+ */
+int tee_put(struct tee *tee)
+{
+	int ret = 0;
+	int count;
+
+	WARN_ON(!tee);
+
+	if (atomic_dec_and_test(&tee->refcount)) {
+		pr_warn("unexpected refcount: 0\n");
+		/*
+		 * tee should never be stopped
+		 */
+	}
+
+	count = (int)atomic_read(&tee->refcount);
+	return ret;
+}
+
+static int tee_supp_open(struct tee *tee)
+{
+	int ret = 0;
+
+	WARN_ON(!tee->rpc);
+
+	if (strncmp(_tee_supp_app_name, current->comm,
+			strlen(_tee_supp_app_name)) == 0) {
+		if (atomic_add_return(1, &tee->rpc->used) > 1) {
+			ret = -EBUSY;
+			pr_err("Only one teed is allowed\n");
+			atomic_sub(1, &tee->rpc->used);
+		}
+	}
+
+	return ret;
+}
+
+static void tee_supp_release(struct tee *tee)
+{
+	WARN_ON(!tee->rpc);
+
+	if ((atomic_read(&tee->rpc->used) == 1) &&
+			(strncmp(_tee_supp_app_name, current->comm,
+					strlen(_tee_supp_app_name)) == 0))
+		atomic_sub(1, &tee->rpc->used);
+}
+
+static int tee_ctx_open(struct inode *inode, struct file *filp)
+{
+	struct tee_context *ctx;
+	struct tee *tee;
+	int ret;
+
+	tee = container_of(filp->private_data, struct tee, miscdev);
+
+	WARN_ON(!tee);
+	WARN_ON(tee->miscdev.minor != iminor(inode));
+
+	ret = tee_supp_open(tee);
+	if (ret)
+		return ret;
+
+	ctx = tee_context_create(tee);
+	if (IS_ERR_OR_NULL(ctx))
+		return PTR_ERR(ctx);
+
+	ctx->usr_client = 1;
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+static int tee_ctx_release(struct inode *inode, struct file *filp)
+{
+	struct tee_context *ctx = filp->private_data;
+	struct tee *tee;
+
+	if (!ctx)
+		return -EINVAL;
+
+	WARN_ON(!ctx->tee);
+	tee = ctx->tee;
+	WARN_ON(tee->miscdev.minor != iminor(inode));
+
+	tee_context_destroy(ctx);
+	tee_supp_release(tee);
+
+	return 0;
+}
+
+static int tee_do_create_session(struct tee_context *ctx,
+				 struct tee_cmd_io __user *u_cmd)
+{
+	int ret = -EINVAL;
+	struct tee_cmd_io k_cmd;
+	struct tee *tee;
+
+	tee = ctx->tee;
+	WARN_ON(!ctx->usr_client);
+
+
+	if (copy_from_user(&k_cmd, (void *)u_cmd, sizeof(struct tee_cmd_io))) {
+		pr_err("create_session: copy_from_user failed\n");
+		goto exit;
+	}
+
+	if (k_cmd.fd_sess > 0) {
+		pr_err("invalid fd_sess %d\n", k_cmd.fd_sess);
+		goto exit;
+	}
+
+	if ((k_cmd.op == NULL) || (k_cmd.uuid == NULL) ||
+		((k_cmd.data != NULL) && (k_cmd.data_size == 0)) ||
+		((k_cmd.data == NULL) && (k_cmd.data_size != 0))) {
+		pr_err("op or/and data parameters are not valid\n");
+		goto exit;
+	}
+
+	ret = tee_session_create_fd(ctx, &k_cmd);
+	put_user(k_cmd.err, &u_cmd->err);
+	put_user(k_cmd.origin, &u_cmd->origin);
+	if (ret)
+		goto exit;
+
+	put_user(k_cmd.fd_sess, &u_cmd->fd_sess);
+
+exit:
+	return ret;
+}
+
+static int tee_do_shm_alloc_perm(struct tee_context *ctx,
+	struct tee_shm_io __user *u_shm)
+{
+	int ret = -EINVAL;
+	struct tee_shm_io k_shm;
+	struct tee *tee = ctx->tee;
+
+	(void) tee;
+
+	if (copy_from_user(&k_shm, (void *)u_shm, sizeof(struct tee_shm_io))) {
+		pr_err("shm_alloc_perm: copy_from_user failed\n");
+		goto exit;
+	}
+
+	ret = tee_shm_alloc_io_perm(ctx, &k_shm);
+	if (ret)
+		goto exit;
+
+	put_user(k_shm.paddr, &u_shm->paddr);
+	put_user(k_shm.fd_shm, &u_shm->fd_shm);
+	put_user(k_shm.flags, &u_shm->flags);
+
+exit:
+	return ret;
+}
+
+static int tee_do_shm_alloc(struct tee_context *ctx,
+	struct tee_shm_io __user *u_shm)
+{
+	int ret = -EINVAL;
+	struct tee_shm_io k_shm;
+	struct tee *tee = ctx->tee;
+
+	WARN_ON(!ctx->usr_client);
+
+
+	if (copy_from_user(&k_shm, (void *)u_shm, sizeof(struct tee_shm_io))) {
+		pr_err("copy_from_user failed\n");
+		goto exit;
+	}
+
+	if ((k_shm.buffer != NULL) || (k_shm.fd_shm != 0) ||
+		((k_shm.flags & tee->shm_flags) == 0) ||
+		(k_shm.registered != 0)) {
+		pr_err(
+			"shm parameters are not valid %p %d %08x %08x %d\n",
+			(void *) k_shm.buffer,
+			k_shm.fd_shm,
+			(unsigned int) k_shm.flags,
+			(unsigned int) tee->shm_flags,
+			k_shm.registered);
+		goto exit;
+	}
+
+	ret = tee_shm_alloc_io(ctx, &k_shm);
+	if (ret)
+		goto exit;
+
+	put_user(k_shm.fd_shm, &u_shm->fd_shm);
+	put_user(k_shm.flags, &u_shm->flags);
+
+exit:
+	return ret;
+}
+
+static int tee_do_get_fd_for_rpc_shm(struct tee_context *ctx,
+	struct tee_shm_io __user *u_shm)
+{
+	int ret = -EINVAL;
+	struct tee_shm_io k_shm;
+	struct tee *tee = ctx->tee;
+
+
+	WARN_ON(!ctx->usr_client);
+
+	if (copy_from_user(&k_shm, (void *)u_shm, sizeof(struct tee_shm_io))) {
+		pr_err("copy_from_user failed\n");
+		goto exit;
+	}
+
+	if (k_shm.registered != 0) {
+		pr_err("expecting shm to be unregistered\n");
+		goto exit;
+	}
+
+	if ((k_shm.buffer == NULL) || (k_shm.size == 0) ||
+		(k_shm.fd_shm != 0)) {
+		pr_err("Invalid shm param. buffer: %p size: %u fd: %d\n",
+			k_shm.buffer, k_shm.size, k_shm.fd_shm);
+		goto exit;
+	}
+
+	if ((k_shm.flags & ~(tee->shm_flags)) ||
+		((k_shm.flags & tee->shm_flags) == 0)) {
+		pr_err(
+			"Invalid shm flags: 0x%x expecting to be within 0x%x\n",
+			k_shm.flags, tee->shm_flags);
+		goto exit;
+	}
+
+	ret = tee_shm_fd_for_rpc(ctx, &k_shm);
+	if (ret)
+		goto exit;
+
+	put_user(k_shm.fd_shm, &u_shm->fd_shm);
+
+exit:
+
+	return ret;
+}
+
+static int tee_tui_notify(uint32_t arg)
+{
+	if (teec_notify_event(arg))
+		return 0;
+
+	return -EINVAL;
+}
+
+static int tee_tui_wait(uint32_t __user *u_arg)
+{
+	int r;
+	uint32_t cmd_id;
+
+	r = teec_wait_cmd(&cmd_id);
+	if (r)
+		return r;
+
+	if (copy_to_user(u_arg, &cmd_id, sizeof(cmd_id)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long tee_internal_ioctl(struct tee_context *ctx,
+				unsigned int cmd,
+				void __user *u_arg)
+{
+	int ret = -EINVAL;
+
+	switch (cmd) {
+	case TEE_OPEN_SESSION_IOC:
+		ret = tee_do_create_session(ctx,
+			(struct tee_cmd_io __user *) u_arg);
+		break;
+
+	case TEE_ALLOC_SHM_PERM_IOC:
+		ret = tee_do_shm_alloc_perm(ctx,
+			(struct tee_shm_io __user *) u_arg);
+		break;
+
+	case TEE_ALLOC_SHM_IOC:
+		ret = tee_do_shm_alloc(ctx,
+			(struct tee_shm_io __user *) u_arg);
+		break;
+
+	case TEE_GET_FD_FOR_RPC_SHM_IOC:
+		ret = tee_do_get_fd_for_rpc_shm(ctx,
+			(struct tee_shm_io __user *) u_arg);
+		break;
+
+	case TEE_TUI_NOTIFY_IOC:
+		ret = tee_tui_notify(
+			(uint32_t) (unsigned long) u_arg);
+		break;
+
+	case TEE_TUI_WAITCMD_IOC:
+		ret = tee_tui_wait(
+			(uint32_t __user *) u_arg);
+		break;
+
+	case TEE_INSTALL_TA_IOC:
+		ret = tee_install_sp_ta(ctx, u_arg);
+		break;
+
+	case TEE_INSTALL_TA_RESP_IOC:
+		ret = tee_install_sp_ta_response(
+			ctx, u_arg);
+		break;
+
+	case TEE_DELETE_TA_IOC:
+		ret = tee_delete_sp_ta(ctx, u_arg);
+		break;
+
+	case TEE_QUERY_DRV_FEATURE_IOC:
+		if (u_arg) {
+			pr_info("tkcoredrv: nsdrv feature = 0x%x\n",
+					nsdrv_feature_flags);
+			if (copy_to_user(u_arg, &nsdrv_feature_flags,
+					sizeof(nsdrv_feature_flags))) {
+				ret = -EFAULT;
+			}
+		} else
+			ret = -EINVAL;
+
+		break;
+
+	default:
+		pr_err("internal_ioctl: Unknown command: %u\n", cmd);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
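+/*
+ * A 32-bit client lays out pointer-sized fields as 32-bit values;
+ * clear the upper halves in place so the structures can then be
+ * read using the 64-bit layout.
+ */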
+static int convert_compat_tee_shm(struct TEEC_SharedMemory __user *shm)
+{
+	if (unlikely(put_user(0, ((uint32_t __user *) &(shm->buffer)) + 1)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &(shm->size)) + 1)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &(shm->d.fd)) + 1)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int convert_compat_tee_param(union TEEC_Parameter __user *p,
+	uint32_t type)
+{
+	struct TEEC_SharedMemory __user *p_shm;
+
+	switch (type) {
+	case TEEC_MEMREF_TEMP_INPUT:
+	case TEEC_MEMREF_TEMP_OUTPUT:
+	case TEEC_MEMREF_TEMP_INOUT:
+
+		if (unlikely(put_user(0,
+				((uint32_t __user *) &(p->tmpref.buffer)) + 1)))
+			return -EFAULT;
+
+		if (unlikely(put_user(0,
+				((uint32_t __user *) &(p->tmpref.size)) + 1)))
+			return -EFAULT;
+
+		break;
+
+	case TEEC_MEMREF_PARTIAL_INPUT:
+	case TEEC_MEMREF_PARTIAL_OUTPUT:
+	case TEEC_MEMREF_PARTIAL_INOUT:
+	case TEEC_MEMREF_WHOLE:
+
+		if (unlikely(put_user(0,
+				((uint32_t __user *) &(p->memref.parent)) + 1)))
+			return -EFAULT;
+
+		if (unlikely(put_user(0,
+				((uint32_t __user *) &(p->memref.size)) + 1)))
+			return -EFAULT;
+
+		if (unlikely(put_user(0,
+				((uint32_t __user *) &(p->memref.offset)) + 1)))
+			return -EFAULT;
+
+		if ((copy_from_user(&p_shm, &p->memref.parent,
+				sizeof(p_shm))))
+			return -EFAULT;
+
+		if (p_shm == NULL)
+			break;
+
+		if (convert_compat_tee_shm(p_shm))
+			return -EFAULT;
+
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int convert_compat_tee_cmd(struct tee_cmd_io __user *u_cmd)
+{
+	uint32_t i;
+	struct TEEC_Operation __user *p_op;
+	uint32_t paramTypes;
+
+	if (u_cmd == NULL)
+		return -EINVAL;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &u_cmd->uuid) + 1)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &u_cmd->data) + 1)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &u_cmd->op) + 1)))
+		return -EFAULT;
+
+	if (copy_from_user(&p_op, &u_cmd->op, sizeof(p_op)))
+		return -EFAULT;
+
+	if (p_op == NULL)
+		return -EINVAL;
+
+	if (copy_from_user(&paramTypes, (uint32_t __user *) &p_op->paramTypes,
+			sizeof(p_op->paramTypes)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &p_op->session) + 1)))
+		return -EFAULT;
+
+	for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; i++) {
+		if (convert_compat_tee_param(&p_op->params[i],
+			TEEC_PARAM_TYPE_GET(paramTypes, i))) {
+			pr_err("bad param %u\n", i);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+int convert_compat_tee_shm_io(struct tee_shm_io __user *shm_io)
+{
+	if (shm_io == NULL)
+		return -EINVAL;
+
+	if (unlikely(put_user(0, ((uint32_t __user *) &shm_io->buffer) + 1)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int convert_compat_tee_spta_inst(struct tee_spta_inst_desc __user *spta)
+{
+	if (spta == NULL)
+		return -EINVAL;
+
+	if (unlikely(put_user(0,
+		((uint32_t __user *) &spta->ta_binary) + 1)))
+		return -EFAULT;
+
+	if (unlikely(put_user(0,
+		((uint32_t __user *) &spta->response_len) + 1)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long tee_compat_ioctl(struct file *filp, unsigned int cmd,
+			unsigned long arg)
+{
+	struct tee_context *ctx = filp->private_data;
+	void __user *u_arg;
+
+	WARN_ON(!ctx);
+	WARN_ON(!ctx->tee);
+
+	if (is_compat_task())
+		u_arg = compat_ptr(arg);
+	else
+		u_arg = (void __user *)arg;
+
+	switch (cmd) {
+	case TEE_OPEN_SESSION_IOC:
+		if (convert_compat_tee_cmd((struct tee_cmd_io __user *) u_arg))
+			return -EFAULT;
+		break;
+
+	case TEE_ALLOC_SHM_PERM_IOC:
+	case TEE_ALLOC_SHM_IOC:
+	case TEE_GET_FD_FOR_RPC_SHM_IOC:
+		if (convert_compat_tee_shm_io(
+				(struct tee_shm_io __user *) u_arg))
+			return -EFAULT;
+		break;
+
+	case TEE_INSTALL_TA_IOC:
+		if (convert_compat_tee_spta_inst(
+			(struct tee_spta_inst_desc __user *) u_arg))
+			return -EFAULT;
+		break;
+
+	default:
+		break;
+	}
+
+	return tee_internal_ioctl(ctx, cmd, u_arg);
+}
+#endif
+
+static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct tee_context *ctx = filp->private_data;
+
+	WARN_ON(!ctx);
+	WARN_ON(!ctx->tee);
+
+	return tee_internal_ioctl(ctx, cmd, (void __user *) arg);
+}
+
+const struct file_operations tee_fops = {
+	.owner = THIS_MODULE,
+	.read = tee_supp_read,
+	.write = tee_supp_write,
+	.open = tee_ctx_open,
+	.release = tee_ctx_release,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tee_compat_ioctl,
+#endif
+	.unlocked_ioctl = tee_ioctl
+};
+
+static void tee_plt_device_release(struct device *dev)
+{
+	(void) dev;
+}
+
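+/*
+ * Integer handles ("uuids") handed back to kernel-API clients are
+ * backed by an IDR that maps them to tee_context/tee_session pointers.
+ */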
+static spinlock_t tee_idr_lock;
+static struct idr tee_idr;
+
+/* The caller must guarantee that tee
+ * and id are not NULL; the idr lock protects
+ * concurrent access.
+ */
+int tee_core_alloc_uuid(void *ptr)
+{
+	int r;
+
+	idr_preload(GFP_KERNEL);
+
+	spin_lock(&tee_idr_lock);
+	r = idr_alloc(&tee_idr, ptr, 1, 0, GFP_NOWAIT);
+	if (r < 0)
+		pr_err("Bad alloc tee_uuid. rv: %d\n",
+			r);
+	spin_unlock(&tee_idr_lock);
+
+	idr_preload_end();
+
+	return r;
+}
+
+void *tee_core_uuid2ptr(int id)
+{
+	return idr_find(&tee_idr, id);
+}
+
+/* The caller must guarantee that tee
+ * and id are not NULL.
+ */
+void tee_core_free_uuid(int id)
+{
+	idr_remove(&tee_idr, id);
+}
+
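+/*
+ * Allocate and initialise a struct tee plus 'len' bytes of backend
+ * private data (exposed through tee->priv); the misc device itself is
+ * only registered later by tee_core_add().
+ */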
+struct tee *tee_core_alloc(struct device *dev, char *name, int id,
+	const struct tee_ops *ops, size_t len)
+{
+	struct tee *tee;
+
+	if (!dev || !name || !ops ||
+		!ops->open || !ops->close || !ops->alloc || !ops->free)
+		return NULL;
+
+	tee = devm_kzalloc(dev, sizeof(struct tee) + len, GFP_KERNEL);
+	if (!tee) {
+		pr_err("core_alloc: kzalloc failed\n");
+		return NULL;
+	}
+
+	if (!dev->release)
+		dev->release = tee_plt_device_release;
+
+	tee->dev = dev;
+	tee->id = id;
+	tee->ops = ops;
+	tee->priv = &tee[1];
+
+	snprintf(tee->name, sizeof(tee->name), "%s", name);
+	pr_info("TEE core: Alloc the misc device \"%s\" (id=%d)\n",
+		tee->name, tee->id);
+
+	tee->miscdev.parent = dev;
+	tee->miscdev.minor = MISC_DYNAMIC_MINOR;
+	tee->miscdev.name = tee->name;
+	tee->miscdev.fops = &tee_fops;
+
+	mutex_init(&tee->lock);
+	atomic_set(&tee->refcount, 0);
+	INIT_LIST_HEAD(&tee->list_ctx);
+	INIT_LIST_HEAD(&tee->list_rpc_shm);
+
+	tee->state = TEE_OFFLINE;
+	tee->shm_flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT | TEEC_MEM_NONSECURE;
+	tee->test = 0;
+
+	if ((tee_supp_init(tee))) {
+		devm_kfree(dev, tee);
+		return NULL;
+	}
+
+	return tee;
+}
+EXPORT_SYMBOL(tee_core_alloc);
+
+int tee_core_free(struct tee *tee)
+{
+	if (tee) {
+		tee_supp_deinit(tee);
+		devm_kfree(tee->dev, tee);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tee_core_free);
+
+int tee_core_add(struct tee *tee)
+{
+	int rc = 0;
+
+	if (!tee)
+		return -EINVAL;
+
+	rc = misc_register(&tee->miscdev);
+	if (rc != 0) {
+		pr_err("misc_register() failed with ret = %d\n",
+			rc);
+		return rc;
+	}
+
+	dev_set_drvdata(tee->miscdev.this_device, tee);
+
+	rc = tee_init_procfs(tee);
+	if (rc) {
+		misc_deregister(&tee->miscdev);
+		return rc;
+	}
+
+	rc = tee_init_sysfs(tee);
+	if (rc) {
+		misc_deregister(&tee->miscdev);
+		return rc;
+	}
+
+	/* Register a static reference on the class misc
+	 * to allow finding device by class
+	 */
+	WARN_ON(!tee->miscdev.this_device->class);
+
+	if (misc_class)
+		WARN_ON(misc_class != tee->miscdev.this_device->class);
+	else
+		misc_class = tee->miscdev.this_device->class;
+
+	pr_info(
+		"TKCore misc: Register the misc device \"%s\" (id=%d,minor=%d)\n",
+		dev_name(tee->miscdev.this_device), tee->id,
+		tee->miscdev.minor);
+
+	return rc;
+}
+EXPORT_SYMBOL(tee_core_add);
+
+int tee_core_del(struct tee *tee)
+{
+	if (tee) {
+		pr_info(
+			"TEE Core: Destroy the misc device \"%s\" (id=%d)\n",
+			dev_name(tee->miscdev.this_device), tee->id);
+
+		tee_cleanup_sysfs(tee);
+
+		if (tee->miscdev.minor != MISC_DYNAMIC_MINOR) {
+			pr_info(
+				"TEE Core: Deregister the misc device \"%s\" (id=%d)\n",
+				dev_name(tee->miscdev.this_device), tee->id);
+			misc_deregister(&tee->miscdev);
+		}
+	}
+
+	tee_core_free(tee);
+
+	return 0;
+}
+EXPORT_SYMBOL(tee_core_del);
+
+static int __init tee_core_init(void)
+{
+	int r;
+
+	pr_info("\nTEE Core Framework initialization (ver %s)\n",
+		_TEE_CORE_FW_VER);
+
+	r = tkcore_tee_pm_init();
+	if (r) {
+		pr_err("tkcore_tee_pm_init() failed with %d\n", r);
+		return r;
+	}
+
+	spin_lock_init(&tee_idr_lock);
+	idr_init(&tee_idr);
+
+	tee_fp_init();
+	tee_clkmgr_init();
+	tee_ta_mgmt_init();
+
+	return 0;
+}
+
+static void __exit tee_core_exit(void)
+{
+	pr_info("TEE Core Framework unregistered\n");
+
+	tkcore_tee_pm_exit();
+
+	tee_clkmgr_exit();
+	tee_fp_exit();
+}
+
+#ifndef MODULE
+rootfs_initcall(tee_core_init);
+#else
+module_init(tee_core_init);
+#endif
+module_exit(tee_core_exit);
+
+MODULE_AUTHOR("TrustKernel");
+MODULE_DESCRIPTION("TrustKernel TKCore TEEC v1.0");
+MODULE_SUPPORTED_DEVICE("");
+MODULE_VERSION(_TEE_CORE_FW_VER);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core_priv.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core_priv.h
new file mode 100644
index 0000000..0a752d5
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_core_priv.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_CORE_PRIV_H__
+#define __TEE_CORE_PRIV_H__
+
+#include "linux/tee_core.h"
+#include "linux/tee_ioc.h"
+
+/* from tee_core_module.c */
+int tee_get(struct tee *tee);
+int tee_put(struct tee *tee);
+
+void tee_inc_stats(struct tee_stats_entry *entry);
+void tee_dec_stats(struct tee_stats_entry *entry);
+
+/* from tee_context.c */
+int tee_context_dump(struct tee *tee, char *buff, size_t len);
+
+struct tee_context *tee_context_create(struct tee *tee);
+void tee_context_destroy(struct tee_context *ctx);
+
+void tee_context_get(struct tee_context *ctx);
+void tee_context_put(struct tee_context *ctx);
+
+struct tee_shm *tee_context_create_tmpref_buffer(struct tee_context *ctx,
+						 size_t size,
+						 const void *buffer, int type);
+struct tee_shm *tee_context_alloc_shm_tmp(struct tee_context *ctx, size_t size,
+					  const void *data, int type);
+int tee_context_copy_from_client(const struct tee_context *ctx, void *dest,
+				 const void *src, size_t size);
+
+/* from tee_session.c */
+int tee_session_create_fd(struct tee_context *ctx, struct tee_cmd_io *cmd_io);
+struct tee_session *tee_session_create_and_open(struct tee_context *ctx,
+						struct tee_cmd_io *cmd_io);
+int tee_session_close_and_destroy(struct tee_session *sess);
+
+struct tee *tee_get_tee(const char *devname);
+int tee_session_invoke_be(struct tee_session *sess, struct tee_cmd_io *cmd_io);
+
+int tee_core_alloc_uuid(void *ptr);
+void tee_core_free_uuid(int id);
+void *tee_core_uuid2ptr(int id);
+
+#ifdef CONFIG_COMPAT
+
+int convert_compat_tee_cmd(struct tee_cmd_io __user *u_cmd);
+
+int convert_compat_tee_shm_io(struct tee_shm_io __user *shm_io);
+
+int convert_compat_tee_spta_inst(struct tee_spta_inst_desc __user *spta);
+
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp.c
new file mode 100644
index 0000000..ea75537
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/tee_fp.h>
+
+#include <tee_kernel_api.h>
+
+#include "tee_fp_priv.h"
+
+static struct TEEC_UUID SENSOR_DETECTOR_TA_UUID = { 0x966d3f7c, 0x04ef, 0x1beb,
+	{ 0x08, 0xb7, 0x57, 0xf3, 0x7a, 0x6d, 0x87, 0xf9 } };
+
+#define CMD_READ_CHIPID		0x0
+#define CMD_DISABLE			0x1
+#define CMD_CONFIG_PADSEL	0x2
+
+int tee_spi_cfg_padsel(uint32_t padsel)
+{
+	struct TEEC_Context context;
+	struct TEEC_Session session;
+	struct TEEC_Operation op;
+
+	TEEC_Result r;
+
+	uint32_t returnOrigin;
+
+	pr_info("padsel=0x%x\n", padsel);
+
+	memset(&context, 0, sizeof(context));
+	memset(&session, 0, sizeof(session));
+	memset(&op, 0, sizeof(op));
+
+	r = TEEC_InitializeContext(NULL, &context);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InitializeContext() failed with 0x%08x\n", r);
+		return r;
+	}
+
+	r = TEEC_OpenSession(
+		&context, &session, &SENSOR_DETECTOR_TA_UUID,
+		TEEC_LOGIN_PUBLIC,
+		NULL, NULL, &returnOrigin);
+
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_OpenSession failed with 0x%x returnOrigin: %u\n",
+			r, returnOrigin);
+		TEEC_FinalizeContext(&context);
+		return r;
+	}
+
+	op.paramTypes = TEEC_PARAM_TYPES(
+		TEEC_VALUE_INPUT,
+		TEEC_NONE,
+		TEEC_NONE,
+		TEEC_NONE);
+
+	op.params[0].value.a = padsel;
+
+	r = TEEC_InvokeCommand(&session, CMD_CONFIG_PADSEL, &op, &returnOrigin);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InvokeCommand() failed with 0x%08x returnOrigin: %u\n",
+			r, returnOrigin);
+	}
+
+	TEEC_CloseSession(&session);
+	TEEC_FinalizeContext(&context);
+
+	return r;
+}
+EXPORT_SYMBOL(tee_spi_cfg_padsel);
+
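+/*
+ * Perform an SPI transfer through the sensor-detector TA: the config
+ * blob is passed as a temporary input buffer and outbuf (pre-filled
+ * with inbuf) as a temporary in/out buffer for CMD_READ_CHIPID.
+ */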
+int tee_spi_transfer(void *conf, uint32_t conf_size,
+	void *inbuf, void *outbuf, uint32_t size)
+{
+	struct TEEC_Context context;
+	struct TEEC_Session session;
+	struct TEEC_Operation op;
+
+	TEEC_Result r;
+
+	char *buf;
+	uint32_t returnOrigin;
+
+	pr_info("conf=%p conf_size=%u inbuf=%p outbuf=%p size=%u\n",
+		conf, conf_size, inbuf, outbuf, size);
+
+	if (!conf || !inbuf || !outbuf) {
+		pr_err("Bad parameters NULL buf\n");
+		return -EINVAL;
+	}
+
+	if (size == 0) {
+		pr_err("zero buf size\n");
+		return -EINVAL;
+	}
+
+	memset(&context, 0, sizeof(context));
+	memset(&session, 0, sizeof(session));
+	memset(&op, 0, sizeof(op));
+
+	memcpy(outbuf, inbuf, size);
+
+	r = TEEC_InitializeContext(NULL, &context);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InitializeContext() failed with 0x%08x\n", r);
+		return r;
+	}
+
+	r = TEEC_OpenSession(
+		&context, &session, &SENSOR_DETECTOR_TA_UUID,
+		TEEC_LOGIN_PUBLIC,
+		NULL, NULL, &returnOrigin);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_OpenSession failed with 0x%x returnOrigin: %u\n",
+			r, returnOrigin);
+		TEEC_FinalizeContext(&context);
+		return r;
+	}
+
+	op.paramTypes = TEEC_PARAM_TYPES(
+		TEEC_MEMREF_TEMP_INPUT,
+		TEEC_MEMREF_TEMP_INOUT,
+		TEEC_NONE,
+		TEEC_NONE);
+
+	op.params[0].tmpref.buffer = conf;
+	op.params[0].tmpref.size = conf_size;
+
+	op.params[1].tmpref.buffer = outbuf;
+	op.params[1].tmpref.size = size;
+
+	buf = outbuf;
+
+	r = TEEC_InvokeCommand(&session, CMD_READ_CHIPID, &op, &returnOrigin);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InvokeCommand() failed with 0x%08x returnOrigin: %u\n",
+			r, returnOrigin);
+	}
+
+	TEEC_CloseSession(&session);
+	TEEC_FinalizeContext(&context);
+
+	return r;
+}
+EXPORT_SYMBOL(tee_spi_transfer);
+
+int tee_spi_transfer_disable(void)
+{
+	struct TEEC_Context context;
+	struct TEEC_Session session;
+	struct TEEC_Operation op;
+
+	TEEC_Result r;
+
+	uint32_t returnOrigin;
+
+	memset(&context, 0, sizeof(context));
+	memset(&session, 0, sizeof(session));
+	memset(&op, 0, sizeof(op));
+
+	r = TEEC_InitializeContext(NULL, &context);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InitializeContext() failed with 0x%08x\n", r);
+		return r;
+	}
+
+	r = TEEC_OpenSession(
+		&context, &session, &SENSOR_DETECTOR_TA_UUID,
+		TEEC_LOGIN_PUBLIC,
+		NULL, NULL, &returnOrigin);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_OpenSession failed with 0x%x returnOrigin: %u\n",
+			r, returnOrigin);
+		TEEC_FinalizeContext(&context);
+		return r;
+	}
+
+	op.paramTypes = TEEC_PARAM_TYPES(
+		TEEC_NONE,
+		TEEC_NONE,
+		TEEC_NONE,
+		TEEC_NONE);
+
+	r = TEEC_InvokeCommand(&session, CMD_DISABLE, &op, &returnOrigin);
+	if (r != TEEC_SUCCESS) {
+		pr_err(
+			"TEEC_InvokeCommand() failed with 0x%08x returnOrigin: %u\n",
+			r, returnOrigin);
+	}
+
+	TEEC_CloseSession(&session);
+	TEEC_FinalizeContext(&context);
+
+	return r;
+}
+EXPORT_SYMBOL(tee_spi_transfer_disable);
+
+int tee_fp_init(void)
+{
+	return 0;
+}
+
+void tee_fp_exit(void)
+{
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp_priv.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp_priv.h
new file mode 100644
index 0000000..ee9de5f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_fp_priv.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_FP_PRIV_H
+#define TEE_FP_PRIV_H
+
+int tee_fp_init(void);
+void tee_fp_exit(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_kernel_api.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_kernel_api.c
new file mode 100644
index 0000000..a2285a6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_kernel_api.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "linux/tee_kernel_api.h"
+#include "linux/tee_core.h"
+#include "linux/tee_ioc.h"
+
+#include "tee_core_priv.h"
+#include "tee_shm.h"
+#include "tee_supp_com.h"
+
+#define TEE_TZ_DEVICE_NAME	"tkcoredrv"
+
+static void reset_tee_cmd(struct tee_cmd_io *cmd)
+{
+	memset(cmd, 0, sizeof(struct tee_cmd_io));
+	cmd->fd_sess = -1;
+	cmd->cmd = 0;
+	cmd->uuid = NULL;
+	cmd->origin = TEEC_ORIGIN_API;
+	cmd->err = TEEC_SUCCESS;
+	cmd->data = NULL;
+	cmd->data_size = 0;
+	cmd->op = NULL;
+}
+
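+/*
+ * In-kernel GlobalPlatform client API.  A context handle is an integer
+ * allocated from the uuid IDR, so context->fd holds an IDR id here,
+ * not a real file descriptor.
+ */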
+TEEC_Result TEEC_InitializeContext(const char *name,
+					struct TEEC_Context *context)
+{
+	struct tee *tee;
+	struct tee_context *ctx;
+
+	if (!context)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	context->fd = 0;
+
+	if (name == NULL)
+		strncpy(context->devname, TEE_TZ_DEVICE_NAME,
+			sizeof(context->devname));
+	else
+		strncpy(context->devname, name, sizeof(context->devname));
+
+	tee = tee_get_tee(context->devname);
+	if (!tee) {
+		pr_err("can't get device [%s]\n", context->devname);
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	ctx = tee_context_create(tee);
+	if (IS_ERR_OR_NULL(ctx)) {
+		pr_err("ctx is NULL\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	ctx->usr_client = 0;
+
+	context->fd = tee_core_alloc_uuid(ctx);
+	if (context->fd < 0) {
+
+		if (context->fd == -ENOSPC || context->fd == -ENOMEM)
+			return TEEC_ERROR_OUT_OF_MEMORY;
+
+		if (context->fd == -EINVAL) {
+			pr_err("context->fd is invalid\n");
+			return TEEC_ERROR_BAD_PARAMETERS;
+		}
+
+		return TEEC_ERROR_GENERIC;
+	}
+
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(TEEC_InitializeContext);
+
+void TEEC_FinalizeContext(struct TEEC_Context *context)
+{
+	struct tee_context *ctx;
+
+	if (!context || !context->fd) {
+		pr_err("can't release context %p:[%s]\n",
+			context,
+			(context && strnlen(context->devname, 256) < 256) ?
+			context->devname : "[invalid]");
+		return;
+	}
+
+	ctx = tee_core_uuid2ptr(context->fd);
+	if (ctx == NULL) {
+		pr_err("bad context->fd %d provided\n",
+			context->fd);
+		return;
+	}
+
+	tee_core_free_uuid(context->fd);
+	tee_context_destroy(ctx);
+}
+EXPORT_SYMBOL(TEEC_FinalizeContext);
+
+TEEC_Result TEEC_OpenSession(struct TEEC_Context *context,
+	struct TEEC_Session *session,
+	const struct TEEC_UUID *destination,
+	uint32_t connectionMethod,
+	const void *connectionData,
+	struct TEEC_Operation *operation,
+	uint32_t *return_origin)
+{
+	struct TEEC_Operation dummy_op;
+	struct tee_cmd_io cmd;
+	struct tee_session *sess;
+	struct tee_context *ctx;
+	uint32_t error_origin;
+
+	if (!operation) {
+		/*
+		 * This code exists because the GlobalPlatform API allows
+		 * operation to be passed as a NULL pointer.  In the kernel
+		 * and in the secure world we usually don't want it to be
+		 * NULL, so we substitute this zeroed dummy operation when
+		 * a client doesn't provide one.
+		 */
+		memset(&dummy_op, 0, sizeof(struct TEEC_Operation));
+		operation = &dummy_op;
+	}
+
+	if (!context || !session || !destination || !operation)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	session->fd = 0;
+
+	ctx = tee_core_uuid2ptr(context->fd);
+	if (ctx == NULL) {
+		pr_err("bad context_fd %d provided\n",
+			context->fd);
+		if (return_origin)
+			*return_origin = TEEC_ORIGIN_COMMS;
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	reset_tee_cmd(&cmd);
+	cmd.op = operation;
+	cmd.uuid = (struct TEEC_UUID *) destination;
+
+	sess = tee_session_create_and_open(ctx, &cmd);
+	if (IS_ERR_OR_NULL(sess)) {
+		if (cmd.origin)
+			error_origin = cmd.origin;
+		else
+			error_origin = TEEC_ORIGIN_COMMS;
+
+		if (return_origin)
+			*return_origin = error_origin;
+
+		if (cmd.err)
+			return cmd.err;
+		else
+			return TEEC_ERROR_COMMUNICATION;
+	} else {
+		error_origin = cmd.origin;
+		session->fd = tee_core_alloc_uuid(sess);
+		if (session->fd < 0) {
+
+			/* since we failed to alloc uuid, we need
+			 * to close this session
+			 */
+
+			tee_session_close_and_destroy(sess);
+
+			error_origin = TEEC_ORIGIN_COMMS;
+
+			if (return_origin)
+				*return_origin = error_origin;
+
+			if (session->fd == -ENOSPC || session->fd == -ENOMEM)
+				return TEEC_ERROR_OUT_OF_MEMORY;
+			if (session->fd == -EINVAL)
+				return TEEC_ERROR_BAD_PARAMETERS;
+
+			return TEEC_ERROR_GENERIC;
+		}
+
+		if (return_origin)
+			*return_origin = error_origin;
+
+		return cmd.err;
+	}
+}
+EXPORT_SYMBOL(TEEC_OpenSession);
+		pr_err("Log service not available: 0x%x\n",
+void TEEC_CloseSession(struct TEEC_Session *session)
+{
+	if (session && session->fd) {
+		struct tee_session *sess = tee_core_uuid2ptr(session->fd);
+
+		if (sess == NULL) {
+			pr_err("bad session_id %d provided\n",
+				session->fd);
+			return;
+		}
+
+		tee_core_free_uuid(session->fd);
+
+		tee_session_close_and_destroy(sess);
+	}
+}
+EXPORT_SYMBOL(TEEC_CloseSession);
+
+TEEC_Result TEEC_InvokeCommand(struct TEEC_Session *session,
+	uint32_t commandID,
+	struct TEEC_Operation *operation,
+	uint32_t *return_origin)
+{
+	int ret = 0;
+	struct tee_cmd_io cmd;
+	struct tee_session *sess;
+	uint32_t error_origin;
+	struct TEEC_Operation dummy_op;
+
+	if (!session || !session->fd)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	if (!operation) {
+		memset(&dummy_op, 0, sizeof(struct TEEC_Operation));
+		operation = &dummy_op;
+	}
+
+	sess = tee_core_uuid2ptr(session->fd);
+	if (sess == NULL) {
+		pr_err("bad session_fd %d provided\n",
+			session->fd);
+		if (return_origin)
+			*return_origin = TEEC_ORIGIN_COMMS;
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	reset_tee_cmd(&cmd);
+	cmd.cmd = commandID;
+	cmd.op = operation;
+
+	ret = tee_session_invoke_be(sess, &cmd);
+	if (ret) {
+		if (cmd.origin)
+			error_origin = cmd.origin;
+		else
+			error_origin = TEEC_ORIGIN_COMMS;
+
+		if (return_origin)
+			*return_origin = error_origin;
+
+		if (cmd.err)
+			return cmd.err;
+		else
+			return TEEC_ERROR_COMMUNICATION;
+	} else {
+		if (return_origin)
+			*return_origin = cmd.origin;
+		return cmd.err;
+	}
+}
+EXPORT_SYMBOL(TEEC_InvokeCommand);
+
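+/*
+ * Illustrative in-kernel usage sketch (example_ta_uuid and
+ * EXAMPLE_CMD_ID below are placeholders, not part of this driver):
+ *
+ *	struct TEEC_Context ctx;
+ *	struct TEEC_Session sess;
+ *	struct TEEC_Operation op = { 0 };
+ *	uint32_t org;
+ *
+ *	if (TEEC_InitializeContext(NULL, &ctx) != TEEC_SUCCESS)
+ *		return -ENODEV;
+ *	if (TEEC_OpenSession(&ctx, &sess, &example_ta_uuid,
+ *			     TEEC_LOGIN_PUBLIC, NULL, NULL,
+ *			     &org) == TEEC_SUCCESS) {
+ *		TEEC_InvokeCommand(&sess, EXAMPLE_CMD_ID, &op, &org);
+ *		TEEC_CloseSession(&sess);
+ *	}
+ *	TEEC_FinalizeContext(&ctx);
+ */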
+TEEC_Result TEEC_RegisterSharedMemory(struct TEEC_Context *context,
+	struct TEEC_SharedMemory *sharedMem)
+{
+	if (!sharedMem)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	sharedMem->registered = 1;
+	sharedMem->d.fd = 0;
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(TEEC_RegisterSharedMemory);
+
+TEEC_Result TEEC_AllocateSharedMemory(struct TEEC_Context *context,
+	struct TEEC_SharedMemory *shared_memory)
+{
+	struct tee_shm_io shm_io;
+	int ret;
+	struct tee_shm *shm;
+	struct tee_context *ctx;
+
+	if (!context || !context->fd || !shared_memory)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	shm_io.size = shared_memory->size;
+	shm_io.flags = shared_memory->flags | TEEC_MEM_KAPI;
+
+	ctx = tee_core_uuid2ptr(context->fd);
+	if (ctx == NULL) {
+		pr_err("Invalid fd %d for tee context\n",
+			context->fd);
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	ret = tee_shm_alloc_io(ctx, &shm_io);
+	if (ret) {
+		pr_err("tee_shm_alloc_io(%zd) failed\n",
+			shared_memory->size);
+		return TEEC_ERROR_OUT_OF_MEMORY;
+	}
+
+	shared_memory->registered = 0;
+	shared_memory->flags = shm_io.flags;
+	shared_memory->d.fd = shm_io.fd_shm;
+
+	shm = (struct tee_shm *)(long)shm_io.fd_shm;
+	shared_memory->buffer = shm->resv.kaddr;
+
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(TEEC_AllocateSharedMemory);
+
+void TEEC_ReleaseSharedMemory(struct TEEC_SharedMemory *shared_memory)
+{
+	struct tee_shm *shm;
+
+	if (!shared_memory || shared_memory->registered)
+		return;
+
+	shm = (struct tee_shm *)(long)shared_memory->d.fd;
+	tee_shm_free_io(shm);
+
+	shared_memory->buffer = NULL;
+	shared_memory->d.fd = 0;
+}
+EXPORT_SYMBOL(TEEC_ReleaseSharedMemory);
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.c
new file mode 100644
index 0000000..7319912
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <asm/barrier.h>
+
+#include <linux/tee_core.h>
+#include <linux/tee_kernel_lowlevel_api.h>
+
+#include <arm_common/teesmc_st.h>
+#include <arm_common/teesmc.h>
+
+#include "tee_procfs.h"
+#include "tee_core_priv.h"
+
+#ifdef CONFIG_OF
+#include <linux/of_irq.h>
+#endif
+
+#define PROC_DBG(fmt, ...) do {} while (0)
+
+struct tkcore_trace {
+	int level;
+	const int idx;
+	const char *desc;
+};
+
+#define xstr(s) str(s)
+#define str(s) #s
+
+#define TKCORE_DEFINE_TRACE(__x, __idx) \
+	{ \
+		.level = 0, \
+		.idx = __idx, \
+		.desc = xstr(__x) \
+	}
+
+#define TEE_LOG_TIMEOUT_MS	(500)
+
+static struct mutex trace_mutex;
+
+struct proc_dir_entry *tee_proc_dir;
+
+struct proc_dir_entry *tee_proc_log_file;
+struct proc_dir_entry *tee_proc_trace;
+
+struct proc_dir_entry *tee_proc_drv_version;
+struct proc_dir_entry *tee_proc_tee_version;
+struct proc_dir_entry *tee_proc_teed_version;
+
+union tee_log_ctrl {
+	struct {
+		unsigned int tee_buf_addr;
+		unsigned int tee_buf_size;
+		unsigned int tee_write_pos;
+		unsigned int tee_read_pos;
+
+		unsigned int tee_buf_unread_size;
+
+		unsigned int tee_irq_count;
+		unsigned int tee_reader_alive;
+
+		unsigned int tee_write_seq;
+		unsigned int tee_read_seq;
+	} info;
+	unsigned char data[TEE_LOG_CTL_BUF_SIZE];
+};
+
+struct klog {
+	/* shm for log ctl */
+	union tee_log_ctrl *log_ctl;
+
+	/* tee ringbuffer for log */
+	char *tee_rb;
+	/* tee ring buffer length */
+	uint32_t tee_rb_len;
+
+	/*
+	 * whether the write position
+	 * has wrapped around the ring
+	 * buffer at least once
+	 */
+	bool overwrite;
+	/*
+	 * wait queue used to notify
+	 * readers when new log
+	 * data arrives
+	 */
+	wait_queue_head_t wq;
+
+	struct task_struct *ts;
+
+	/*
+	 * irq for tee to notify
+	 * nsdrv to fetch log
+	 */
+	int notify_irq;
+};
+
+struct ulog {
+	/*
+	 * local read sequence; only
+	 * updated by the user-space
+	 * reader that opened this
+	 * proc entry
+	 */
+	uint32_t rseq;
+
+	/*
+	 * buffer containing
+	 * temporary str to
+	 * pass to CA
+	 */
+	const char *tmpbuf;
+
+	/*
+	 * ptr to global
+	 * klog
+	 */
+	struct klog *klog;
+
+};
+
+static struct klog klog;
+
+static inline bool rb_overrun(uint32_t rseq,
+							uint32_t wseq,
+							uint32_t rb_len)
+{
+	return wseq - rseq > rb_len;
+}
+
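+/*
+ * Copy log data from the TEE ring buffer to a user buffer, inserting
+ * an "interrupted" marker whenever the reader has been overrun by the
+ * writer.  Returns the number of bytes copied.
+ */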
+static size_t ulog_rb(struct klog *klog,
+					struct ulog *ulog,
+					char __user *buf,
+					size_t count)
+{
+	size_t len = 0;
+	uint32_t wseq;
+
+	union tee_log_ctrl *log_ctl = klog->log_ctl;
+
+	static const char ulog_flag_intr[] =
+		"------ interrupted\n";
+
+	wseq = READ_ONCE(log_ctl->info.tee_write_seq);
+
+	while ((len != count) && ((ulog->rseq != wseq) ||
+		(ulog->tmpbuf && ulog->tmpbuf[0] != '\0'))) {
+		size_t copy_len;
+		unsigned long n;
+
+		if (ulog->tmpbuf == NULL) {
+			if (rb_overrun(ulog->rseq, wseq,
+						klog->tee_rb_len)) {
+				ulog->tmpbuf = ulog_flag_intr;
+			}
+		} else if (ulog->tmpbuf[0] == '\0') {
+			if (klog->overwrite ||
+				wseq >= klog->tee_rb_len) {
+				ulog->rseq = wseq - klog->tee_rb_len;
+			} else {
+				ulog->rseq = 0;
+			}
+			ulog->tmpbuf = NULL;
+		}
+
+		if (ulog->tmpbuf) {
+			size_t tmpbuf_len;
+			const char *tmpbuf;
+
+			tmpbuf = ulog->tmpbuf;
+			tmpbuf_len = strlen(tmpbuf);
+
+			copy_len = (uint32_t) min(tmpbuf_len, count - len);
+			n = copy_to_user(&buf[len], tmpbuf, copy_len);
+
+			if (copy_len == n) {
+				pr_warn("tkcoredrv: failed to copy flag to user\n");
+				return len;
+			}
+
+			ulog->tmpbuf = &tmpbuf[copy_len - n];
+			len += (copy_len - n);
+		} else {
+			copy_len = min((uint32_t) (count - len),
+					min(klog->tee_rb_len -
+						ulog->rseq % klog->tee_rb_len,
+						wseq - ulog->rseq));
+			n = copy_to_user(&buf[len],
+				&klog->tee_rb[ulog->rseq % klog->tee_rb_len],
+				copy_len);
+			if (copy_len == n) {
+				pr_warn("tkcoredrv: failed to copy klog to user\n");
+				return len;
+			}
+
+			ulog->rseq += (copy_len - n);
+			len += (copy_len - n);
+		}
+
+		wseq = READ_ONCE(log_ctl->info.tee_write_seq);
+	}
+
+	return len;
+}
+
+static ssize_t tee_log_read(struct file *file, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	ssize_t ret;
+	struct ulog *ulog;
+	struct klog *klog;
+
+	if (file == NULL || buf == NULL || pos == NULL)
+		return -EINVAL;
+
+	ulog = (struct ulog *) file->private_data;
+	if (ulog == NULL) {
+		pr_warn("tkcoredrv: file not open correctly\n");
+		return -EINVAL;
+	}
+
+	klog = ulog->klog;
+
+	if (file->f_flags & O_NONBLOCK) {
+		/*
+		 * currently nonblock file is
+		 * not supported, since
+		 * we might need to enter
+		 * wait queue
+		 */
+		return -EAGAIN;
+	}
+
+	if (ulog->tmpbuf == NULL) {
+		long r;
+
+		do {
+			r = wait_event_interruptible_timeout(klog->wq,
+				ulog->rseq !=
+				READ_ONCE(klog->log_ctl->info.tee_write_seq),
+				msecs_to_jiffies(TEE_LOG_TIMEOUT_MS));
+
+		} while (!r);
+
+		if (r < 0) {
+			/*
+			 * woke up due to signal, e.g.
+			 * calling program terminated
+			 * by CTRL-C
+			 */
+			return -EINTR;
+		}
+	}
+
+	ret = ulog_rb(klog, ulog, buf, count);
+	*pos += ret;
+
+	return ret;
+}
+
+int tee_log_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct ulog *ulog;
+
+	static const char ulog_flag_begin[] =
+		"------ beginning of tee\n";
+
+	ulog = kmalloc(
+			sizeof(struct ulog),
+			GFP_KERNEL);
+	if (ulog == NULL)
+		return -ENOMEM;
+
+	ulog->tmpbuf = ulog_flag_begin;
+	ulog->klog = &klog;
+
+	ret = nonseekable_open(inode, file);
+
+	if (unlikely(ret)) {
+		kfree(ulog);
+
+		pr_warn("tkcoredrv: open file failed with %d\n", ret);
+		return ret;
+	}
+
+	file->private_data = (void *) ulog;
+
+	return 0;
+}
+
+int tee_log_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations log_tee_ops = {
+	.read = tee_log_read,
+	.open = tee_log_open,
+	.release = tee_log_release,
+};
+
+/* Guarantee the idx defined here is consistent with TEE */
+struct tkcore_trace tkcore_traces[] = {
+	TKCORE_DEFINE_TRACE(ree_fs, 0),
+	TKCORE_DEFINE_TRACE(enc_fs, 1),
+	TKCORE_DEFINE_TRACE(rpmb_blk, 2),
+	TKCORE_DEFINE_TRACE(rpmb_fs, 3),
+	TKCORE_DEFINE_TRACE(ta_mgmt, 4),
+	TKCORE_DEFINE_TRACE(tee_comm, 5),
+	TKCORE_DEFINE_TRACE(tee_boot, 6),
+	TKCORE_DEFINE_TRACE(core_mm, 7),
+	TKCORE_DEFINE_TRACE(uart_print, 8),
+	TKCORE_DEFINE_TRACE(htfat, 9),
+	TKCORE_DEFINE_TRACE(spi, 10),
+};
+
+#define NTRACES ARRAY_SIZE(tkcore_traces)
+
+static int tee_trace_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	mutex_lock(&trace_mutex);
+
+
+	ret = nonseekable_open(inode, filp);
+	if (unlikely(ret)) {
+		mutex_unlock(&trace_mutex);
+		return ret;
+	}
+
+	filp->f_pos = 0UL;
+	filp->private_data = PDE_DATA(inode);
+
+	return 0;
+}
+
+#define TRACE_BUF_SIZE 128
+
+static char trace_buf[TRACE_BUF_SIZE + 1];
+
+static ssize_t tee_trace_read(struct file *file, char __user *buf, size_t count,
+				loff_t *pos)
+{
+	size_t i;
+	ssize_t r;
+
+	char *p = trace_buf;
+
+	loff_t __pos;
+
+	size_t len = count > TRACE_BUF_SIZE - 1 ? TRACE_BUF_SIZE - 1 : count;
+
+	if (buf == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < NTRACES; i++) {
+		int l = snprintf(p, trace_buf + len - p, "%s %d ",
+				 tkcore_traces[i].desc, tkcore_traces[i].level);
+
+		if (l <= 0)
+			return -EINVAL;
+
+		p += l;
+	}
+
+	*p++ = '\n';
+
+	if (pos == NULL)
+		__pos = 0;
+	else
+		__pos = *pos;
+
+	if (__pos >= p - trace_buf)
+		return 0;
+
+	r = copy_to_user(buf, trace_buf, (p - trace_buf) - __pos);
+
+	if (r < 0)
+		return r;
+
+	*pos += ((p - trace_buf) - __pos) - r;
+
+	return ((p - trace_buf) - __pos) - r;
+}
+
+static ssize_t tee_trace_write(struct file *filp, const char __user *buf,
+				   size_t count, loff_t *pos)
+{
+	char *p;
+	ssize_t r;
+	uint32_t level;
+	size_t i, len = count > TRACE_BUF_SIZE ? TRACE_BUF_SIZE : count;
+
+	struct smc_param param = { 0 };
+
+	struct tee *tee = filp->private_data;
+
+	if (tee == NULL) {
+		pr_warn("bad proc fp\n");
+		return -EINVAL;
+	}
+
+	(void) pos;
+
+	PROC_DBG("Count %lu Actual Count %lu\n", count, len);
+
+	r = copy_from_user(trace_buf, buf, len);
+	if (r < 0)
+		return r;
+
+	len = len - r;
+	trace_buf[len] = '\0';
+
+	PROC_DBG("Buffer: %s\n", trace_buf);
+
+	p = strchr(trace_buf, '=');
+	if (p == NULL) {
+		PROC_DBG("Expecting format: <trace_item>=<loglevel>[0|1]\n");
+		return -EINVAL;
+	}
+
+	*p = '\0';
+
+	r = kstrtouint(p + 1, 10, &level);
+	if (r < 0) {
+		PROC_DBG("Expecting format: <trace_item>=<loglevel>[0|1]\n");
+		return r;
+	}
+
+	for (i = 0; i < NTRACES; i++) {
+		if (strcmp(tkcore_traces[i].desc, trace_buf) == 0) {
+			param.a0 = TKCORE_FASTCALL_TRACE_CONFIG;
+			param.a1 = tkcore_traces[i].idx;
+			param.a2 = level;
+
+			if (tkcore_traces[i].level != level) {
+				tee->ops->call_tee(&param);
+
+				PROC_DBG("TRACE_CONFIG return value: 0x%x\n",
+					 param.a0);
+
+				if (param.a0 == TEESMC_RETURN_OK) {
+					tkcore_traces[i].level = level;
+					return len;
+				}
+
+				pr_warn(
+					"trace config Failed with 0x%llx\n",
+					(uint64_t) param.a0);
+
+				return -EINVAL;
+			}
+
+			PROC_DBG("Request level same with current level: %d\n",
+				tkcore_traces[i].level);
+
+			return len;
+		}
+	}
+
+	PROC_DBG("Can't find a matching trace_item\n");
+
+	return -EINVAL;
+}
+
+static int tee_trace_release(struct inode *inode, struct file *file)
+{
+	mutex_unlock(&trace_mutex);
+
+	return 0;
+}
+
+static const struct file_operations log_tee_trace_ops = {
+	.read = tee_trace_read,
+	.write = tee_trace_write,
+	.open = tee_trace_open,
+	.release = tee_trace_release
+};
+
+#include <version.h>
+
+static ssize_t copy_to_user_str(char __user *buf, ssize_t count, loff_t *pos,
+				const char *version)
+{
+	ssize_t r;
+	size_t cnt;
+	loff_t __pos;
+
+	__pos = *pos;
+	if (__pos > strlen(version) + 1) {
+		pr_warn("invalid pos: %lld len: %zu\n",
+			__pos, strlen(version));
+		return -EINVAL;
+	}
+
+	cnt = count < strlen(version) + 1 - __pos ?
+		count : strlen(version) + 1 - __pos;
+
+	r = copy_to_user(buf, version + __pos, cnt);
+
+	if (r < 0)
+		return r;
+
+	*pos += cnt - r;
+
+	return cnt - r;
+}
+
+static int tee_version_major, tee_version_minor;
+
+static ssize_t tee_version_read(struct file *file, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	char tee_version[20];
+
+	if (buf == NULL || pos == NULL)
+		return -EINVAL;
+
+	snprintf(tee_version, sizeof(tee_version),
+		 "0.%d.%d-gp\n", tee_version_major, tee_version_minor);
+
+	return copy_to_user_str(buf, count, pos, tee_version);
+}
+
+static const struct file_operations tee_version_ops = {
+	.read = tee_version_read,
+	.write = NULL,
+	.open = NULL,
+	.release = NULL
+};
+
+
+static ssize_t drv_version_read(struct file *file, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	if (buf == NULL || pos == NULL)
+		return -EINVAL;
+
+	return copy_to_user_str(buf, count, pos, tkcore_nsdrv_version);
+}
+
+static const struct file_operations drv_version_ops = {
+	.read = drv_version_read,
+	.write = NULL,
+	.open = NULL,
+	.release = NULL
+};
+
+#define TEED_VERSION_SIZE 50
+char teed_version[TEED_VERSION_SIZE + 1] = "unknown\n";
+
+static ssize_t teed_version_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	if (buf == NULL || pos == NULL)
+		return -EINVAL;
+
+	return copy_to_user_str(buf, count, pos, teed_version);
+}
+
+static ssize_t teed_version_write(struct file *filp, const char __user *buf,
+				  size_t count, loff_t *pos)
+{
+	ssize_t r;
+
+	if (count > TEED_VERSION_SIZE)
+		return -ENOMEM;
+
+	r = copy_from_user(teed_version, buf, count);
+	if (r < 0)
+		return r;
+
+	teed_version[count] = '\0';
+
+	return count;
+}
+
+static const struct file_operations teed_version_ops = {
+	.read = teed_version_read,
+	.write = teed_version_write,
+	.open = NULL,
+	.release = NULL
+};
+
+static void remove_entry(void)
+{
+	proc_remove(tee_proc_dir);
+
+	tee_proc_dir = NULL;
+	tee_proc_log_file = NULL;
+	tee_proc_trace = NULL;
+	tee_proc_drv_version = NULL;
+	tee_proc_tee_version = NULL;
+}
+
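+/*
+ * Create the /proc/tkcore entries: the log reader, the trace control
+ * node and the driver/OS/teed version nodes.  On any failure the
+ * entries created so far are removed again.
+ */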
+static int create_entry(struct tee *tee)
+{
+
+	tee_proc_dir = proc_mkdir("tkcore", NULL);
+	if (tee_proc_dir == NULL) {
+		pr_err("proc_mkdir tkcore failed\n");
+		return -1;
+	}
+
+	tee_proc_log_file = proc_create_data("tkcore_log",
+		0444, tee_proc_dir, &log_tee_ops, (void *) tee);
+
+	if (tee_proc_log_file == NULL) {
+		pr_err("proc_create failed\n");
+		goto err;
+	}
+
+	tee_proc_trace = proc_create_data("tkcore_trace",
+			0444, tee_proc_dir,
+			&log_tee_trace_ops, (void *) tee);
+
+	if (tee_proc_trace == NULL) {
+		pr_err("proc_create tkcore_trace failed\n");
+		goto err;
+	}
+
+	tee_proc_drv_version = proc_create_data(
+			"tkcore_drv_version",
+			0444, tee_proc_dir,
+			&drv_version_ops, (void *) tee);
+
+	if (tee_proc_drv_version == NULL) {
+		pr_err("proc_create tkcore_drv_version failed\n");
+		goto err;
+	}
+
+	tee_proc_tee_version = proc_create_data("tkcore_os_version",
+			0444, tee_proc_dir,
+			&tee_version_ops, (void *) tee);
+
+	if (tee_proc_tee_version == NULL) {
+		pr_err("proc_create tkcore_os_version failed\n");
+		goto err;
+	}
+
+	tee_proc_teed_version = proc_create_data("tkcore_teed_version",
+			0666, tee_proc_dir, &teed_version_ops, (void *) tee);
+
+	if (tee_proc_teed_version == NULL) {
+		pr_err("proc_create tkcore_teed_version failed\n");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	remove_entry();
+	return -1;
+}
+
+static irqreturn_t tkcore_log_irq_handler(int irq, void *dev_id)
+{
+	wake_up_all(&(klog.wq));
+	return IRQ_HANDLED;
+}
+
+#define LINE_LENGTH	120U
+
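+/*
+ * Drain the TEE ring buffer into the kernel log, one LINE_LENGTH-sized
+ * line at a time, advancing the shared read sequence as complete lines
+ * are printed.
+ */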
+static void log_rb(struct klog *klog)
+{
+	char *p;
+	uint32_t rseq, wseq;
+
+	const char tail[] = "<...>";
+	char line[LINE_LENGTH + sizeof(tail) + 1];
+
+	union tee_log_ctrl *log_ctl = klog->log_ctl;
+	char *rb = klog->tee_rb;
+
+	strcpy(&line[LINE_LENGTH], tail);
+
+	rseq = log_ctl->info.tee_read_seq;
+	wseq = READ_ONCE(log_ctl->info.tee_write_seq);
+
+	p = line;
+
+	do {
+		uint32_t copy_len, i, k;
+
+		if (rb_overrun(rseq, wseq, klog->tee_rb_len)) {
+			pr_info("---- interrupted\n");
+			rseq = log_ctl->info.tee_read_seq =
+				wseq - klog->tee_rb_len;
+		}
+
+		k = rseq % klog->tee_rb_len;
+
+		copy_len = min(LINE_LENGTH - (uint32_t) (p - line),
+				min(wseq - rseq, klog->tee_rb_len - k));
+
+		for (i = 0; i < copy_len &&
+				rb[k + i] != '\n' && rb[k + i] != '\0';
+				i++, p++) {
+			*p = rb[k + i];
+		}
+
+		rseq += i;
+
+		if (i != copy_len) {
+			/*
+			 * find an '\n' in buffer
+			 * we skip it
+			 */
+			++rseq;
+			*p = '\0';
+		}
+
+		if (((i == copy_len) && (p - line == LINE_LENGTH))
+				|| (i != copy_len)) {
+			pr_info("%s\n", line);
+			p = line;
+			log_ctl->info.tee_read_seq = rseq;
+		}
+
+		wseq = READ_ONCE(log_ctl->info.tee_write_seq);
+		if (wseq >= klog->tee_rb_len)
+			klog->overwrite = true;
+	} while (rseq != wseq);
+}
+
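+/*
+ * Kernel thread that waits (with a timeout) for the TEE to publish new
+ * log data and forwards it to the kernel log via log_rb().
+ */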
+static int logd(void *args)
+{
+	struct klog *klog = (struct klog *) args;
+	union tee_log_ctrl *log_ctl = klog->log_ctl;
+
+	++log_ctl->info.tee_reader_alive;
+
+	while (!kthread_should_stop()) {
+		/*
+		 * a memory barrier is implied by
+		 * wait_event_interruptible_timeout(..)
+		 */
+		if (wait_event_interruptible_timeout(klog->wq,
+			log_ctl->info.tee_read_seq !=
+			READ_ONCE(log_ctl->info.tee_write_seq),
+			msecs_to_jiffies(TEE_LOG_TIMEOUT_MS)) <= 0) {
+			/*
+			 * interrupted /
+			 * timeout and condition
+			 * evaluated to false
+			 */
+
+			continue;
+		}
+
+		log_rb(klog);
+	}
+
+	--log_ctl->info.tee_reader_alive;
+	return 0;
+}
+
+static int register_klog_irq(struct klog *klog)
+{
+	int r;
+	int irq_num;
+
+#ifdef CONFIG_OF
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL,
+						"trustkernel,tkcore");
+	if (node) {
+		irq_num = irq_of_parse_and_map(node, 0);
+	} else {
+		pr_err("tkcoredrv: node not found\n");
+		irq_num = -1;
+	}
+#else
+	irq_num = TEE_LOG_IRQ;
+#endif
+
+	if (irq_num < 0) {
+		pr_warn("tkcoredrv: unknown tee_log_irq id\n");
+		return -1;
+	}
+
+	pr_info("tkcoredrv: tee_log_irq id = %d\n",
+			irq_num);
+
+	r = request_irq(irq_num,
+		(irq_handler_t) tkcore_log_irq_handler,
+		IRQF_TRIGGER_RISING,
+		"tee_log_irq", NULL);
+
+	if (r != 0) {
+		pr_err("tkcoredrv: failed to register klog_irq with %d\n",
+			r);
+		return -1;
+	}
+
+	klog->notify_irq = irq_num;
+
+	return 0;
+}
+
+static int init_tos_version(struct tee *tee)
+{
+	struct smc_param param;
+
+	memset(&param, 0, sizeof(param));
+
+	/* get os revision */
+	param.a0 = TEESMC32_CALL_GET_OS_REVISION;
+	tee->ops->raw_call_tee(&param);
+
+	tee_version_major = param.a0;
+	tee_version_minor = param.a1;
+
+	pr_info("tkcoreos-rev: 0.%d.%d-gp\n",
+		tee_version_major, tee_version_minor);
+
+	return 0;
+}
+
+static int init_klog_shm_args(struct tee *tee,
+							unsigned long *shm_pa,
+							unsigned int *shm_len)
+{
+	struct smc_param param;
+
+	unsigned long pa;
+	unsigned int len;
+
+	if (shm_pa == NULL || shm_len == NULL)
+		return -1;
+
+	memset(&param, 0, sizeof(param));
+
+	param.a0 = TEESMC32_ST_FASTCALL_GET_LOGM_CONFIG;
+	tee->ops->raw_call_tee(&param);
+
+	if (param.a0 != TEESMC_RETURN_OK) {
+		pr_err("Log service not available: 0x%x",
+			(uint) param.a0);
+		return -1;
+	}
+
+	pa = param.a1;
+	len = param.a2;
+
+	if (len <= TEE_LOG_CTL_BUF_SIZE) {
+		pr_err("tkcoredrv: invalid shm_len: %u\n",
+				len);
+		return -1;
+	}
+
+	if ((pa & (PAGE_SIZE - 1)) ||
+		(len & (PAGE_SIZE - 1))) {
+		pr_err("tkcoredrv: invalid klog args\n");
+		pr_err("tkcoredrv: pa=0x%lx len=0x%x\n",
+				pa, len);
+		return -1;
+	}
+
+	*shm_pa = pa;
+	*shm_len = len;
+
+	return 0;
+}
+
+static int init_klog_shm(struct klog *klog,
+						unsigned long shm_pa,
+						unsigned int shm_len)
+{
+	char *rb;
+	uint32_t rb_len;
+	union tee_log_ctrl *log_ctl;
+
+	log_ctl = tee_map_cached_shm(shm_pa,
+								shm_len);
+
+	if (log_ctl == NULL) {
+		pr_err("tkcoredrv: failed to map shm\n");
+		pr_err("tkcoredrv: pa=0x%lx len=%u\n",
+				shm_pa, shm_len);
+		return -1;
+	}
+
+	rb = (char *) log_ctl + TEE_LOG_CTL_BUF_SIZE;
+	rb_len = log_ctl->info.tee_buf_size;
+
+	if (rb_len != shm_len - TEE_LOG_CTL_BUF_SIZE) {
+		pr_err("tkcoredrv: unexpected shm length: %u\n",
+				shm_len);
+		tee_unmap_cached_shm(log_ctl);
+		return -1;
+	}
+
+	log_ctl->info.tee_reader_alive = 0;
+
+	klog->log_ctl = log_ctl;
+
+	klog->tee_rb = rb;
+	klog->tee_rb_len = rb_len;
+
+	return 0;
+}
+
+static int init_klog(struct klog *klog, struct tee *tee)
+{
+	unsigned long shm_pa;
+	unsigned int shm_len;
+
+	BUILD_BUG_ON(sizeof(union tee_log_ctrl)
+			!= TEE_LOG_CTL_BUF_SIZE);
+
+	if (init_klog_shm_args(tee, &shm_pa,
+						&shm_len) < 0) {
+		return -1;
+	}
+
+	if (init_klog_shm(klog,
+					shm_pa,
+					shm_len) < 0) {
+		return -1;
+	}
+
+	klog->overwrite = false;
+
+	init_waitqueue_head(&klog->wq);
+
+	if (register_klog_irq(klog) < 0)
+		goto err_unmap_shm;
+
+
+	klog->ts = kthread_run(logd,
+				(void *) klog, "tee-log");
+	if (klog->ts == NULL) {
+		pr_err("tkcoredrv: Failed to create kthread\n");
+		goto err_free_irq;
+	}
+
+	return 0;
+
+err_free_irq:
+	if (klog->notify_irq > 0)
+		free_irq(klog->notify_irq, NULL);
+
+err_unmap_shm:
+	tee_unmap_cached_shm(klog->log_ctl);
+
+	memset(klog, 0, sizeof(*klog));
+	/*
+	 * set notify_irq to
+	 * un-initialized state
+	 */
+	klog->notify_irq = -1;
+
+	return -1;
+}
+
+/* TODO wait for kthread logwq
+ * to exit
+ */
+static void free_klog(struct klog *klog)
+{
+	if (klog->notify_irq > 0) {
+		free_irq(klog->notify_irq, NULL);
+		klog->notify_irq = -1;
+	}
+
+	kthread_stop(klog->ts);
+	klog->ts = NULL;
+
+	/*
+	 * wake up all waiters
+	 * in wq, since we're
+	 * about to leave
+	 */
+	wake_up_all(&(klog->wq));
+
+	/*
+	 * we don't unmap klog->tee_rb,
+	 * because it's not
+	 * quite easy to check whether
+	 * all user process using
+	 * procfs has finished
+	 */
+}
+
+int tee_init_procfs(struct tee *tee)
+{
+	mutex_init(&trace_mutex);
+
+	init_tos_version(tee);
+
+	if (create_entry(tee) < 0)
+		return -1;
+
+	if (init_klog(&klog, tee) < 0)
+		goto out_remove_entry;
+
+	return 0;
+
+out_remove_entry:
+	remove_entry();
+
+	return -1;
+}
+
+void tee_exit_procfs(void)
+{
+	remove_entry();
+	free_klog(&klog);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.h
new file mode 100644
index 0000000..9f4cac3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_procfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_PROCFS_H__
+#define __TEE_PROCFS_H__
+
+#define TEE_LOG_IRQ	280
+
+#define TEE_LOG_CTL_BUF_SIZE	256
+
+#define TEE_LOG_SIGNAL_THRESHOLD_SIZE 1024
+
+#define TEE_CRASH_MAGIC_NO	0xdeadbeef
+
+struct tee;
+
+int tee_init_procfs(struct tee *tee);
+void tee_exit_procfs(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_session.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_session.c
new file mode 100644
index 0000000..cfa87de
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_session.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+
+#include "tee_shm.h"
+#include "tee_core_priv.h"
+#include "tee_tui_hal.h"
+
+static int _init_tee_cmd(struct tee_session *sess, struct tee_cmd_io *cmd_io,
+			 struct tee_cmd *cmd);
+static void _update_client_tee_cmd(struct tee_session *sess,
+				   struct tee_cmd_io *cmd_io,
+				   struct tee_cmd *cmd);
+static void _release_tee_cmd(struct tee_session *sess, struct tee_cmd *cmd);
+
+/******************************************************************************/
+
+static inline bool flag_set(int val, int flags)
+{
+	return (val & flags) == flags;
+}
+
+static inline bool is_mapped_temp(int flags)
+{
+	return flag_set(flags, TEE_SHM_MAPPED | TEE_SHM_TEMP);
+}
+
+/******************************************************************************/
+static int tee_copy_from_user(struct tee_context *ctx, void *to, void *from,
+				size_t size)
+{
+	if ((!to) || (!from) || (!size))
+		return 0;
+
+	if (ctx->usr_client)
+		return copy_from_user(to, from, size);
+
+	memcpy(to, from, size);
+	return 0;
+}
+
+static int tee_copy_to_user(struct tee_context *ctx, void *to, void *from,
+				size_t size)
+{
+	if ((!to) || (!from) || (!size))
+		return 0;
+
+	if (ctx->usr_client)
+		return copy_to_user(to, from, size);
+
+	memcpy(to, from, size);
+	return 0;
+}
+
+/* Defined as macro to let the put_user macro see the types */
+#define tee_put_user(ctx, from, to)				\
+	do {							\
+		if ((ctx)->usr_client)				\
+			put_user(from, to);			\
+		else						\
+			*to = from;				\
+	} while (0)
+
+static inline int tee_session_is_opened(struct tee_session *sess)
+{
+	if (sess && sess->sessid)
+		return (sess->sessid != 0);
+	return 0;
+}
+
+static int tee_session_open_be(struct tee_session *sess,
+				struct tee_cmd_io *cmd_io)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_cmd cmd;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	tee = sess->ctx->tee;
+
+
+	sess->sessid = 0;
+	ret = _init_tee_cmd(sess, cmd_io, &cmd);
+	if (ret) {
+		pr_err(
+			"init tee command failed with 0x%08x\n",
+			ret);
+		goto out;
+	}
+
+	ret = tee->ops->open(sess, &cmd);
+	if (ret == 0)
+		_update_client_tee_cmd(sess, cmd_io, &cmd);
+	else {
+		/* propagate the reason of the error */
+		cmd_io->origin = cmd.origin;
+		cmd_io->err = cmd.err;
+	}
+
+out:
+	_release_tee_cmd(sess, &cmd);
+	return ret;
+}
+
+int tee_session_invoke_be(struct tee_session *sess, struct tee_cmd_io *cmd_io)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_cmd cmd;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	tee = sess->ctx->tee;
+
+	ret = _init_tee_cmd(sess, cmd_io, &cmd);
+	if (ret)
+		goto out;
+
+	ret = tee->ops->invoke(sess, &cmd);
+	if (!ret)
+		_update_client_tee_cmd(sess, cmd_io, &cmd);
+	else {
+		/* propagate the reason of the error */
+		cmd_io->origin = cmd.origin;
+		cmd_io->err = cmd.err;
+	}
+
+out:
+	_release_tee_cmd(sess, &cmd);
+	return ret;
+}
+
+static int tee_session_close_be(struct tee_session *sess)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	tee = sess->ctx->tee;
+
+
+	ret = tee->ops->close(sess);
+	sess->sessid = 0;
+
+	return ret;
+}
+
+static int tee_session_cancel_be(struct tee_session *sess,
+				 struct tee_cmd_io *cmd_io)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_cmd cmd;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	tee = sess->ctx->tee;
+
+
+	ret = _init_tee_cmd(sess, cmd_io, &cmd);
+	if (ret)
+		goto out;
+
+	ret = tee->ops->cancel(sess, &cmd);
+
+out:
+	_release_tee_cmd(sess, &cmd);
+	return ret;
+}
+
+static int tee_do_invoke_command(struct tee_session *sess,
+				 struct tee_cmd_io __user *u_cmd)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_cmd_io k_cmd;
+	struct tee_context *ctx;
+
+	WARN_ON(!sess->ctx);
+	WARN_ON(!sess->ctx->tee);
+	ctx = sess->ctx;
+	tee = sess->ctx->tee;
+
+	WARN_ON(!sess->sessid);
+
+	if (tee_copy_from_user
+		(ctx, &k_cmd, (void *)u_cmd, sizeof(struct tee_cmd_io))) {
+		pr_err("tee_copy_from_user failed\n");
+		goto exit;
+	}
+
+	if ((k_cmd.op == NULL) || (k_cmd.uuid != NULL) ||
+		(k_cmd.data != NULL) || (k_cmd.data_size != 0)) {
+		pr_err(
+			"op or/and data parameters are not valid\n");
+		goto exit;
+	}
+
+	ret = tee_session_invoke_be(sess, &k_cmd);
+	if (ret)
+		pr_err("tee_invoke_command failed with %d\n", ret);
+
+	tee_put_user(ctx, k_cmd.err, &u_cmd->err);
+	tee_put_user(ctx, k_cmd.origin, &u_cmd->origin);
+
+exit:
+	return ret;
+}
+
+static int tee_do_cancel_cmd(struct tee_session *sess,
+				struct tee_cmd_io __user *u_cmd)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_cmd_io k_cmd;
+	struct tee_context *ctx;
+
+	WARN_ON(!sess->ctx);
+	WARN_ON(!sess->ctx->tee);
+	ctx = sess->ctx;
+	tee = sess->ctx->tee;
+
+
+	WARN_ON(!sess->sessid);
+
+	if (tee_copy_from_user
+		(ctx, &k_cmd, (void *)u_cmd, sizeof(struct tee_cmd_io))) {
+		pr_err("cancel_cmd: tee_copy_from_user failed\n");
+		goto exit;
+	}
+
+	if ((k_cmd.op == NULL) || (k_cmd.uuid != NULL) ||
+		(k_cmd.data != NULL) || (k_cmd.data_size != 0)) {
+		pr_err("op or/and data parameters are not valid\n");
+		goto exit;
+	}
+
+	ret = tee_session_cancel_be(sess, &k_cmd);
+	if (ret)
+		pr_err("tee_session_cancel_be failed with %d\n", ret);
+
+	tee_put_user(ctx, k_cmd.err, &u_cmd->err);
+	tee_put_user(ctx, k_cmd.origin, &u_cmd->origin);
+
+exit:
+	return ret;
+}
+
+static int tee_do_kernel_cancel_cmd(struct tee_session *sess,
+					struct tee_cmd_io *k_cmd)
+{
+	int ret = -EINVAL;
+	struct tee *tee;
+	struct tee_context *ctx;
+	struct tee_cmd cmd;
+
+	WARN_ON(!sess->ctx);
+	WARN_ON(!sess->ctx->tee);
+	ctx = sess->ctx;
+	tee = sess->ctx->tee;
+
+
+	WARN_ON(!sess->sessid);
+
+	if ((k_cmd == NULL || k_cmd->op == NULL) || (k_cmd->uuid != NULL) ||
+		(k_cmd->data != NULL) || (k_cmd->data_size != 0)) {
+		pr_err(
+			"op or/and data parameters are not valid\n");
+		goto exit;
+	}
+
+	cmd.cmd = k_cmd->cmd;
+	cmd.origin = TEEC_ORIGIN_TEE;
+	cmd.err = TEEC_ERROR_BAD_PARAMETERS;
+
+	cmd.param.type_original = 0;
+
+	ret = tee->ops->cancel(sess, &cmd);
+
+	if (ret)
+		pr_err("tee_cancel failed\n");
+
+exit:
+	return ret;
+}
+
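+/*
+ * TUI state shared with the user-space helper: commands are handed
+ * over through g_tui_obj.cmd_id and the io_comp completion, which
+ * teec_wait_cmd() blocks on until send_cmd_to_user() signals it.
+ */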
+DECLARE_COMPLETION(io_comp);
+static int flag_tui_obj;
+
+struct tui_obj {
+	struct tee_cmd_io op;
+	struct tee_session *sess;
+	uint32_t cmd_id;
+	uint32_t status;
+	struct mutex lock;
+} g_tui_obj;
+
+static uint32_t send_cmd_to_user(uint32_t command_id)
+{
+	uint32_t ret = 0;
+
+	g_tui_obj.cmd_id = command_id;
+	complete(&io_comp);
+
+	return ret;
+}
+
+bool teec_notify_event(uint32_t event_type)
+{
+	bool ret = false;
+
+	/* Cancel the TUI session if exists */
+	if (g_tui_obj.status)
+		ret = tee_do_kernel_cancel_cmd(g_tui_obj.sess, &g_tui_obj.op);
+
+	return ret;
+}
+EXPORT_SYMBOL(teec_notify_event);
+
+int teec_wait_cmd(uint32_t *cmd_id)
+{
+	/* Wait for signal from DCI handler */
+	wait_for_completion(&io_comp);
+#ifdef INIT_COMPLETION
+	INIT_COMPLETION(io_comp);
+#else
+	io_comp.done = 0;
+#endif
+
+	*cmd_id = g_tui_obj.cmd_id;
+	return 0;
+}
+EXPORT_SYMBOL(teec_wait_cmd);
+
+static long tee_session_internal_ioctl(struct tee_session *sess,
+					unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct tee *tee;
+
+	tee = sess->ctx->tee;
+
+
+	switch (cmd) {
+	case TEE_INVOKE_COMMAND_IOC:
+		ret = tee_do_invoke_command(sess,
+			(struct tee_cmd_io __user *) arg);
+		break;
+	case TEE_REQUEST_CANCELLATION_IOC:
+		ret = tee_do_cancel_cmd(sess,
+			(struct tee_cmd_io __user *) arg);
+		break;
+	case TEE_TUI_OPEN_SESSION_IOC:
+		pr_debug("TEE_TUI_OPEN_SESSION_IOC.\n");
+		if (flag_tui_obj == 0) {
+			mutex_init(&g_tui_obj.lock);
+			flag_tui_obj = 1;
+		}
+		mutex_lock(&g_tui_obj.lock);
+		if (g_tui_obj.status != 0 ||
+			(struct tee_cmd_io __user *)arg == NULL) {
+			ret = -EBUSY;
+			mutex_unlock(&g_tui_obj.lock);
+			pr_warn(
+				"TEE_TUI_OPEN_SESSION_IOC: tui busy or invalid argument\n");
+			break;
+		}
+		if (tee_copy_from_user(sess->ctx, &g_tui_obj.op, (void *)arg,
+					sizeof(struct tee_cmd_io))) {
+			ret = -EINVAL;
+			mutex_unlock(&g_tui_obj.lock);
+			break;
+		}
+		/* reset part of op */
+		g_tui_obj.op.uuid = NULL;
+		g_tui_obj.op.data = NULL;
+		g_tui_obj.op.data_size = 0;
+
+		g_tui_obj.sess = sess;
+		g_tui_obj.status = 1;
+		mutex_unlock(&g_tui_obj.lock);
+		/* Start android TUI activity */
+		ret = send_cmd_to_user(TEEC_TUI_CMD_START_ACTIVITY);
+		if (ret != 0) {
+			mutex_lock(&g_tui_obj.lock);
+			g_tui_obj.status = 0;
+			mutex_unlock(&g_tui_obj.lock);
+			break;
+		}
+		/* Deactivate linux UI drivers */
+		ret = hal_tui_deactivate();
+		if (ret != 0) {
+			mutex_lock(&g_tui_obj.lock);
+			g_tui_obj.status = 0;
+			mutex_unlock(&g_tui_obj.lock);
+			send_cmd_to_user(TEEC_TUI_CMD_STOP_ACTIVITY);
+			break;
+		}
+		break;
+	case TEE_TUI_CLOSE_SESSION_IOC:
+		pr_debug(
+			"TEE_TUI_CLOSE_SESSION_IOC.\n");
+		if (flag_tui_obj == 0) {
+			mutex_init(&g_tui_obj.lock);
+			flag_tui_obj = 1;
+		}
+		mutex_lock(&g_tui_obj.lock);
+		g_tui_obj.status = 0;
+		mutex_unlock(&g_tui_obj.lock);
+		/* Activate linux UI drivers */
+		ret = hal_tui_activate();
+		/* Stop android TUI activity */
+		ret = send_cmd_to_user(TEEC_TUI_CMD_STOP_ACTIVITY);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+
+	return ret;
+}
+
+static long tee_session_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct tee_session *sess = filp->private_data;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	return tee_session_internal_ioctl(sess, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+
+static long tee_session_compat_ioctl(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	struct tee_session *sess = filp->private_data;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+
+	switch (cmd) {
+	case TEE_INVOKE_COMMAND_IOC:
+	case TEE_REQUEST_CANCELLATION_IOC:
+	case TEE_TUI_OPEN_SESSION_IOC:
+		if (convert_compat_tee_cmd((struct tee_cmd_io __user *) arg))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
+	return tee_session_internal_ioctl(sess, cmd, arg);
+}
+
+#endif
+
+static int tee_session_release(struct inode *inode, struct file *filp)
+{
+	struct tee_session *sess = filp->private_data;
+	int ret = 0;
+	struct tee *tee;
+
+	WARN_ON(!sess || !sess->ctx || !sess->ctx->tee);
+	tee = sess->ctx->tee;
+
+	ret = tee_session_close_and_destroy(sess);
+	return ret;
+}
+
+const struct file_operations tee_session_fops = {
+	.owner = THIS_MODULE,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tee_session_compat_ioctl,
+#endif
+	.unlocked_ioctl = tee_session_ioctl,
+	.release = tee_session_release,
+};
+
+int tee_session_close_and_destroy(struct tee_session *sess)
+{
+	int ret;
+	struct tee *tee;
+	struct tee_context *ctx;
+
+	if (!sess || !sess->ctx || !sess->ctx->tee)
+		return -EINVAL;
+
+	ctx = sess->ctx;
+	tee = ctx->tee;
+
+
+	if (!tee_session_is_opened(sess))
+		return -EINVAL;
+
+	ret = tee_session_close_be(sess);
+
+	mutex_lock(&tee->lock);
+	tee_dec_stats(&tee->stats[TEE_STATS_SESSION_IDX]);
+	list_del(&sess->entry);
+
+	devm_kfree(_DEV(tee), sess);
+	tee_context_put(ctx);
+	tee_put(tee);
+	mutex_unlock(&tee->lock);
+
+	return ret;
+}
+
+struct tee_session *tee_session_create_and_open(struct tee_context *ctx,
+		struct tee_cmd_io *cmd_io)
+{
+	int ret = 0;
+	struct tee_session *sess;
+	struct tee *tee;
+
+	WARN_ON(!ctx->tee);
+
+	tee = ctx->tee;
+
+	ret = tee_get(tee);
+	if (ret)
+		return ERR_PTR(-EBUSY);
+
+	sess = devm_kzalloc(_DEV(tee), sizeof(struct tee_session), GFP_KERNEL);
+	if (!sess) {
+		tee_put(tee);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	tee_context_get(ctx);
+	sess->ctx = ctx;
+
+	ret = tee_session_open_be(sess, cmd_io);
+	mutex_lock(&tee->lock);
+	if (ret || !sess->sessid || cmd_io->err) {
+		pr_err(
+			"ERROR ret=%d (err=0x%08x, org=%d,  sessid=0x%08x)\n",
+			ret, cmd_io->err,
+			cmd_io->origin, sess->sessid);
+		tee_put(tee);
+		tee_context_put(ctx);
+		devm_kfree(_DEV(tee), sess);
+		mutex_unlock(&tee->lock);
+		if (ret)
+			return ERR_PTR(ret);
+		else
+			return NULL;
+	}
+
+	tee_inc_stats(&tee->stats[TEE_STATS_SESSION_IDX]);
+	list_add_tail(&sess->entry, &ctx->list_sess);
+	mutex_unlock(&tee->lock);
+
+	return sess;
+}
+
+int tee_session_create_fd(struct tee_context *ctx, struct tee_cmd_io *cmd_io)
+{
+	int ret;
+	struct tee_session *sess;
+	struct tee *tee = ctx->tee;
+
+	(void) tee;
+
+	WARN_ON(cmd_io->fd_sess > 0);
+
+
+	sess = tee_session_create_and_open(ctx, cmd_io);
+	if (IS_ERR_OR_NULL(sess)) {
+		ret = PTR_ERR(sess);
+		pr_warn(
+			"ERROR can't create the session (ret=%d, err=0x%08x, org=%d)\n",
+			ret, cmd_io->err, cmd_io->origin);
+		cmd_io->fd_sess = -1;
+		goto out;
+	}
+
+	/* Retrieve a fd */
+	cmd_io->fd_sess = -1;
+	ret = anon_inode_getfd("tee_session",
+			&tee_session_fops, sess, O_CLOEXEC);
+	if (ret < 0) {
+		pr_err("ERROR can't get a fd (ret=%d)\n",
+			ret);
+		tee_session_close_and_destroy(sess);
+		goto out;
+	}
+	cmd_io->fd_sess = ret;
+	ret = 0;
+
+out:
+	return ret;
+}
+
+static bool tee_session_is_supported_type(struct tee_session *sess, int type)
+{
+	switch (type) {
+	case TEEC_NONE:
+	case TEEC_VALUE_INPUT:
+	case TEEC_VALUE_OUTPUT:
+	case TEEC_VALUE_INOUT:
+	case TEEC_MEMREF_TEMP_INPUT:
+	case TEEC_MEMREF_TEMP_OUTPUT:
+	case TEEC_MEMREF_TEMP_INOUT:
+	case TEEC_MEMREF_WHOLE:
+	case TEEC_MEMREF_PARTIAL_INPUT:
+	case TEEC_MEMREF_PARTIAL_OUTPUT:
+	case TEEC_MEMREF_PARTIAL_INOUT:
+		return true;
+	default:
+		pr_err("type is invalid (type %02x)\n", type);
+		return false;
+	}
+}
+
+static int to_memref_type(int flags)
+{
+	if (flag_set(flags, TEEC_MEM_INPUT | TEEC_MEM_OUTPUT))
+		return TEEC_MEMREF_TEMP_INOUT;
+
+	if (flag_set(flags, TEEC_MEM_INPUT))
+		return TEEC_MEMREF_TEMP_INPUT;
+
+	if (flag_set(flags, TEEC_MEM_OUTPUT))
+		return TEEC_MEMREF_TEMP_OUTPUT;
+
+	pr_warn("tkcoredrv: %s: bad flags=%x\n", __func__, flags);
+	return 0;
+}
+
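+/*
+ * Copy the TEEC_Operation referenced by cmd_io from user space and build the
+ * kernel-side tee_cmd: value parameters are copied as-is, temporary memrefs
+ * get a freshly allocated shm buffer, and whole/partial memrefs are resolved
+ * via tkcore_shm_get() with a temporary copy as fallback. A shm buffer is
+ * also allocated for the UUID when one is supplied.
+ */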
+static int _init_tee_cmd(struct tee_session *sess, struct tee_cmd_io *cmd_io,
+			 struct tee_cmd *cmd)
+{
+	int ret = -EINVAL;
+	int idx;
+	struct TEEC_Operation op;
+	struct tee_data *param = &cmd->param;
+	struct tee *tee;
+	struct tee_context *ctx;
+
+	WARN_ON(!sess->ctx);
+	WARN_ON(!sess->ctx->tee);
+	ctx = sess->ctx;
+	tee = sess->ctx->tee;
+
+
+	memset(cmd, 0, sizeof(struct tee_cmd));
+
+	cmd->cmd = cmd_io->cmd;
+	cmd->origin = TEEC_ORIGIN_TEE;
+	cmd->err = TEEC_ERROR_BAD_PARAMETERS;
+	cmd_io->origin = cmd->origin;
+	cmd_io->err = cmd->err;
+
+	if (tee_context_copy_from_client(ctx, &op, cmd_io->op, sizeof(op)))
+		goto out;
+
+	cmd->param.type_original = op.paramTypes;
+
+	for (idx = 0; idx < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++idx) {
+		uint32_t offset = 0;
+		uint32_t size = 0;
+		int type = TEEC_PARAM_TYPE_GET(op.paramTypes, idx);
+
+		switch (type) {
+		case TEEC_NONE:
+			break;
+
+		case TEEC_VALUE_INPUT:
+		case TEEC_VALUE_OUTPUT:
+		case TEEC_VALUE_INOUT:
+			param->params[idx].value = op.params[idx].value;
+			break;
+
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			param->params[idx].shm =
+				tee_context_create_tmpref_buffer(
+				ctx, op.params[idx].tmpref.size,
+				op.params[idx].tmpref.buffer,
+				type);
+			if (IS_ERR_OR_NULL(param->params[idx].shm))
+				goto out;
+
+			break;
+
+		case TEEC_MEMREF_PARTIAL_INPUT:
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+		case TEEC_MEMREF_WHOLE:
+			if (tee_copy_from_user(ctx, &param->c_shm[idx],
+						op.params[idx].memref.parent,
+						sizeof(param->c_shm[idx])))
+				goto out;
+
+			if (type == TEEC_MEMREF_WHOLE) {
+				offset = 0;
+				size = param->c_shm[idx].size;
+			} else { /* for PARTIAL, check the size */
+				offset = op.params[idx].memref.offset;
+				size = op.params[idx].memref.size;
+				if (param->c_shm[idx].size < size + offset) {
+					pr_err(
+						"A PARTIAL parameter is bigger than the parent %zd < %d + %d\n",
+						param->c_shm[idx].size, size,
+						offset);
+					goto out;
+				}
+			}
+
+			type = to_memref_type(param->c_shm[idx].flags);
+			if (type == 0)
+				goto out;
+
+			param->params[idx].shm = tkcore_shm_get(ctx,
+					&param->c_shm[idx], size, offset);
+
+			if (IS_ERR_OR_NULL(param->params[idx].shm)) {
+				param->params[idx].shm =
+					tee_context_create_tmpref_buffer(
+							ctx, size,
+							param->c_shm[idx].buffer
+								+ offset, type);
+
+				if (IS_ERR_OR_NULL(param->params[idx].shm))
+					goto out;
+			}
+
+			break;
+		default:
+			WARN_ON(1);
+		}
+
+		param->type |= (type << (idx * 4));
+	}
+
+	if (cmd_io->uuid != NULL) {
+		cmd->uuid = tee_context_alloc_shm_tmp(sess->ctx,
+			sizeof(*cmd_io->uuid), cmd_io->uuid, TEEC_MEM_INPUT);
+		if (IS_ERR_OR_NULL(cmd->uuid)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	ret = 0;
+
+out:
+	if (ret)
+		_release_tee_cmd(sess, cmd);
+
+	return ret;
+}
+
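+/*
+ * Propagate results back to the user-space TEEC_Operation once the command
+ * has returned: output/inout values are copied back, updated memref sizes
+ * are reported, and data held in temporary kernel buffers is copied back
+ * into the client buffers.
+ */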
+static void _update_client_tee_cmd(struct tee_session *sess,
+				   struct tee_cmd_io *cmd_io,
+				   struct tee_cmd *cmd)
+{
+	int idx;
+	struct tee_context *ctx;
+	struct TEEC_Operation tmp_op;
+
+	WARN_ON(!cmd_io);
+	WARN_ON(!cmd_io->op);
+	WARN_ON(!cmd);
+	WARN_ON(!sess->ctx);
+	ctx = sess->ctx;
+
+
+	cmd_io->origin = cmd->origin;
+	cmd_io->err = cmd->err;
+
+	if (tee_context_copy_from_client(ctx,
+			&tmp_op, cmd_io->op, sizeof(tmp_op))) {
+		pr_err("Failed to copy op from client\n");
+		return;
+	}
+
+	if (cmd->param.type_original == TEEC_PARAM_TYPES(TEEC_NONE,
+			TEEC_NONE, TEEC_NONE, TEEC_NONE))
+		return;
+
+	for (idx = 0; idx < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++idx) {
+		int type = TEEC_PARAM_TYPE_GET(cmd->param.type_original, idx);
+		int offset = 0;
+		size_t size;
+		size_t size_new;
+		struct TEEC_SharedMemory *parent;
+
+		WARN_ON(!tee_session_is_supported_type(sess, type));
+		switch (type) {
+		case TEEC_NONE:
+		case TEEC_VALUE_INPUT:
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_PARTIAL_INPUT:
+			break;
+		case TEEC_VALUE_OUTPUT:
+		case TEEC_VALUE_INOUT:
+			if (tee_copy_to_user
+				(ctx, &cmd_io->op->params[idx].value,
+				&cmd->param.params[idx].value,
+				sizeof(tmp_op.params[idx].value))) {
+
+				pr_err(
+					"can't update %d result to user\n",
+					idx);
+			}
+			break;
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+			/* Returned updated size */
+			size_new = cmd->param.params[idx].shm->size_req;
+			if (size_new !=
+				tmp_op.params[idx].tmpref.size) {
+				tee_put_user(ctx, size_new,
+					&cmd_io->op->params[idx].tmpref.size);
+			}
+
+			/* ensure we do not exceed the shared buffer length */
+			if (size_new > tmp_op.params[idx].tmpref.size) {
+				pr_err(
+					"  *** Wrong returned size from %d:%zd > %zd\n",
+					idx, size_new,
+					tmp_op.params[idx].tmpref.size);
+			} else if (tee_copy_to_user
+				   (ctx,
+					tmp_op.params[idx].tmpref.buffer,
+					cmd->param.params[idx].shm->resv.kaddr,
+					size_new)) {
+				pr_err(
+					"can't update %d result to user\n",
+					idx);
+			}
+			break;
+
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+		case TEEC_MEMREF_WHOLE:
+			parent = &cmd->param.c_shm[idx];
+			if (type == TEEC_MEMREF_WHOLE) {
+				offset = 0;
+				size = parent->size;
+			} else {
+				offset = tmp_op.params[idx].memref.offset;
+				size = tmp_op.params[idx].memref.size;
+			}
+
+			/* Returned updated size */
+			size_new = cmd->param.params[idx].shm->size_req;
+			tee_put_user(ctx, size_new,
+					&cmd_io->op->params[idx].memref.size);
+
+			/*
+			 * If we allocated a tmpref buffer,
+			 * copy back data to the user buffer
+			 */
+			if (is_mapped_temp(cmd->param.params[idx].shm->flags)) {
+				if (parent->buffer &&
+					offset + size_new <= parent->size) {
+					if (tee_copy_to_user(ctx,
+						parent->buffer + offset,
+						cmd->param.params[idx].shm
+							->resv.kaddr,
+						size_new)) {
+						pr_err(
+							"can't update %d data to user\n",
+							idx);
+					}
+				}
+			}
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+
+}
+
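+/*
+ * Undo _init_tee_cmd(): free the UUID shm buffer and, for each parameter,
+ * either free the temporary buffer or drop the reference taken on the
+ * client-provided shared memory.
+ */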
+static void _release_tee_cmd(struct tee_session *sess, struct tee_cmd *cmd)
+{
+	int idx;
+	struct tee_context *ctx;
+
+	WARN_ON(!cmd);
+	WARN_ON(!sess);
+	WARN_ON(!sess->ctx);
+	WARN_ON(!sess->ctx->tee);
+
+	ctx = sess->ctx;
+
+
+	tkcore_shm_free(cmd->uuid);
+
+	if (cmd->param.type_original == TEEC_PARAM_TYPES(TEEC_NONE,
+			TEEC_NONE, TEEC_NONE, TEEC_NONE))
+		goto out;
+
+	for (idx = 0; idx < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++idx) {
+		int type = TEEC_PARAM_TYPE_GET(cmd->param.type_original, idx);
+		struct tee_shm *shm;
+
+		switch (type) {
+		case TEEC_NONE:
+		case TEEC_VALUE_INPUT:
+		case TEEC_VALUE_OUTPUT:
+		case TEEC_VALUE_INOUT:
+			break;
+		case TEEC_MEMREF_TEMP_INPUT:
+		case TEEC_MEMREF_TEMP_OUTPUT:
+		case TEEC_MEMREF_TEMP_INOUT:
+		case TEEC_MEMREF_WHOLE:
+		case TEEC_MEMREF_PARTIAL_INPUT:
+		case TEEC_MEMREF_PARTIAL_OUTPUT:
+		case TEEC_MEMREF_PARTIAL_INOUT:
+			if (IS_ERR_OR_NULL(cmd->param.params[idx].shm))
+				break;
+
+			shm = cmd->param.params[idx].shm;
+
+			if (is_mapped_temp(shm->flags))
+				tkcore_shm_free(shm);
+			else
+				tkcore_shm_put(ctx, shm);
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+
+out:
+	memset(cmd, 0, sizeof(struct tee_cmd));
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.c
new file mode 100644
index 0000000..ceb3000
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/hugetlb.h>
+#include <linux/version.h>
+#include <linux/anon_inodes.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include "tee_core_priv.h"
+#include "tee_shm.h"
+
+struct tee_shm_attach {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+	bool is_mapped;
+};
+
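+/*
+ * Allocate a physically contiguous buffer through the backend's alloc op and
+ * build a single-entry sg_table for it so that the buffer can later be
+ * exported as a dma-buf.
+ */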
+static struct tee_shm *tee_shm_alloc_static(struct tee *tee, size_t size,
+		uint32_t flags)
+{
+	struct tee_shm *shm;
+	unsigned long pfn;
+	unsigned int nr_pages;
+	struct page *page;
+	int ret;
+
+	shm = tee->ops->alloc(tee, size, flags);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("allocation failed (s=%d,flags=0x%08x) err=%ld\n",
+			(int) size, flags, PTR_ERR(shm));
+		goto exit;
+	}
+
+	pfn = shm->resv.paddr >> PAGE_SHIFT;
+	page = pfn_to_page(pfn);
+	if (IS_ERR_OR_NULL(page)) {
+		pr_err("pfn_to_page(%lx) failed\n", pfn);
+		tee->ops->free(shm);
+		return (struct tee_shm *) page;
+	}
+
+	/* Only one page of contiguous physical memory */
+	nr_pages = 1;
+
+	ret = sg_alloc_table_from_pages(&shm->resv.sgt, &page,
+					nr_pages, 0,
+					nr_pages * PAGE_SIZE, GFP_KERNEL);
+	if (ret) {
+		pr_err("sg_alloc_table_from_pages() failed\n");
+		tee->ops->free(shm);
+		shm = ERR_PTR(ret);
+	}
+
+exit:
+	return shm;
+}
+
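+/*
+ * Non-secure shm: backed by individually allocated pages rather than a
+ * contiguous carveout, reference counted, and identified by an idr token
+ * instead of a physical address.
+ */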
+static struct tee_shm *tee_shm_alloc_ns(struct tee *tee, size_t size,
+					uint32_t flags)
+{
+	size_t i, nr_pages;
+	struct page **pages;
+
+	struct tee_shm *shm;
+
+	if (size == 0) {
+		pr_warn("invalid size %zu flags 0x%x\n",
+			size, flags);
+		return NULL;
+	}
+
+	shm = kzalloc(sizeof(struct tee_shm), GFP_KERNEL);
+	if (shm == NULL) {
+		pr_err("failed to allocate tee_shm (%zu bytes)\n",
+			sizeof(struct tee_shm));
+		return NULL;
+	}
+
+	shm->ns.token = tee_core_alloc_uuid(shm);
+	if (shm->ns.token <= 0) {
+		pr_err("failed to alloc idr for shm\n");
+		kfree(shm);
+		return NULL;
+	}
+
+	/* Round up to whole pages; size == 0 was rejected above */
+	nr_pages = ((size - 1) >> PAGE_SHIFT) + 1;
+
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL)
+		goto err_free_pagelist;
+
+	for (i = 0; i < nr_pages; i++) {
+		pages[i] = alloc_page(GFP_KERNEL);
+		if (pages[i] == NULL) {
+			pr_err("bad alloc page %zu\n", i);
+			goto err_free_pages;
+		}
+	}
+
+	shm->ns.pages = pages;
+	shm->ns.nr_pages = (size_t) nr_pages;
+
+	atomic_set(&shm->ns.ref, 1);
+
+	shm->size_req = size;
+	shm->size_alloc = nr_pages << PAGE_SHIFT;
+
+	shm->flags = flags;
+
+	return shm;
+
+err_free_pages:
+	for (i = 0; i < nr_pages; i++) {
+		if (pages[i] == NULL)
+			break;
+
+		__free_page(pages[i]);
+	}
+
+err_free_pagelist:
+	kfree(pages);
+
+	tee_core_free_uuid(shm->ns.token);
+
+	kfree(shm);
+
+	return NULL;
+}
+
+void tee_shm_free_ns(struct tee_shm *shm)
+{
+	size_t i;
+
+	if (atomic_dec_return(&shm->ns.ref) != 0)
+		return;
+
+	for (i = 0; i < shm->ns.nr_pages; i++)
+		__free_page(shm->ns.pages[i]);
+
+	kfree(shm->ns.pages);
+
+	tee_core_free_uuid(shm->ns.token);
+
+	kfree(shm);
+}
+
+struct tee_shm *tkcore_alloc_shm(struct tee *tee, size_t size, uint32_t flags)
+{
+	struct tee_shm *shm;
+
+	if ((shm_test_nonsecure(flags)))
+		shm = tee_shm_alloc_ns(tee, size, flags);
+	else
+		shm = tee_shm_alloc_static(tee, size, flags);
+
+	if (IS_ERR_OR_NULL(shm))
+		goto exit;
+
+	shm->tee = tee;
+
+exit:
+	return shm;
+}
+
+void tkcore_shm_free(struct tee_shm *shm)
+{
+	struct tee *tee;
+
+	if (IS_ERR_OR_NULL(shm))
+		return;
+
+	tee = shm->tee;
+
+	if (tee == NULL) {
+		pr_warn("tkcoredrv: %s(): NULL tee\n", __func__);
+		return;
+	}
+
+	if (shm_test_nonsecure(shm->flags))
+		tee_shm_free_ns(shm);
+	else {
+		sg_free_table(&shm->resv.sgt);
+		shm->tee->ops->free(shm);
+	}
+}
+
+static int __tee_shm_attach_dma_buf(struct dma_buf *dmabuf,
+				struct dma_buf_attachment *attach)
+{
+	struct tee_shm_attach *tee_shm_attach;
+	struct tee_shm *shm;
+	struct tee *tee;
+
+	shm = dmabuf->priv;
+	tee = shm->tee;
+
+
+	tee_shm_attach = devm_kzalloc(_DEV(tee),
+				sizeof(*tee_shm_attach), GFP_KERNEL);
+	if (!tee_shm_attach)
+		return -ENOMEM;
+
+	tee_shm_attach->dir = DMA_NONE;
+	attach->priv = tee_shm_attach;
+
+	return 0;
+}
+
+static void __tee_shm_detach_dma_buf(struct dma_buf *dmabuf,
+				struct dma_buf_attachment *attach)
+{
+	struct tee_shm_attach *tee_shm_attach = attach->priv;
+	struct sg_table *sgt;
+	struct tee_shm *shm;
+	struct tee *tee;
+
+	shm = dmabuf->priv;
+	tee = shm->tee;
+
+
+	if (!tee_shm_attach) {
+		pr_err("No shm attached with this dmabuf context\n");
+		return;
+	}
+
+	sgt = &tee_shm_attach->sgt;
+
+	if (tee_shm_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+			tee_shm_attach->dir);
+
+	sg_free_table(sgt);
+	devm_kfree(_DEV(tee), tee_shm_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *__tee_shm_dma_buf_map_dma_buf(
+	struct dma_buf_attachment *attach, enum dma_data_direction dir)
+{
+	struct tee_shm_attach *tee_shm_attach = attach->priv;
+	struct tee_shm *tee_shm = attach->dmabuf->priv;
+	struct sg_table *sgt = NULL;
+	struct scatterlist *rd, *wr;
+	unsigned int i;
+	int nents, ret;
+	struct tee *tee;
+
+	tee = tee_shm->tee;
+
+
+	/* just return current sgt if already requested. */
+	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped)
+		return &tee_shm_attach->sgt;
+
+	sgt = &tee_shm_attach->sgt;
+
+	ret = sg_alloc_table(sgt, tee_shm->resv.sgt.orig_nents, GFP_KERNEL);
+	if (ret) {
+		pr_err("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rd = tee_shm->resv.sgt.sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	if (dir != DMA_NONE) {
+		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+		if (!nents) {
+			pr_err("failed to map sgl with iommu.\n");
+			sg_free_table(sgt);
+			sgt = ERR_PTR(-EIO);
+			goto err_unlock;
+		}
+	}
+
+	tee_shm_attach->is_mapped = true;
+	tee_shm_attach->dir = dir;
+	attach->priv = tee_shm_attach;
+
+err_unlock:
+	return sgt;
+}
+
+static void __tee_shm_dma_buf_unmap_dma_buf(struct dma_buf_attachment *attach,
+		struct sg_table *table,
+		enum dma_data_direction dir)
+{
+}
+
+static void __tee_shm_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct tee_shm *shm = dmabuf->priv;
+	struct tee_context *ctx;
+	struct tee *tee;
+
+	tee = shm->ctx->tee;
+
+
+	ctx = shm->ctx;
+
+	tee_shm_free_io(shm);
+}
+
+static int __tee_shm_dma_buf_mmap(struct dma_buf *dmabuf,
+				  struct vm_area_struct *vma)
+{
+	struct tee_shm *shm = dmabuf->priv;
+	size_t size = vma->vm_end - vma->vm_start;
+	struct tee *tee;
+	int ret;
+	pgprot_t prot;
+	unsigned long pfn;
+
+	tee = shm->ctx->tee;
+
+	pfn = shm->resv.paddr >> PAGE_SHIFT;
+
+
+	if (shm->flags & TEE_SHM_CACHED)
+		prot = vma->vm_page_prot;
+	else
+		prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
+	if (!ret)
+		vma->vm_private_data = (void *)shm;
+
+	return ret;
+}
+
+static void *map_stub(struct dma_buf *dmabuf, unsigned long length)
+{
+	(void) dmabuf;
+	(void) length;
+
+	return NULL;
+}
+
+static const struct dma_buf_ops tee_static_shm_dma_buf_ops = {
+	.attach = __tee_shm_attach_dma_buf,
+	.detach = __tee_shm_detach_dma_buf,
+	.map_dma_buf = __tee_shm_dma_buf_map_dma_buf,
+	.unmap_dma_buf = __tee_shm_dma_buf_unmap_dma_buf,
+	.release = __tee_shm_dma_buf_release,
+	.mmap = __tee_shm_dma_buf_mmap,
+
+	.map = map_stub,
+};
+
+static int tee_static_shm_export(struct tee *tee, struct tee_shm *shm,
+				 int *export)
+{
+	struct dma_buf *dmabuf;
+	int ret = 0;
+
+#if defined(DEFINE_DMA_BUF_EXPORT_INFO)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+
+	if (shm_test_nonsecure(shm->flags)) {
+		pr_err(
+			"cannot export dmabuf for nonsecure buf flags: 0x%x\n",
+			shm->flags);
+		return -EINVAL;
+	}
+
+	/* Temporary fix to support both older and newer kernel versions. */
+#if defined(DEFINE_DMA_BUF_EXPORT_INFO)
+	exp_info.priv = shm;
+	exp_info.ops = &tee_static_shm_dma_buf_ops;
+	exp_info.size = shm->size_alloc;
+	exp_info.flags = O_RDWR;
+
+	dmabuf = dma_buf_export(&exp_info);
+#else
+	dmabuf = dma_buf_export(shm, &tee_static_shm_dma_buf_ops,
+				shm->size_alloc, O_RDWR, NULL);
+#endif
+	if (IS_ERR_OR_NULL(dmabuf)) {
+		pr_err("dmabuf: couldn't export buffer (%ld)\n",
+			PTR_ERR(dmabuf));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	*export = dma_buf_fd(dmabuf, O_CLOEXEC);
+out:
+	return ret;
+}
+
+static int __tee_ns_shm_vma_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct tee_shm *shm = (struct tee_shm *) vma->vm_private_data;
+	struct page *page;
+
+	if (vmf->pgoff >= shm->ns.nr_pages)
+		return VM_FAULT_ERROR;
+
+	page = shm->ns.pages[vmf->pgoff];
+	get_page(page);
+
+	vmf->page = page;
+	return 0;
+}
+
+static const struct vm_operations_struct tee_ns_shm_vm_ops = {
+	/*	.close = __tee_ns_shm_vma_close, */
+	.fault = __tee_ns_shm_vma_fault,
+};
+
+static int __tee_ns_shm_release(struct inode *inode, struct file *filp)
+{
+	struct tee_shm *shm = filp->private_data;
+
+	tee_shm_free_io(shm);
+	return 0;
+}
+
+static int __tee_ns_shm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &tee_ns_shm_vm_ops;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
+static const struct file_operations tee_ns_shm_fops = {
+	.release = __tee_ns_shm_release,
+	.mmap = __tee_ns_shm_mmap,
+};
+
+static int tee_ns_shm_export(struct tee *tee, struct tee_shm *shm, int *export)
+{
+	int fd;
+
+	if (!shm_test_nonsecure(shm->flags)) {
+		pr_err("cannot export for static buf flags: 0x%x\n",
+			shm->flags);
+		return -EINVAL;
+	}
+
+	fd = anon_inode_getfd("tz_ns_shm", &tee_ns_shm_fops,
+				   (void *) shm, O_RDWR | O_CLOEXEC);
+	if (fd < 0) {
+		pr_err("anon_inode_getfd() failed with %d\n", fd);
+		return fd;
+	}
+
+	*export = fd;
+	return 0;
+}
+
+/* called inside tee->lock */
+static int tee_ns_shm_inc_ref(struct tee_shm *shm)
+{
+	/* A stricter check would refuse to increment once the refcount has
+	 * dropped below 1; callers already hold tee->lock, so the buffer
+	 * cannot be released concurrently.
+	 */
+	atomic_inc(&shm->ns.ref);
+	return 0;
+}
+
+struct tee_shm *tee_shm_alloc_from_rpc(struct tee *tee, size_t size,
+					uint32_t extra_flags)
+{
+	struct tee_shm *shm;
+
+
+	mutex_lock(&tee->lock);
+	shm = tkcore_alloc_shm(tee, size,
+		TEE_SHM_TEMP | TEE_SHM_FROM_RPC | extra_flags);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("buffer allocation failed (%ld)\n",
+			PTR_ERR(shm));
+		goto out;
+	}
+
+	tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
+	list_add_tail(&shm->entry, &tee->list_rpc_shm);
+
+	shm->ctx = NULL;
+
+out:
+	mutex_unlock(&tee->lock);
+	return shm;
+}
+
+void tee_shm_free_from_rpc(struct tee_shm *shm)
+{
+	struct tee *tee;
+
+	if (shm == NULL)
+		return;
+
+	tee = shm->tee;
+	mutex_lock(&tee->lock);
+
+	if (shm->ctx == NULL) {
+		tee_dec_stats(&shm->tee->stats[TEE_STATS_SHM_IDX]);
+		list_del(&shm->entry);
+	}
+
+	tkcore_shm_free(shm);
+	mutex_unlock(&tee->lock);
+}
+
+/* Export a buffer that was allocated by an RPC from the firmware so that
+ * user space can access it. It does not need to be registered, since it was
+ * not allocated by the user.
+ */
+int tee_shm_fd_for_rpc(struct tee_context *ctx, struct tee_shm_io *shm_io)
+{
+	struct tee_shm *shm = NULL;
+	struct tee *tee = ctx->tee;
+	int ret;
+	struct list_head *pshm;
+
+
+	shm_io->fd_shm = 0;
+
+	mutex_lock(&tee->lock);
+
+	if (!list_empty(&tee->list_rpc_shm)) {
+		list_for_each(pshm, &tee->list_rpc_shm) {
+			shm = list_entry(pshm, struct tee_shm, entry);
+			if (shm_test_nonsecure(shm_io->flags)) {
+				if ((void *)(unsigned long) shm->ns.token
+						== shm_io->buffer)
+					goto found;
+			} else {
+				if ((void *)(unsigned long) shm->resv.paddr
+						== shm_io->buffer)
+					goto found;
+			}
+		}
+	}
+
+	pr_err("Can't find shm for %p\n", shm_io->buffer);
+	ret = -ENOMEM;
+	goto out;
+
+found:
+
+	if (shm_test_nonsecure(shm_io->flags))
+		ret = tee_ns_shm_export(tee, shm, &shm_io->fd_shm);
+	else
+		ret = tee_static_shm_export(tee, shm, &shm_io->fd_shm);
+
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	shm->ctx = ctx;
+	list_move(&shm->entry, &ctx->list_shm);
+
+	shm->dev = get_device(_DEV(tee));
+	ret = tee_get(tee);
+	WARN_ON(ret);
+	tee_context_get(ctx);
+
+	if (shm_test_nonsecure(shm_io->flags)) {
+		/*FIXME check for return value */
+		tee_ns_shm_inc_ref(shm);
+	} else
+		WARN_ON(!tee->ops->shm_inc_ref(shm));
+out:
+	mutex_unlock(&tee->lock);
+	return ret;
+}
+
+int tee_shm_alloc_io_perm(struct tee_context *ctx, struct tee_shm_io *shm_io)
+{
+	struct tee_shm *shm;
+	struct tee *tee = ctx->tee;
+	int ret;
+
+
+	if (shm_test_nonsecure(shm_io->flags)) {
+		pr_err("permanent shm cannot be nonsecure\n");
+		return -EINVAL;
+	}
+
+	if (ctx->usr_client)
+		shm_io->fd_shm = 0;
+
+	mutex_lock(&tee->lock);
+	shm = tkcore_alloc_shm(tee, shm_io->size, shm_io->flags);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("buffer allocation failed (%ld)\n",
+			PTR_ERR(shm));
+		ret = shm ? PTR_ERR(shm) : -ENOMEM;
+		goto out;
+	}
+
+	if (ctx->usr_client) {
+		ret = tee_static_shm_export(tee, shm, &shm_io->fd_shm);
+		if (ret) {
+			tkcore_shm_free(shm);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		shm->flags |= TEEC_MEM_DMABUF;
+	}
+
+	shm_io->paddr = (void *)(unsigned long) shm->resv.paddr;
+
+	shm->ctx = ctx;
+	shm->dev = get_device(_DEV(tee));
+	ret = tee_get(tee);
+	WARN_ON(ret);
+	tee_context_get(ctx);
+
+	tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
+	list_add_tail(&shm->entry, &ctx->list_shm);
+out:
+	mutex_unlock(&tee->lock);
+	return ret;
+}
+
+int tee_shm_alloc_io(struct tee_context *ctx, struct tee_shm_io *shm_io)
+{
+	struct tee_shm *shm;
+	struct tee *tee = ctx->tee;
+	int ret;
+
+
+	if (ctx->usr_client)
+		shm_io->fd_shm = 0;
+
+	mutex_lock(&tee->lock);
+	shm = tkcore_alloc_shm(tee, shm_io->size, shm_io->flags);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("buffer allocation failed (%ld)\n",
+			PTR_ERR(shm));
+		ret = shm ? PTR_ERR(shm) : -ENOMEM;
+		goto out;
+	}
+
+	if (ctx->usr_client) {
+		if (shm_test_nonsecure(shm_io->flags))
+			ret = tee_ns_shm_export(tee, shm, &shm_io->fd_shm);
+		else
+			ret = tee_static_shm_export(tee, shm, &shm_io->fd_shm);
+
+		if (ret) {
+			tkcore_shm_free(shm);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		shm->flags |= TEEC_MEM_DMABUF;
+	}
+
+	shm->ctx = ctx;
+	shm->dev = get_device(_DEV(tee));
+	ret = tee_get(tee);
+	WARN_ON(ret);		/* tee_core_get must not issue */
+	tee_context_get(ctx);
+
+	tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
+	list_add_tail(&shm->entry, &ctx->list_shm);
+out:
+	mutex_unlock(&tee->lock);
+	return ret;
+}
+
+void tee_shm_free_io(struct tee_shm *shm)
+{
+	struct tee_context *ctx = shm->ctx;
+	struct tee *tee = ctx->tee;
+	struct device *dev = shm->dev;
+
+	mutex_lock(&ctx->tee->lock);
+	tee_dec_stats(&tee->stats[TEE_STATS_SHM_IDX]);
+	list_del(&shm->entry);
+
+	tkcore_shm_free(shm);
+	tee_put(ctx->tee);
+	tee_context_put(ctx);
+	if (dev)
+		put_device(dev);
+	mutex_unlock(&tee->lock);
+}
+
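+/*
+ * Import an existing dma-buf fd: attach and map it, require a single
+ * scatterlist entry, and record its physical address (plus offset) in the
+ * shm. Buffers that were exported by this driver are marked tee_allocated.
+ */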
+static int tee_shm_db_get(struct tee *tee, struct tee_shm *shm, int fd,
+			  unsigned int flags, size_t size, int offset)
+{
+	struct tee_shm_dma_buf *sdb;
+	struct dma_buf *dma_buf;
+	int ret = 0;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR(dma_buf)) {
+		ret = PTR_ERR(dma_buf);
+		goto exit;
+	}
+
+	sdb = kzalloc(sizeof(*sdb), GFP_KERNEL);
+	if (!sdb) {
+		pr_err("can't alloc tee_shm_dma_buf\n");
+		ret = -ENOMEM;
+		goto buf_put;
+	}
+	shm->resv.sdb = sdb;
+
+	if (dma_buf->size < size + offset) {
+		pr_err("dma_buf too small %zd < %zd + %d\n",
+			dma_buf->size, size, offset);
+		ret = -EINVAL;
+		goto free_sdb;
+	}
+
+	sdb->attach = dma_buf_attach(dma_buf, _DEV(tee));
+	if (IS_ERR_OR_NULL(sdb->attach)) {
+		ret = PTR_ERR(sdb->attach);
+		goto free_sdb;
+	}
+
+	sdb->sgt = dma_buf_map_attachment(sdb->attach, DMA_NONE);
+	if (IS_ERR_OR_NULL(sdb->sgt)) {
+		ret = PTR_ERR(sdb->sgt);
+		goto buf_detach;
+	}
+
+	if (sg_nents(sdb->sgt->sgl) != 1) {
+		ret = -EINVAL;
+		goto buf_unmap;
+	}
+
+	shm->resv.paddr = sg_phys(sdb->sgt->sgl) + offset;
+	if (dma_buf->ops->attach == __tee_shm_attach_dma_buf)
+		sdb->tee_allocated = true;
+	else
+		sdb->tee_allocated = false;
+
+	shm->flags |= TEEC_MEM_DMABUF;
+
+	goto exit;
+
+buf_unmap:
+	dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);
+buf_detach:
+	dma_buf_detach(dma_buf, sdb->attach);
+free_sdb:
+	kfree(sdb);
+buf_put:
+	dma_buf_put(dma_buf);
+exit:
+	return ret;
+}
+
+struct tee_shm *tkcore_shm_get(struct tee_context *ctx,
+				struct TEEC_SharedMemory *c_shm,
+				size_t size, int offset)
+{
+	struct tee_shm *shm;
+	struct tee *tee = ctx->tee;
+	int ret;
+
+	if (shm_test_nonsecure(c_shm->flags)) {
+		pr_err("invalid shared memory flags: 0x%x\n",
+			c_shm->flags);
+		return NULL;
+	}
+
+	mutex_lock(&tee->lock);
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(shm)) {
+		pr_err("can't alloc tee_shm\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	shm->ctx = ctx;
+	shm->tee = tee;
+	shm->dev = _DEV(tee);
+	shm->flags = c_shm->flags | TEE_SHM_MEMREF;
+	shm->size_req = size;
+	shm->size_alloc = 0;
+
+	if (c_shm->flags & TEEC_MEM_KAPI) {
+		struct tee_shm *kc_shm = (struct tee_shm *)c_shm->d.ptr;
+
+		if (!kc_shm) {
+			pr_err("kapi fd null\n");
+			ret = -EINVAL;
+			goto err;
+		}
+		shm->resv.paddr = kc_shm->resv.paddr;
+
+		if (kc_shm->size_alloc < size + offset) {
+			pr_err("kapi buff too small %zd < %zd + %d\n",
+				kc_shm->size_alloc, size, offset);
+			ret = -EINVAL;
+			goto err;
+		}
+
+	} else if (c_shm->d.fd) {
+		ret = tee_shm_db_get(tee, shm,
+			c_shm->d.fd, c_shm->flags, size, offset);
+		if (ret)
+			goto err;
+	} else if (!c_shm->buffer) {
+		pr_debug("null buffer, pass 'as is'\n");
+	} else {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&tee->lock);
+	return shm;
+
+err:
+	kfree(shm);
+	mutex_unlock(&tee->lock);
+	return ERR_PTR(ret);
+}
+
+void tkcore_shm_put(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee *tee;
+
+	WARN_ON(!ctx);
+	if (!ctx)
+		return;
+
+	tee = ctx->tee;
+	WARN_ON(!tee);
+	if (!tee)
+		return;
+
+	WARN_ON(!shm);
+	if (!shm)
+		return;
+
+	WARN_ON(!(shm->flags & TEE_SHM_MEMREF));
+
+	if (shm_test_nonsecure(shm->flags)) {
+		pr_warn("invalid shared memory flags: 0x%x\n",
+			shm->flags);
+		return;
+	}
+
+	mutex_lock(&tee->lock);
+	if (shm->flags & TEEC_MEM_DMABUF) {
+		struct tee_shm_dma_buf *sdb;
+		struct dma_buf *dma_buf;
+
+		sdb = shm->resv.sdb;
+		dma_buf = sdb->attach->dmabuf;
+
+		dma_buf_unmap_attachment(sdb->attach, sdb->sgt, DMA_NONE);
+		dma_buf_detach(dma_buf, sdb->attach);
+		dma_buf_put(dma_buf);
+
+		kfree(sdb);
+		sdb = NULL;
+	}
+
+	kfree(shm);
+	mutex_unlock(&tee->lock);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.h
new file mode 100644
index 0000000..9c48ba0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_shm.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_SHM_H__
+#define __TEE_SHM_H__
+
+#include <linux/tee_client_api.h>
+
+struct tee_context;
+struct tee_shm;
+struct tee_shm_io;
+struct tee;
+
+static inline int shm_test_nonsecure(uint32_t flags)
+{
+	return flags & TEEC_MEM_NONSECURE;
+}
+
+int tee_shm_alloc_io(struct tee_context *ctx, struct tee_shm_io *shm_io);
+void tee_shm_free_io(struct tee_shm *shm);
+
+int tee_shm_fd_for_rpc(struct tee_context *ctx, struct tee_shm_io *shm_io);
+
+int tee_shm_alloc_io_perm(struct tee_context *ctx, struct tee_shm_io *shm_io);
+
+struct tee_shm *tkcore_alloc_shm(struct tee *tee, size_t size, uint32_t flags);
+void tkcore_shm_free(struct tee_shm *shm);
+
+struct tee_shm *tkcore_shm_get(struct tee_context *ctx,
+		struct TEEC_SharedMemory *c_shm,
+		size_t size, int offset);
+void tkcore_shm_put(struct tee_context *ctx, struct tee_shm *shm);
+
+#endif /* __TEE_SHM_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.c
new file mode 100644
index 0000000..4d333a6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/semaphore.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+
+#include "tee_shm.h"
+#include "tee_core.h"
+#include "tee_supp_com.h"
+
+enum teec_rpc_result tee_supp_cmd(struct tee *tee,
+			uint32_t id, void *data, size_t datalen)
+{
+	struct tee_rpc *rpc = tee->rpc;
+	enum teec_rpc_result res = TEEC_RPC_FAIL;
+	size_t size;
+	struct task_struct *task = current;
+
+	(void) task;
+
+	if (id == TEE_RPC_ICMD_INVOKE) {
+		if (atomic_read(&rpc->used) == 0) {
+			pr_err("teed not ready. id=0x%x\n", id);
+			goto out;
+		}
+	}
+
+	switch (id) {
+	case TEE_RPC_ICMD_ALLOCATE:
+		{
+			struct tee_rpc_alloc *alloc;
+			struct tee_shm *shmint;
+
+			alloc = (struct tee_rpc_alloc *)data;
+			size = alloc->size;
+			memset(alloc, 0, sizeof(struct tee_rpc_alloc));
+			shmint = tee_shm_alloc_from_rpc(tee, size, 0U);
+			if (IS_ERR_OR_NULL(shmint))
+				break;
+
+			alloc->size = size;
+			alloc->data = (void *) (unsigned long)
+				shmint->resv.paddr;
+			alloc->shm = shmint;
+			res = TEEC_RPC_OK;
+
+			break;
+		}
+	case TEE_RPC_ICMD_FREE:
+		{
+			struct tee_rpc_free *free;
+
+			free = (struct tee_rpc_free *)data;
+			tee_shm_free_from_rpc(free->shm);
+			res = TEEC_RPC_OK;
+			break;
+		}
+	case TEE_RPC_ICMD_INVOKE:
+		{
+			if (sizeof(rpc->commToUser) < datalen)
+				break;
+
+			/*
+			 * Other threads block here until we have copied our
+			 * answer from the teed
+			 */
+			mutex_lock(&rpc->thrd_mutex);
+
+			mutex_lock(&rpc->outsync);
+			rpc->res = 0;
+			memcpy(&rpc->commToUser, data, datalen);
+			mutex_unlock(&rpc->outsync);
+
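+			/*
+			 * Wake the supplicant blocked in tee_supp_read() and
+			 * wait until it posts its answer via tee_supp_write()
+			 */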
+			up(&rpc->datatouser);
+
+			down(&rpc->datafromuser);
+
+			mutex_lock(&rpc->insync);
+			memcpy(data, &rpc->commFromUser, datalen);
+			mutex_unlock(&rpc->insync);
+
+			res = rpc->res;
+			mutex_unlock(&rpc->thrd_mutex);
+
+			break;
+		}
+	default:
+		/* not supported */
+		break;
+	}
+
+out:
+
+	return res;
+}
+EXPORT_SYMBOL(tee_supp_cmd);
+
+ssize_t tee_supp_read(struct file *filp, char __user *buffer,
+		size_t length, loff_t *offset)
+{
+	struct tee_context *ctx = (struct tee_context *)(filp->private_data);
+	struct tee *tee;
+	struct tee_rpc *rpc;
+	struct task_struct *task = current;
+	int ret;
+
+	(void) task;
+
+	if (ctx == NULL ||
+		ctx->tee == NULL ||
+		ctx->tee->dev == NULL ||
+		ctx->tee->rpc == NULL) {
+		pr_err("invalid context\n");
+		return -EINVAL;
+	}
+
+	tee = ctx->tee;
+
+	rpc = tee->rpc;
+
+	if (atomic_read(&rpc->used) == 0) {
+		pr_err("teed application not ready\n");
+		ret = -EPERM;
+		goto out;
+	}
+
+	if (down_interruptible(&rpc->datatouser))
+		return -ERESTARTSYS;
+
+	mutex_lock(&rpc->outsync);
+
+	ret =
+		sizeof(rpc->commToUser) - sizeof(rpc->commToUser.cmds) +
+		sizeof(rpc->commToUser.cmds[0]) * rpc->commToUser.nbr_bf;
+	if (length < ret) {
+		ret = -EINVAL;
+	} else {
+		if (copy_to_user(buffer, &rpc->commToUser, ret)) {
+			pr_err("copy_to_user(comm) failed!\n");
+			ret = -EINVAL;
+		}
+	}
+
+	mutex_unlock(&rpc->outsync);
+
+out:
+	return ret;
+}
+
+ssize_t tee_supp_write(struct file *filp, const char __user *buffer,
+		size_t length, loff_t *offset)
+{
+	struct tee_context *ctx = (struct tee_context *) (filp->private_data);
+	struct tee *tee;
+	struct tee_rpc *rpc;
+	struct task_struct *task = current;
+	int ret = 0;
+
+	(void) task;
+
+	if (ctx == NULL || ctx->tee == NULL || ctx->tee->rpc == NULL) {
+		pr_err("Invalid ctx\n");
+		return -EINVAL;
+	}
+
+	tee = ctx->tee;
+	rpc = tee->rpc;
+
+	if (atomic_read(&rpc->used) == 0) {
+		pr_err("teed not ready\n");
+		ret = -EPERM;
+		goto out;
+	}
+
+	if (length > 0 && length < sizeof(rpc->commFromUser)) {
+		uint32_t i;
+		unsigned long r;
+
+		mutex_lock(&rpc->insync);
+
+		r = copy_from_user(&rpc->commFromUser, buffer, length);
+		if (r) {
+			pr_err("copy_from_user(comm) failed: %lu\n", r);
+			rpc->res = -EINVAL;
+			mutex_unlock(&rpc->insync);
+			up(&rpc->datafromuser);
+
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/* Translate virtual address of caller into physical address */
+		for (i = 0; i < rpc->commFromUser.nbr_bf; i++) {
+			uint32_t type = rpc->commFromUser.cmds[i].type;
+			void *buffer = rpc->commFromUser.cmds[i].buffer;
+
+			if (type != TEE_RPC_BUFFER || buffer == NULL)
+				continue;
+
+			if (type & TEE_RPC_BUFFER_NONSECURE) {
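+				/* non-secure buffers need no translation */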
+			} else {
+				struct tee_shm *shm;
+				struct vm_area_struct *vma = find_vma(
+					current->mm, (unsigned long) buffer);
+
+				if (vma == NULL)
+					continue;
+
+				shm = vma->vm_private_data;
+
+				if (shm == NULL) {
+					pr_err(
+							"Invalid vma->vm_private_data [%s:%d:%d]\n",
+							current->comm,
+							current->tgid,
+							current->pid);
+
+					rpc->res = -EINVAL;
+					mutex_unlock(&rpc->insync);
+					up(&rpc->datafromuser);
+
+					ret = -EINVAL;
+					goto out;
+				}
+
+				rpc->commFromUser.cmds[i].buffer =
+					(void *) (unsigned long)
+					shm->resv.paddr;
+			}
+		}
+
+		rpc->res = 0;
+		mutex_unlock(&rpc->insync);
+		up(&rpc->datafromuser);
+		ret = length;
+	}
+
+out:
+	return ret;
+}
+
+int tee_supp_init(struct tee *tee)
+{
+	struct tee_rpc *rpc;
+
+	rpc = devm_kzalloc(tee->dev, sizeof(struct tee_rpc), GFP_KERNEL);
+	if (rpc == NULL)
+		return -ENOMEM;
+
+	rpc->datafromuser = (struct semaphore)
+		__SEMAPHORE_INITIALIZER(rpc->datafromuser, 0);
+	rpc->datatouser = (struct semaphore)
+		__SEMAPHORE_INITIALIZER(rpc->datatouser, 0);
+	mutex_init(&rpc->thrd_mutex);
+	mutex_init(&rpc->outsync);
+	mutex_init(&rpc->insync);
+	atomic_set(&rpc->used, 0);
+	tee->rpc = rpc;
+
+	return 0;
+}
+
+void tee_supp_deinit(struct tee *tee)
+{
+	devm_kfree(tee->dev, tee->rpc);
+	tee->rpc = NULL;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.h
new file mode 100644
index 0000000..906d4de
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_supp_com.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_SUPP_COMM_H
+#define TEE_SUPP_COMM_H
+
+#define TEE_RPMB_BUFFER_NUMBER	2
+
+#define TEE_RPC_ICMD_ALLOCATE	0x1001
+#define TEE_RPC_ICMD_FREE		0x1002
+#define TEE_RPC_ICMD_INVOKE		0x1003
+
+#define TEE_RPC_NBR_BUFF		1
+#define TEE_RPC_DATA_SIZE		64
+#define TEE_RPC_BUFFER_NUMBER	5
+
+#define TEE_RPC_STATE_IDLE		0x00
+#define TEE_RPC_STATE_ACTIVE	0x01
+
+/* Be Consistent with teed (user space) */
+#define TEE_RPC_BUFFER		0x00000001
+#define TEE_RPC_VALUE		0x00000002
+#define TEE_RPC_BUFFER_NONSECURE	0x00040000
+
+#define TEE_RPC_LOAD_TA		0x10000001
+
+#define TEE_RPC_RPMB_CMD	0x1000000A
+
+#define TEE_RPC_INSTALL_TA		0x10000013
+#define TEE_RPC_DELETE_TA		0x10000014
+#define TEE_RPC_INSTALL_TA_RESP	0x10000015
+
+#define TEE_RPC_INSTALL_SYS_TA	0x10000020
+/*
+ * Handled within the driver only
+ * Be CONSISTENT with tkcore-os (secure space)
+ */
+#define TEE_RPC_WAIT_QUEUE_SLEEP	0x20000001
+#define TEE_RPC_WAIT_QUEUE_WAKEUP	0x20000002
+#define TEE_RPC_WAIT			0x30000000
+
+#include <linux/semaphore.h>
+
+/**
+ * struct tee_rpc_bf - Contains definition of the tee com buffer
+ * @state: Buffer state
+ * @data: Command data
+ */
+struct tee_rpc_bf {
+	uint32_t state;
+	uint8_t data[TEE_RPC_DATA_SIZE];
+};
+
+struct tee_rpc_alloc {
+	uint32_t size; /* size of block */
+	void *data; /* pointer to data */
+	void *shm; /* pointer to an opaque data, being shm structure */
+};
+
+struct tee_rpc_free {
+	void *shm; /* pointer to an opaque data, being shm structure */
+};
+
+struct tee_rpc_cmd {
+	union {
+		void *buffer;
+		uint64_t padding_buf;
+	};
+	uint32_t size;
+	uint32_t type;
+	int fd;
+	int reserved;
+};
+
+struct tee_rpc_invoke {
+	uint32_t cmd;
+	uint32_t res;
+	uint32_t nbr_bf;
+	uint32_t reserved;
+	struct tee_rpc_cmd cmds[TEE_RPC_BUFFER_NUMBER];
+};
+
+struct tee_rpc {
+	struct tee_rpc_invoke commToUser;
+	struct tee_rpc_invoke commFromUser;
+	struct semaphore datatouser;
+	struct semaphore datafromuser;
+	struct mutex thrd_mutex; /* Block the thread to wait for supp answer */
+	struct mutex outsync; /* Out sync mutex */
+	struct mutex insync; /* In sync mutex */
+	atomic_t used;
+	int res;
+};
+
+enum teec_rpc_result {
+	TEEC_RPC_OK,
+	TEEC_RPC_FAIL
+};
+
+struct tee;
+
+int tee_supp_init(struct tee *tee);
+void tee_supp_deinit(struct tee *tee);
+
+enum teec_rpc_result tee_supp_cmd(struct tee *tee,
+		uint32_t id, void *data, size_t datalen);
+
+ssize_t tee_supp_read(struct file *filp, char __user *buffer,
+		size_t length, loff_t *offset);
+
+ssize_t tee_supp_write(struct file *filp, const char __user *buffer,
+		size_t length, loff_t *offset);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.c
new file mode 100644
index 0000000..7a578f5
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/atomic.h>
+#include <asm/page.h>
+
+#include "tee_core_priv.h"
+
+static ssize_t dump_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+	int len;
+	char *tmp_buf;
+
+	tmp_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!tmp_buf)
+		return -ENOMEM;
+
+	len = tee_context_dump(tee, tmp_buf, PAGE_SIZE - 128);
+
+	if (len > 0)
+		len = snprintf(buf, PAGE_SIZE, "%s", tmp_buf);
+	kfree(tmp_buf);
+
+	return len;
+}
+
+static ssize_t stat_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%d/%d %d/%d %d/%d %d/%d\n",
+			atomic_read(&tee->refcount),
+			tee->max_refcount,
+			tee->stats[TEE_STATS_CONTEXT_IDX].count,
+			tee->stats[TEE_STATS_CONTEXT_IDX].max,
+			tee->stats[TEE_STATS_SESSION_IDX].count,
+			tee->stats[TEE_STATS_SESSION_IDX].max,
+			tee->stats[TEE_STATS_SHM_IDX].count,
+			tee->stats[TEE_STATS_SHM_IDX].max);
+}
+
+static ssize_t info_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s iminor=%d dev=\"%s\" state=%d\n",
+			dev_name(tee->dev), tee->miscdev.minor,
+			dev_name(tee->miscdev.this_device), tee->state);
+}
+
+static ssize_t name_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", tee->name);
+}
+
+static ssize_t type_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", tee->ops->type);
+}
+
+static ssize_t refcount_show(struct device *device,
+			struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&tee->refcount));
+}
+
+static ssize_t conf_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", tee->conf);
+}
+
+static ssize_t test_show(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%08X\n", tee->test);
+}
+
+static ssize_t test_store(struct device *device,
+			  struct device_attribute *attr, const char *buf,
+			  size_t count)
+{
+	struct tee *tee = dev_get_drvdata(device);
+	unsigned long val;
+	int status;
+
+	status = kstrtoul(buf, 0, &val);
+	if (status)
+		return status;
+
+	if ((tee->conf & TEE_CONF_TEST_MODE) == TEE_CONF_TEST_MODE)
+		tee->test = val;
+
+	return count;
+}
+
+/*
+ * A state-to-string lookup table, for exposing a human readable state
+ * via sysfs. Always keep in sync with enum tee_state
+ */
+static const char *const tee_state_string[] = {
+	"offline",
+	"online",
+	"suspended",
+	"running",
+	"crashed",
+	"invalid",
+};
+
+static ssize_t tee_show_state(struct device *device,
+			  struct device_attribute *attr, char *buf)
+{
+	struct tee *tee = dev_get_drvdata(device);
+
+	int state = tee->state > TEE_LAST ? TEE_LAST : tee->state;
+
+	return snprintf(buf, PAGE_SIZE, "%s (%d)\n", tee_state_string[state],
+			tee->state);
+}
+
+static struct device_attribute device_attrs[] = {
+	__ATTR_RO(dump),
+	__ATTR_RO(stat),
+	__ATTR_RO(info),
+	__ATTR(test, (0660), test_show, test_store),
+	__ATTR(state, 0444, tee_show_state, NULL),
+	__ATTR(name, 0444, name_show, NULL),
+	__ATTR(refcount, 0444, refcount_show, NULL),
+	__ATTR(type, 0444, type_show, NULL),
+	__ATTR(conf, 0444, conf_show, NULL),
+};
+
+int tee_init_sysfs(struct tee *tee)
+{
+	int i, error = 0;
+
+	if (!tee)
+		return -EINVAL;
+
+	if (dev_get_drvdata(tee->miscdev.this_device) != tee) {
+		pr_err("drvdata is not valid\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		error = device_create_file(tee->miscdev.this_device,
+				&device_attrs[i]);
+		if (error) {
+			pr_err("bad device_create_file(): %d\n",
+				error);
+			break;
+		}
+	}
+
+	if (error) {
+		while (--i >= 0) {
+			device_remove_file(
+				tee->miscdev.this_device,
+				&device_attrs[i]);
+		}
+	}
+
+	return error;
+}
+
+void tee_cleanup_sysfs(struct tee *tee)
+{
+	int i;
+
+	if (!tee)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+		device_remove_file(tee->miscdev.this_device, &device_attrs[i]);
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.h
new file mode 100644
index 0000000..93bc9a7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_sysfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_SYSFS_H__
+#define __TEE_SYSFS_H__
+
+struct tee;
+
+int tee_init_sysfs(struct tee *tee);
+void tee_cleanup_sysfs(struct tee *tee);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.c
new file mode 100644
index 0000000..6d7a87e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <linux/tee_ioc.h>
+#include "tee_core.h"
+
+#include "tee_ta_mgmt.h"
+#include "tee_shm.h"
+#include "tee_supp_com.h"
+
+static struct completion event_inst_ta;
+static struct mutex mutex_inst_ta;
+
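+/*
+ * Push a trusted application binary to the teed supplicant: the TA image is
+ * copied into an input RPC buffer, an output RPC buffer receives the result
+ * and response message, and completion is signalled by the supplicant via
+ * tee_install_sp_ta_response().
+ */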
+int tee_install_sp_ta(struct tee_context *ctx,
+				void __user *__tee_spta_inst_desc)
+{
+	struct tee *tee;
+
+	struct tee_shm *shm_in = NULL;
+	struct tee_shm *shm_out = NULL;
+
+	struct tee_rpc_invoke request;
+	struct tee_spta_inst_desc ta;
+
+	uint32_t response_len = 0;
+	uint32_t response_msg_len = 0;
+
+	int ret = 0;
+
+	if (ctx == NULL || __tee_spta_inst_desc == NULL) {
+		pr_err("invalid param\n");
+		return -EINVAL;
+	}
+
+	tee = ctx->tee;
+
+	if (copy_from_user(&ta, (void *) __tee_spta_inst_desc,
+			sizeof(struct tee_spta_inst_desc))) {
+		pr_err("copy_from_user tee_spta_inst_desc failed\n");
+		return -EINVAL;
+	}
+
+	if (ta.ta_size < 4)
+		return -EINVAL;
+
+	shm_in = tee_shm_alloc_from_rpc(tee, ta.ta_size, 0);
+	if (IS_ERR_OR_NULL(shm_in)) {
+		pr_err("failed to alloc shm_in\n");
+		return -ENOMEM;
+	}
+
+	shm_out = tee_shm_alloc_from_rpc(tee, ta.ta_size, 0);
+	if (IS_ERR_OR_NULL(shm_out)) {
+		pr_err("failed to alloc shm_out\n");
+		tee_shm_free_from_rpc(shm_in);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(shm_in->resv.kaddr, (void *) ta.ta_binary,
+			ta.ta_size)) {
+		pr_err("copy ta_binary failed\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	memset(&request, 0, sizeof(struct tee_rpc_invoke));
+
+	request.cmd = TEE_RPC_INSTALL_TA;
+	request.nbr_bf = 2;
+
+	request.cmds[0].buffer = (void *) (unsigned long) shm_in->resv.paddr;
+	request.cmds[0].size = shm_in->size_req;
+
+	request.cmds[1].buffer = (void *) (unsigned long) shm_out->resv.paddr;
+	request.cmds[1].size = shm_out->size_req;
+
+	mutex_lock(&mutex_inst_ta);
+
+	ret = tee_supp_cmd(ctx->tee, TEE_RPC_ICMD_INVOKE,
+		&request, sizeof(struct tee_rpc_invoke));
+
+	if (ret == 0)
+		ret = request.res;
+
+	if (ret) {
+		pr_err(
+			"TA install command failed to start with %d\n", ret);
+		mutex_unlock(&mutex_inst_ta);
+		goto exit;
+	}
+
+	wait_for_completion(&event_inst_ta);
+
+	mutex_unlock(&mutex_inst_ta);
+
+	/*
+	 * response_msg format:
+	 * +------------+--------------+---------------+------------+--------+
+	 * | TEE_Result | resp_msg_len | resp_msg_json | ta_bin_len | ta_bin |
+	 * +------------+--------------+---------------+------------+--------+
+	 */
+
+	memcpy(&ret, (void *) shm_out->resv.kaddr, 4);
+	if (ret) {
+		pr_err("install TA failed with 0x%x\n", ret);
+		goto exit;
+	}
+
+	memcpy(&response_msg_len, (uint8_t *) shm_out->resv.kaddr + 4, 4);
+	if (ta.ta_size - 4 < response_msg_len) {
+		pr_err(
+			"unexpected response msg len: %u total_size: %u\n",
+			ta.ta_size, response_msg_len);
+		ret = -E2BIG;
+		goto exit;
+	}
+
+	/* response_msg_len + sizeof(response_msg_len) + sizeof(TEE_Result) */
+	response_len = response_msg_len + 4 + 4;
+
+	ret = copy_to_user((void __user *) ta.ta_binary,
+			(void *) shm_out->resv.kaddr, response_len);
+	if (ret) {
+		pr_err("copy response buffer failed with %d\n", ret);
+		ret = ret < 0 ? ret : -EAGAIN;
+		goto exit;
+	}
+
+	put_user(response_len, (uint32_t __user *) ta.response_len);
+
+exit:
+	tee_shm_free_from_rpc(shm_in);
+	tee_shm_free_from_rpc(shm_out);
+
+	return ret;
+}
+
+int tee_install_sp_ta_response(struct tee_context *ctx, void __user *u_arg)
+{
+	complete(&event_inst_ta);
+	return 0;
+}
+
+int tee_delete_sp_ta(struct tee_context *ctx, void __user *uuid)
+{
+	int ret = 0;
+	struct tee *tee;
+	struct tee_rpc_invoke request;
+	struct tee_shm *shm_uuid;
+
+	if (ctx == NULL || uuid == NULL) {
+		pr_err("invalid param\n");
+		return -EINVAL;
+	}
+
+	tee = ctx->tee;
+
+	shm_uuid = tee_shm_alloc_from_rpc(tee, sizeof(struct TEEC_UUID), 0);
+	if (IS_ERR_OR_NULL(shm_uuid)) {
+		pr_err("failed to alloc shm uuid\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(shm_uuid->resv.kaddr,
+			uuid, sizeof(struct TEEC_UUID))) {
+		pr_err("TEEC_UUID copy_from_user failed\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	memset(&request, 0, sizeof(struct tee_rpc_invoke));
+
+	request.cmd = TEE_RPC_DELETE_TA;
+	request.nbr_bf = 1;
+	request.cmds[0].buffer = (void *) (unsigned long) shm_uuid->resv.paddr;
+	request.cmds[0].size = shm_uuid->size_req;
+
+	ret = tee_supp_cmd(tee, TEE_RPC_ICMD_INVOKE, &request, sizeof(request));
+	if (ret) {
+		pr_err("start delete_ta failed with %d\n", ret);
+		goto exit;
+	}
+
+	ret = request.res;
+	if (ret)
+		pr_err("delete_ta failed with 0x%x\n", ret);
+
+exit:
+	tee_shm_free_from_rpc(shm_uuid);
+	return ret;
+}
+
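+/*
+ * Install a system TA: pack the UUID, size and TA image into a non-secure
+ * shm (referenced by its token) and hand it to the supplicant in a single
+ * RPC buffer.
+ */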
+int tee_install_sys_ta(struct tee *tee, void __user *u_arg)
+{
+	int r;
+	void *shm_kva;
+	unsigned long left;
+
+	struct TEEC_UUID uuid;
+	struct tee_rpc_invoke inv;
+	struct tee_shm *shm;
+
+	struct tee_ta_inst_desc ta_inst_desc;
+
+	if ((copy_from_user(&ta_inst_desc, u_arg,
+			sizeof(struct tee_ta_inst_desc)))) {
+		return -EFAULT;
+	}
+
+	if (copy_from_user(&uuid, ta_inst_desc.uuid, sizeof(struct TEEC_UUID)))
+		return -EFAULT;
+
+	shm = tee_shm_alloc_from_rpc(tee, sizeof(struct TEEC_UUID) +
+		sizeof(uint32_t) + ta_inst_desc.ta_buf_size,
+		TEEC_MEM_NONSECURE);
+
+	if (shm == NULL)
+		return -ENOMEM;
+
+	shm_kva = vmap(shm->ns.pages, shm->ns.nr_pages, VM_MAP, PAGE_KERNEL);
+	if (shm_kva == NULL) {
+		pr_err("failed to vmap %zu pages\n",
+			shm->ns.nr_pages);
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(shm_kva, &uuid, sizeof(struct TEEC_UUID));
+	memcpy((char *) shm_kva + sizeof(struct TEEC_UUID),
+		&ta_inst_desc.ta_buf_size, sizeof(uint32_t));
+
+	left = copy_from_user(
+		(char *) shm_kva + sizeof(struct TEEC_UUID) + sizeof(uint32_t),
+		ta_inst_desc.ta_buf,
+		ta_inst_desc.ta_buf_size);
+
+	if (left) {
+		pr_err("copy_from_user failed size %x return: %lu\n",
+			ta_inst_desc.ta_buf_size, left);
+		vunmap(shm_kva);
+		r = -EFAULT;
+		goto exit;
+	}
+
+	vunmap(shm_kva);
+
+	memset(&inv, 0, sizeof(inv));
+
+	inv.cmd = TEE_RPC_INSTALL_SYS_TA;
+	inv.res = TEEC_ERROR_NOT_IMPLEMENTED;
+	inv.nbr_bf = 1;
+
+	inv.cmds[0].buffer = (void *) (unsigned long) shm->ns.token;
+	inv.cmds[0].type = TEE_RPC_BUFFER | TEE_RPC_BUFFER_NONSECURE;
+	inv.cmds[0].size = shm->size_req;
+
+	r = tee_supp_cmd(tee, TEE_RPC_ICMD_INVOKE, &inv, sizeof(inv));
+	if (r)
+		pr_err("install_sys_ta failed with %d\n", r);
+	else
+		r = inv.res;
+
+exit:
+	tee_shm_free_from_rpc(shm);
+
+	return r;
+}
+
+int tee_ta_mgmt_init(void)
+{
+	mutex_init(&mutex_inst_ta);
+	init_completion(&event_inst_ta);
+	return 0;
+}
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.h
new file mode 100644
index 0000000..86d6cc4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_ta_mgmt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_TA_MGMT_H
+#define TEE_TA_MGMT_H
+
+struct tee;
+struct tee_context;
+
+int tee_ta_mgmt_init(void);
+
+int tee_install_sp_ta(struct tee_context *ctx, void __user *ta_spta_inst_desc);
+int tee_install_sp_ta_response(struct tee_context *ctx, void __user *u_arg);
+int tee_delete_sp_ta(struct tee_context *ctx, void __user *uuid);
+
+int tee_install_sys_ta(struct tee *tee, void __user *u_arg);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.c
new file mode 100644
index 0000000..f48f89a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "tee_tui.h"
+
+static int trustedui_mode = TRUSTEDUI_MODE_OFF;
+static int trustedui_blank_counter;
+
+static DEFINE_SPINLOCK(trustedui_lock);
+
+int trustedui_blank_inc(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = ++trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_inc);
+
+int trustedui_blank_dec(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = --trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_dec);
+
+int trustedui_blank_get_counter(void)
+{
+	unsigned long flags;
+	int newvalue;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	newvalue = trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_get_counter);
+
+void trustedui_blank_set_counter(int counter)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	trustedui_blank_counter = counter;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_blank_set_counter);
+
+int trustedui_get_current_mode(void)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_get_current_mode);
+
+void trustedui_set_mode(int mode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	trustedui_mode = mode;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_set_mode);
+
+
+int trustedui_set_mask(int mask)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode |= mask;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_set_mask);
+
+int trustedui_clear_mask(int mask)
+{
+	unsigned long flags;
+	int mode;
+
+	spin_lock_irqsave(&trustedui_lock, flags);
+	mode = trustedui_mode &= ~mask;
+	spin_unlock_irqrestore(&trustedui_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(trustedui_clear_mask);
+
+
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.h
new file mode 100644
index 0000000..5acdbfa
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef _TEE_TUI_H_
+#define _TEE_TUI_H_
+
+#include <linux/types.h>
+
+#define TRUSTEDUI_MODE_OFF				0x00
+#define TRUSTEDUI_MODE_ALL				0xff
+#define TRUSTEDUI_MODE_TUI_SESSION		0x01
+#define TRUSTEDUI_MODE_VIDEO_SECURED	0x02
+#define TRUSTEDUI_MODE_INPUT_SECURED	0x04
+
+int teec_wait_cmd(uint32_t *cmd_id);
+bool teec_notify_event(uint32_t event_type);
+
+int trustedui_blank_inc(void);
+int trustedui_blank_dec(void);
+int trustedui_blank_get_counter(void);
+void trustedui_blank_set_counter(int counter);
+
+int trustedui_get_current_mode(void);
+void trustedui_set_mode(int mode);
+int trustedui_set_mask(int mask);
+int trustedui_clear_mask(int mask);
+
+
+/**
+ * Notification ID's for communication Trustlet Connector -> Driver.
+ */
+#define NOT_TUI_NONE			0
+/* NWd system event that closes the current TUI session*/
+#define NOT_TUI_CANCEL_EVENT	1
+
+#endif /* _TEE_TUI_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.c
new file mode 100644
index 0000000..2aec579
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fb.h>
+
+#include "tee_tui.h"
+#include "tee_tui_hal.h"
+#include <linux/delay.h>
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+uint32_t __weak hal_tui_init(void)
+{
+	return 0;
+}
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exit. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void __weak hal_tui_exit(void)
+{
+}
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t  __weak hal_tui_deactivate(void)
+{
+	/* Set linux TUI flag */
+	trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+
+	/* TODO: save touch/display state */
+	trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+			   TRUSTEDUI_MODE_INPUT_SECURED);
+
+	return 0;
+}
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t  __weak hal_tui_activate(void)
+{
+	/* Release NWd protection */
+	trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+		TRUSTEDUI_MODE_INPUT_SECURED);
+
+	/* TODO: restore touch/display state */
+	trustedui_set_mode(TRUSTEDUI_MODE_OFF);
+
+	return 0;
+}
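+
+/*
+ * The hal_tui_*() functions above are __weak defaults. A platform
+ * integration may provide strong definitions with the same prototypes to
+ * hook its own display/input handling; illustrative sketch only, the
+ * my_panel_stop_nwd_display() helper is an assumption:
+ *
+ *   uint32_t hal_tui_deactivate(void)
+ *   {
+ *           trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+ *           my_panel_stop_nwd_display();
+ *           trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED |
+ *                              TRUSTEDUI_MODE_INPUT_SECURED);
+ *           return 0;
+ *   }
+ */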
+
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.h
new file mode 100644
index 0000000..0947dc3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_tui_hal.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef _TUI_HAL_H_
+#define _TUI_HAL_H_
+
+#include <linux/types.h>
+
+uint32_t hal_tui_init(void);
+void hal_tui_exit(void);
+uint32_t hal_tui_deactivate(void);
+uint32_t hal_tui_activate(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.c b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.c
new file mode 100644
index 0000000..9ddef3b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include "tee_wait_queue.h"
+
+struct tee_wait_queue {
+	struct list_head link;
+	struct completion comp;
+	u32 key;
+};
+
+void tee_wait_queue_init(struct tee_wait_queue_private *priv)
+{
+	mutex_init(&priv->mu);
+	INIT_LIST_HEAD(&priv->db);
+}
+EXPORT_SYMBOL(tee_wait_queue_init);
+
+void tee_wait_queue_exit(struct tee_wait_queue_private *priv)
+{
+	mutex_destroy(&priv->mu);
+}
+EXPORT_SYMBOL(tee_wait_queue_exit);
+
+static struct tee_wait_queue *tee_wait_queue_get(struct device *dev,
+				struct tee_wait_queue_private *priv, u32 key)
+{
+	struct tee_wait_queue *w;
+
+	mutex_lock(&priv->mu);
+
+	list_for_each_entry(w, &priv->db, link)
+		if (w->key == key)
+			goto out;
+
+	w = kmalloc(sizeof(struct tee_wait_queue), GFP_KERNEL);
+	if (!w)
+		goto out;
+
+	init_completion(&w->comp);
+	w->key = key;
+	list_add_tail(&w->link, &priv->db);
+out:
+	mutex_unlock(&priv->mu);
+	return w;
+}
+
+void tee_wait_queue_sleep(struct device *dev,
+			struct tee_wait_queue_private *priv, u32 key)
+{
+	struct tee_wait_queue *w = tee_wait_queue_get(dev, priv, key);
+
+	if (!w)
+		return;
+
+	wait_for_completion(&w->comp);
+	mutex_lock(&priv->mu);
+	list_del(&w->link);
+	mutex_unlock(&priv->mu);
+	kfree(w);
+}
+EXPORT_SYMBOL(tee_wait_queue_sleep);
+
+void tee_wait_queue_wakeup(struct device *dev,
+			struct tee_wait_queue_private *priv, u32 key)
+{
+	struct tee_wait_queue *w = tee_wait_queue_get(dev, priv, key);
+
+	if (!w)
+		return;
+
+	complete(&w->comp);
+}
+EXPORT_SYMBOL(tee_wait_queue_wakeup);
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.h
new file mode 100644
index 0000000..922baf2
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/core/tee_wait_queue.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_WAIT_QUEUE_H
+#define TEE_WAIT_QUEUE_H
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/device.h>
+
+struct tee_wait_queue_private {
+	struct mutex mu;
+	struct list_head db;
+};
+
+void tee_wait_queue_init(struct tee_wait_queue_private *priv);
+void tee_wait_queue_exit(struct tee_wait_queue_private *priv);
+void tee_wait_queue_sleep(struct device *dev,
+			struct tee_wait_queue_private *priv, u32 key);
+void tee_wait_queue_wakeup(struct device *dev,
+			struct tee_wait_queue_private *priv, u32 key);
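+
+/*
+ * Illustrative usage sketch (not part of the original header): one context
+ * blocks on a key until another context signals the same key.
+ *
+ *   struct tee_wait_queue_private wq;
+ *
+ *   tee_wait_queue_init(&wq);
+ *
+ *   tee_wait_queue_sleep(dev, &wq, key);    (waiter, blocks on the key)
+ *   tee_wait_queue_wakeup(dev, &wq, key);   (waker, completes the key)
+ *
+ *   tee_wait_queue_exit(&wq);
+ */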
+
+#endif /*TEE_WAIT_QUEUE_H*/
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc.h
new file mode 100644
index 0000000..0c54326
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc.h
@@ -0,0 +1,710 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEESMC_H
+#define TEESMC_H
+
+#ifndef ASM
+/*
+ * This section depends on uint64_t, uint32_t and uint8_t already being
+ * defined. Since this file is used in several different environments
+ * (secure world OS and normal world Linux kernel to start with) where
+ * stdint.h may not be available it's the responsibility of the one
+ * including this file to provide those types.
+ */
+
+/*
+ * Trusted OS SMC interface.
+ *
+ * The SMC interface follows SMC Calling Convention
+ * (ARM_DEN0028A_SMC_Calling_Convention).
+ *
+ * The primary objective of this API is to provide a transport layer on
+ * which Global Platform compliant TEE interfaces can be deployed. But the
+ * interface can also be used for other implementations.
+ *
+ * This file is divided in two parts.
+ * Part 1 deals with passing parameters to Trusted Applications running in
+ * a trusted OS in secure world.
+ * Part 2 deals with the lower level handling of the SMC.
+ */
+
+/*
+ *******************************************************************************
+ * Part 1 - passing parameters to Trusted Applications
+ *******************************************************************************
+ */
+
+/*
+ * Same values as TEE_PARAM_* from TEE Internal API
+ */
+#define TEESMC_ATTR_TYPE_NONE		0
+#define TEESMC_ATTR_TYPE_VALUE_INPUT	1
+#define TEESMC_ATTR_TYPE_VALUE_OUTPUT	2
+#define TEESMC_ATTR_TYPE_VALUE_INOUT	3
+#define TEESMC_ATTR_TYPE_MEMREF_INPUT	5
+#define TEESMC_ATTR_TYPE_MEMREF_OUTPUT	6
+#define TEESMC_ATTR_TYPE_MEMREF_INOUT	7
+
+#define TEESMC_ATTR_TYPE_MASK		0x7
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * One example of this is a struct teesmc_meta_open_session which
+ * is added to TEESMC{32,64}_CMD_OPEN_SESSION.
+ */
+#define TEESMC_ATTR_META		0x8
+
+/*
+ * Used as an indication from normal world of compatible cache usage.
+ * 'I' stands for inner cache and 'O' for outer cache.
+ */
+#define TEESMC_ATTR_CACHE_I_NONCACHE	0x0
+#define TEESMC_ATTR_CACHE_I_WRITE_THR	0x1
+#define TEESMC_ATTR_CACHE_I_WRITE_BACK	0x2
+#define TEESMC_ATTR_CACHE_O_NONCACHE	0x0
+#define TEESMC_ATTR_CACHE_O_WRITE_THR	0x4
+#define TEESMC_ATTR_CACHE_O_WRITE_BACK	0x8
+
+#define TEESMC_ATTR_CACHE_NONCACHE	(TEESMC_ATTR_CACHE_I_NONCACHE | \
+					 TEESMC_ATTR_CACHE_O_NONCACHE)
+#define TEESMC_ATTR_CACHE_DEFAULT	(TEESMC_ATTR_CACHE_I_WRITE_BACK | \
+					 TEESMC_ATTR_CACHE_O_WRITE_BACK)
+
+#define TEESMC_ATTR_CACHE_SHIFT		4
+#define TEESMC_ATTR_CACHE_MASK		0xf
+
+#define TEESMC_CMD_OPEN_SESSION		0
+#define TEESMC_CMD_INVOKE_COMMAND	1
+#define TEESMC_CMD_CLOSE_SESSION	2
+#define TEESMC_CMD_CANCEL		3
+
+/**
+ * struct teesmc32_param_memref - memory reference
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ *
+ * Secure and normal world communicate pointers via physical addresses
+ * instead of the virtual addresses that are usually used for pointers. This
+ * is because secure and normal world have completely independent memory
+ * mappings. Normal world can even have a hypervisor which needs to translate
+ * the guest physical address (AKA IPA in ARM lingo) to a real physical
+ * address before passing the structure to secure world.
+ */
+struct teesmc32_param_memref {
+	uint32_t buf_ptr;
+	uint32_t size;
+};
+
+/**
+ * struct teesmc64_param_memref - memory reference
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ *
+ * See description of struct teesmc32_param_memref.
+ */
+struct teesmc64_param_memref {
+	uint64_t buf_ptr;
+	uint64_t size;
+};
+
+/**
+ * struct teesmc32_param_value - values
+ * @a: first value
+ * @b: second value
+ */
+struct teesmc32_param_value {
+	uint32_t a;
+	uint32_t b;
+};
+
+/**
+ * struct teesmc64_param_value - values
+ * @a: first value
+ * @b: second value
+ */
+struct teesmc64_param_value {
+	uint64_t a;
+	uint64_t b;
+};
+
+/**
+ * struct teesmc32_param - parameter
+ * @attr: attributes
+ * @memref: a memory reference
+ * @value: a value
+ *
+ * attr & TEESMC_ATTR_TYPE_MASK indicates if memref or value is used in the
+ * union. TEESMC_ATTR_TYPE_VALUE_* indicates value and
+ * TEESMC_ATTR_TYPE_MEMREF_* indicates memref. TEESMC_ATTR_TYPE_NONE
+ * indicates that none of the members are used.
+ */
+struct teesmc32_param {
+	uint32_t attr;
+	union {
+		struct teesmc32_param_memref memref;
+		struct teesmc32_param_value value;
+	} u;
+};
+
+/**
+ * struct teesmc64_param - parameter
+ * @attr: attributes
+ * @memref: a memory reference
+ * @value: a value
+ *
+ * See description of union teesmc32_param.
+ */
+struct teesmc64_param {
+	uint64_t attr;
+	union {
+		struct teesmc64_param_memref memref;
+		struct teesmc64_param_value value;
+	} u;
+};
+
+/**
+ * struct teesmc32_arg - SMC argument for Trusted OS
+ * @cmd: Command, one of TEESMC_CMD_*
+ * @ta_func: Trusted Application function, specific to the Trusted Application,
+ *	used if cmd == TEESMC_CMD_INVOKE_COMMAND
+ * @session: In parameter for all TEESMC_CMD_* except
+ *	TEESMC_CMD_OPEN_SESSION where it's an output parameter instead
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal SMC calls to the Trusted OS use this struct. If cmd requires
+ * further information than what these fields hold, it can be passed as a
+ * parameter tagged as meta (setting the TEESMC_ATTR_META bit in the
+ * corresponding param attr). This is used for TEESMC_CMD_OPEN_SESSION
+ * to pass a struct teesmc_meta_open_session which is needed to find the
+ * Trusted Application and to indicate the credentials of the client.
+ */
+struct teesmc32_arg {
+	uint32_t cmd;
+	uint32_t ta_func;
+	uint32_t session;
+	uint32_t ret;
+	uint32_t ret_origin;
+	uint32_t num_params;
+	/*
+	 * Commented out elements used to visualize the layout of the dynamic part
+	 * of the struct. Note that these fields are not available at all
+	 * if num_params == 0.
+	 *
+	 * params is accessed through the macro TEESMC32_GET_PARAMS
+	 */
+
+	/* struct teesmc32_param params[num_params]; */
+};
+
+/**
+ * TEESMC32_GET_PARAMS - return pointer to struct teesmc32_param *
+ *
+ * @x: Pointer to a struct teesmc32_arg
+ *
+ * Returns a pointer to the params[] inside a struct teesmc32_arg.
+ */
+#define TEESMC32_GET_PARAMS(x) \
+	(struct teesmc32_param *)(((struct teesmc32_arg *)(x)) + 1)
+
+/**
+ * TEESMC32_GET_ARG_SIZE - return size of struct teesmc32_arg
+ *
+ * @num_params: Number of parameters embedded in the struct teesmc32_arg
+ *
+ * Returns the size of the struct teesmc32_arg together with the number
+ * of embedded parameters.
+ */
+#define TEESMC32_GET_ARG_SIZE(num_params) \
+	(sizeof(struct teesmc32_arg) + \
+	 sizeof(struct teesmc32_param) * (num_params))
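+
+/*
+ * Illustrative sketch (not part of the original header): building an
+ * invoke-command argument buffer with the two macros above. The shared
+ * memory allocator and the physical addresses are assumptions for
+ * illustration only.
+ *
+ *   size_t sz = TEESMC32_GET_ARG_SIZE(1);
+ *   struct teesmc32_arg *arg = alloc_arg_shm(sz);   (assumed allocator)
+ *   struct teesmc32_param *params;
+ *
+ *   arg->cmd = TEESMC_CMD_INVOKE_COMMAND;
+ *   arg->ta_func = 0;                               (TA-specific function)
+ *   arg->session = session_id;
+ *   arg->num_params = 1;
+ *   params = TEESMC32_GET_PARAMS(arg);
+ *   params[0].attr = TEESMC_ATTR_TYPE_MEMREF_INPUT;
+ *   params[0].u.memref.buf_ptr = buf_pa;            (physical address)
+ *   params[0].u.memref.size = buf_len;
+ */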
+
+/**
+ * struct teesmc64_arg - SMC argument for Trusted OS
+ * @cmd: OS Command, one of TEESMC_CMD_*
+ * @ta_func: Trusted Application function, specific to the Trusted Application
+ * @session: In parameter for all TEESMC_CMD_* but
+ *	TEESMC_CMD_OPEN_SESSION
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * See description of struct teesmc32_arg.
+ */
+struct teesmc64_arg {
+	uint64_t cmd;
+	uint64_t ta_func;
+	uint64_t session;
+	uint64_t ret;
+	uint64_t ret_origin;
+	uint64_t num_params;
+	/*
+	 * Commented out elements used to visualize the layout of the dynamic part
+	 * of the struct. Note that these fields are not available at all
+	 * if num_params == 0.
+	 *
+	 * params is accessed through the macro TEESMC64_GET_PARAMS
+	 */
+
+	/* struct teesmc64_param params[num_params]; */
+};
+
+/**
+ * TEESMC64_GET_PARAMS - return pointer to struct teesmc64_param *
+ *
+ * @x: Pointer to a struct teesmc64_arg
+ *
+ * Returns a pointer to the params[] inside a struct teesmc64_arg.
+ */
+#define TEESMC64_GET_PARAMS(x) \
+	(struct teesmc64_param *)(((struct teesmc64_arg *)(x)) + 1)
+
+/**
+ * TEESMC64_GET_ARG_SIZE - return size of struct teesmc64_arg
+ *
+ * @num_params: Number of parameters embedded in the struct teesmc64_arg
+ *
+ * Returns the size of the struct teesmc64_arg together with the number
+ * of embedded parameters.
+ */
+#define TEESMC64_GET_ARG_SIZE(num_params) \
+	(sizeof(struct teesmc64_arg) + \
+	 sizeof(struct teesmc64_param) * (num_params))
+
+#define TEESMC_UUID_LEN	16
+
+/**
+ * struct teesmc_meta_open_session - additional parameters for
+ *				TEESMC32_CMD_OPEN_SESSION and
+ *				TEESMC64_CMD_OPEN_SESSION
+ * @uuid: UUID of the Trusted Application
+ * @clnt_uuid: UUID of client
+ * @clnt_login: Login class of client, TEE_LOGIN_* when Global Platform
+ *		compliant
+ *
+ * This struct is passed in the first parameter as an input memref tagged
+ * as meta on a TEESMC{32,64}_CMD_OPEN_SESSION cmd. It's important
+ * that it really is the first parameter to make it easy for an eventual
+ * hypervisor to inspect and possibly update clnt_* values.
+ */
+struct teesmc_meta_open_session {
+	uint8_t uuid[TEESMC_UUID_LEN];
+	uint8_t clnt_uuid[TEESMC_UUID_LEN];
+	uint32_t clnt_login;
+};
+
+
+#endif /*!ASM*/
+
+/*
+ *******************************************************************************
+ * Part 2 - low level SMC interaction
+ *******************************************************************************
+ */
+
+#define TEESMC_32			0
+#define TEESMC_64			0x40000000
+#define TEESMC_FAST_CALL		0x80000000
+#define TEESMC_STD_CALL			0
+
+#define TEESMC_OWNER_MASK		0x3F
+#define TEESMC_OWNER_SHIFT		24
+
+#define TEESMC_FUNC_MASK		0xFFFF
+
+#define TEESMC_IS_FAST_CALL(smc_val)	((smc_val) & TEESMC_FAST_CALL)
+#define TEESMC_IS_64(smc_val)		((smc_val) & TEESMC_64)
+#define TEESMC_FUNC_NUM(smc_val)	((smc_val) & TEESMC_FUNC_MASK)
+#define TEESMC_OWNER_NUM(smc_val)	(((smc_val) >> TEESMC_OWNER_SHIFT) & \
+					 TEESMC_OWNER_MASK)
+
+#define TEESMC_CALL_VAL(type, calling_convention, owner, func_num) \
+			((type) | (calling_convention) | \
+			(((owner) & TEESMC_OWNER_MASK) << TEESMC_OWNER_SHIFT) |\
+			((func_num) & TEESMC_FUNC_MASK))
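+
+/*
+ * Worked example (added for illustration): TEESMC32_CALL_GET_OS_UUID,
+ * defined further down, expands to
+ *
+ *   TEESMC_32 | TEESMC_FAST_CALL |
+ *   (TEESMC_OWNER_TRUSTED_OS << TEESMC_OWNER_SHIFT) |
+ *   TEESMC_FUNCID_GET_OS_UUID                        == 0xB2000000
+ *
+ * and can be decomposed again with the helpers above:
+ *
+ *   TEESMC_OWNER_NUM(0xB2000000) == TEESMC_OWNER_TRUSTED_OS (50)
+ *   TEESMC_FUNC_NUM(0xB2000000)  == TEESMC_FUNCID_GET_OS_UUID (0)
+ *   TEESMC_IS_FAST_CALL(0xB2000000) is non-zero
+ */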
+
+#define TEESMC_OWNER_ARCH		0
+#define TEESMC_OWNER_CPU		1
+#define TEESMC_OWNER_SIP		2
+#define TEESMC_OWNER_OEM		3
+#define TEESMC_OWNER_STANDARD		4
+#define TEESMC_OWNER_TRUSTED_APP	48
+#define TEESMC_OWNER_TRUSTED_OS		50
+
+#define TEESMC_OWNER_TRUSTED_OS_API	63
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define TEESMC32_FUNCID_CALLS_COUNT	0xFF00
+#define TEESMC32_CALLS_COUNT \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+			TEESMC_OWNER_TRUSTED_OS_API, \
+			TEESMC32_FUNCID_CALLS_COUNT)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return one of the following UIDs if using API specified in this file
+ * without further extensions:
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 : Only 32 bit calls are supported
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f9 : Both 32 and 64 bit calls are supported
+ */
+#define TEESMC_UID_R0			0x65cb6b93
+#define TEESMC_UID_R1			0xaf0c4617
+#define TEESMC_UID_R2			0x8ed6644a
+#define TEESMC_UID32_R3			0x8d1140f8
+#define TEESMC_UID64_R3			0x8d1140f9
+#define TEESMC32_FUNCID_CALLS_UID	0xFF01
+#define TEESMC32_CALLS_UID \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+			TEESMC_OWNER_TRUSTED_OS_API, \
+			TEESMC32_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 1.0 if using API specified in this file without further extensions.
+ */
+#define TEESMC_REVISION_MAJOR	1
+#define TEESMC_REVISION_MINOR	0
+#define TEESMC32_FUNCID_CALLS_REVISION	0xFF03
+#define TEESMC32_CALLS_REVISION \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+			TEESMC_OWNER_TRUSTED_OS_API, \
+			TEESMC32_FUNCID_CALLS_REVISION)
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in r0-4/w0-4 in the same way as TEESMC32_CALLS_UID
+ * described above.
+ */
+#define TEESMC_FUNCID_GET_OS_UUID	0
+#define TEESMC32_CALL_GET_OS_UUID \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in r0-1/w0-1 in the same way as TEESMC32_CALLS_REVISION
+ * described above.
+ */
+#define TEESMC_FUNCID_GET_OS_REVISION	1
+#define TEESMC32_CALL_GET_OS_REVISION \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_GET_OS_REVISION)
+
+
+
+/*
+ * Call with struct teesmc32_arg as argument
+ *
+ * Call register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_WITH_ARG
+ * r1/x1	Physical pointer to a struct teesmc32_arg
+ * r2-6/x2-6	Not used
+ * r7/x7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * r0/x0	Return value, TEESMC_RETURN_*
+ * r1-3/x1-3	Not used
+ * r4-7/x4-7	Preserved
+ *
+ * Ebusy return register usage:
+ * r0/x0	Return value, TEESMC_RETURN_ETHREAD_LIMIT
+ * r1-3/x1-3	Preserved
+ * r4-7/x4-7	Preserved
+ *
+ * RPC return register usage:
+ * r0/x0	Return value, TEESMC_RETURN_IS_RPC(val)
+ * r1-2/x1-2	RPC parameters
+ * r3-7/x3-7	Resume information, must be preserved
+ *
+ * Possible return values:
+ * TEESMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
+ *					function.
+ * TEESMC_RETURN_OK			Call completed, result updated in
+ *					the previously supplied struct
+ *					teesmc32_arg.
+ * TEESMC_RETURN_ETHREAD_LIMIT		Trusted OS out of threads,
+ *					try again later.
+ * TEESMC_RETURN_EBADADDR		Bad physical pointer to struct
+ *					teesmc32_arg.
+ * TEESMC_RETURN_EBADCMD		Bad/unknown cmd in struct teesmc32_arg
+ * TEESMC_RETURN_IS_RPC()		Call suspended by RPC call to normal
+ *					world.
+ */
+#define TEESMC_FUNCID_CALL_WITH_ARG	2
+#define TEESMC32_CALL_WITH_ARG \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+	TEESMC_FUNCID_CALL_WITH_ARG)
+/* Same as TEESMC32_CALL_WITH_ARG but a "fast call". */
+#define TEESMC32_FASTCALL_WITH_ARG \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+	TEESMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Call with struct teesmc64_arg as argument
+ *
+ * See description of TEESMC32_CALL_WITH_ARG above, uses struct
+ * teesmc64_arg in x1 instead.
+ */
+#define TEESMC64_CALL_WITH_ARG \
+	TEESMC_CALL_VAL(TEESMC_64, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+	TEESMC_FUNCID_CALL_WITH_ARG)
+/* Same as TEESMC64_CALL_WITH_ARG but a "fast call". */
+#define TEESMC64_FASTCALL_WITH_ARG \
+	TEESMC_CALL_VAL(TEESMC_64, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+	TEESMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * r0/x0	SMC Function ID,
+ *		TEESMC32_CALL_RETURN_FROM_RPC or
+ *		TEESMC32_FASTCALL_RETURN_FROM_RPC
+ * r1-3/x1-3	Value of r1-3/x1-3 when TEESMC32_CALL_WITH_ARG returned
+ *		TEESMC_RETURN_RPC in r0/x0
+ *
+ * Return register usage is the same as for TEESMC32_CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * TEESMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
+ *					function.
+ * TEESMC_RETURN_OK			Original call completed, result
+ *					updated in the previously supplied
+ *					struct teesmc32_arg.
+ * TEESMC_RETURN_RPC			Call suspended by RPC call to normal
+ *					world.
+ * TEESMC_RETURN_ETHREAD_LIMIT		Trusted OS out of threads,
+ *					try again later.
+ * TEESMC_RETURN_ERESUME		Resume failed, the opaque resume
+ *					information was corrupt.
+ */
+#define TEESMC_FUNCID_RETURN_FROM_RPC	3
+#define TEESMC32_CALL_RETURN_FROM_RPC \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_RETURN_FROM_RPC)
+/* Same as TEESMC32_CALL_RETURN_FROM_RPC but a "fast call". */
+#define TEESMC32_FASTCALL_RETURN_FROM_RPC \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_RETURN_FROM_RPC)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * See description of TEESMC32_CALL_RETURN_FROM_RPC above, used when
+ * it's a 64bit call that has returned.
+ */
+#define TEESMC64_CALL_RETURN_FROM_RPC \
+	TEESMC_CALL_VAL(TEESMC_64, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_RETURN_FROM_RPC)
+/* Same as TEESMC64_CALL_RETURN_FROM_RPC but a "fast call". */
+#define TEESMC64_FASTCALL_RETURN_FROM_RPC \
+	TEESMC_CALL_VAL(TEESMC_64, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_RETURN_FROM_RPC)
+
+/*
+ * From secure monitor to Trusted OS, handle FIQ
+ *
+ * A virtual call which is injected by the Secure Monitor when an FIQ is
+ * raised while in normal world (SCR_NS is set). The monitor restores
+ * secure architecture registers and secure EL_SP1 and jumps to previous
+ * secure EL3_ELR. Trusted OS should preserve all general purpose
+ * registers.
+ *
+ * Call register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_HANDLE_FIQ
+ * r1-7/x1-7	Not used, but must be preserved
+ *
+ * Return register usage:
+ * Not used
+ */
+#define TEESMC_FUNCID_CALL_HANDLE_FIQ	0xf000
+#define TEESMC32_CALL_HANDLE_FIQ \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_FUNCID_CALL_HANDLE_FIQ)
+
+#define TEESMC_RETURN_RPC_PREFIX_MASK	0xFFFF0000
+#define TEESMC_RETURN_RPC_PREFIX	0xFFFF0000
+#define TEESMC_RETURN_RPC_FUNC_MASK	0x0000FFFF
+
+#define TEESMC_RETURN_GET_RPC_FUNC(ret)	((ret) & TEESMC_RETURN_RPC_FUNC_MASK)
+
+#define TEESMC_RPC_VAL(func)		((func) | TEESMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate argument memory for RPC parameter passing.
+ * Argument memory is used to hold a struct teesmc32_arg.
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_RPC_ALLOC_ARG
+ * r1/x1	Size in bytes of required argument memory
+ * r2-7/x2-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1/x1	Physical pointer to allocated argument memory, 0 if size
+ *		was 0 or if memory can't be allocated
+ * r2-7/x2-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_ALLOC_ARG	0
+#define TEESMC_RETURN_RPC_ALLOC_ARG	\
+	TEESMC_RPC_VAL(TEESMC_RPC_FUNC_ALLOC_ARG)
+
+/*
+ * Allocate payload memory for RPC parameter passing.
+ * Payload memory is used to hold the memory referred to by struct
+ * teesmc32_param_memref.
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_RPC_ALLOC_PAYLOAD
+ * r1/x1	Size in bytes of required payload memory
+ * r2-7/x2-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1/x1	Physical pointer to allocated payload memory, 0 if size
+ *		was 0 or if memory can't be allocated
+ * r2-7/x2-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_ALLOC_PAYLOAD	1
+#define TEESMC_RETURN_RPC_ALLOC_PAYLOAD	\
+	TEESMC_RPC_VAL(TEESMC_RPC_FUNC_ALLOC_PAYLOAD)
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_RPC_ALLOC_ARG.
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_RPC_FREE_ARG
+ * r1/x1	Physical pointer to previously allocated argument memory
+ * r2-7/x2-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1/x1	Not used
+ * r2-7/x2-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_FREE_ARG	2
+#define TEESMC_RETURN_RPC_FREE_ARG	TEESMC_RPC_VAL(TEESMC_RPC_FUNC_FREE_ARG)
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_RPC_ALLOC_PAYLOAD.
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_RPC_FREE_PAYLOAD
+ * r1/x1	Physical pointer to previously allocated payload memory
+ * r3-7/x3-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1-2/x1-2	Not used
+ * r3-7/x3-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_FREE_PAYLOAD	3
+#define TEESMC_RETURN_RPC_FREE_PAYLOAD	\
+	TEESMC_RPC_VAL(TEESMC_RPC_FUNC_FREE_PAYLOAD)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * r0/x0	TEESMC_RETURN_RPC_IRQ
+ * r1-7/x1-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1-7/x1-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_IRQ		4
+#define TEESMC_RETURN_RPC_IRQ		TEESMC_RPC_VAL(TEESMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct teesmc{32,64}_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd		the Request ID
+ * - ret		return value of the request, filled in by normal world
+ * - num_params		number of parameters for the request
+ * - params		the parameters
+ * - param_attrs	attributes of the parameters
+ *
+ * "Call" register usage:
+ * r0/x0	TEESMC_RETURN_RPC_CMD
+ * r1/x1	Physical pointer to a struct teesmc32_arg if returning from
+ *		a AArch32 SMC or a struct teesmc64_arg if returning from a
+ *		AArch64 SMC, must be preserved, only the data should
+ *		be updated
+ * r2-7/x2-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1-7/x1-7	Preserved
+ */
+#define TEESMC_RPC_FUNC_CMD		5
+#define TEESMC_RETURN_RPC_CMD	TEESMC_RPC_VAL(TEESMC_RPC_FUNC_CMD)
+
+#define T6SMC_RPC_NONPREEMPT_CMD	6
+#define T6SMC_RPC_NONPREEMPT_GET_FUNC(ret) \
+	((TEESMC_RETURN_GET_RPC_FUNC(ret) >> 8) & 0xff)
+
+#define T6SMC_RETURN_RPC_NONPREEMPT_CMD(cmd) \
+	TEESMC_RPC_VAL(T6SMC_RPC_NONPREEMPT_CMD | (((cmd) & 0xff) << 8))
+
+#define T6SMC_RPC_CLKMGR_CMD		0
+#define T6SMC_RETURN_RPC_CLKMGR_CMD \
+	T6SMC_RETURN_RPC_NONPREEMPT_CMD(T6SMC_RPC_CLKMGR_CMD)
+
+/* Not used. Kept for compatibility with old TOS versions. */
+#define T6SMC_RPC_CLKMGR_LEGACY_CMD	7
+#define T6SMC_RETURN_RPC_CLKMGR_LEGACY_CMD \
+	TEESMC_RPC_VAL(T6SMC_RPC_CLKMGR_LEGACY_CMD)
+
+/* Returned in r0 */
+#define TEESMC_RETURN_UNKNOWN_FUNCTION	0xFFFFFFFF
+
+/* Returned in r0 only from Trusted OS functions */
+#define TEESMC_RETURN_OK		0x0
+#define TEESMC_RETURN_ETHREAD_LIMIT	0x1
+#define TEESMC_RETURN_ERESUME		0x2
+#define TEESMC_RETURN_EBADADDR		0x3
+#define TEESMC_RETURN_EBADCMD		0x4
+#define TEESMC_RETURN_IS_RPC(ret) \
+	(((ret) & TEESMC_RETURN_RPC_PREFIX_MASK) == TEESMC_RETURN_RPC_PREFIX)
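+
+/*
+ * Illustrative normal-world dispatch sketch (not part of the original
+ * header), assuming tee_smc_call() from tee_kernel_lowlevel_api.h and a
+ * hypothetical handle_rpc() helper:
+ *
+ *   struct smc_param param = { .a0 = TEESMC32_CALL_WITH_ARG, .a1 = arg_pa };
+ *   struct smc_param *p = &param;
+ *
+ *   tee_smc_call(p);
+ *   while (TEESMC_RETURN_IS_RPC(p->a0)) {
+ *           handle_rpc(p);                 (service the requested RPC func)
+ *           p->a0 = TEESMC32_CALL_RETURN_FROM_RPC;
+ *           tee_smc_call(p);
+ *   }
+ *   (p->a0 now holds TEESMC_RETURN_OK or one of the error codes above)
+ */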
+
+/*
+ * Returned in r1 by Trusted OS functions if r0 = TEESMC_RETURN_RPC
+ */
+#define TEESMC_RPC_REQUEST_IRQ		0x0
+
+#endif /* TEESMC_H */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc_st.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc_st.h
new file mode 100644
index 0000000..73fd356
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/arm_common/teesmc_st.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEESMC_ST_H
+#define TEESMC_ST_H
+
+#define TEESMC_ST_RETURN_NOTAVAIL	0x5700
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * r0	SMC Function ID, TEESMC32_ST_FASTCALL_GET_SHM_CONFIG
+ * r1-6	Not used
+ * r7	Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * r0	TEESMC_RETURN_OK
+ * r1	Physical address of start of SHM
+ * r2	Size of SHM
+ * r3	1 if SHM is cached, 0 if uncached.
+ * r4-7	Preserved
+ *
+ * Not available register usage:
+ * r0	TEESMC_ST_RETURN_NOTAVAIL
+ * r1-3 Not used
+ * r4-7	Preserved
+ */
+#define TEESMC_ST_FUNCID_GET_SHM_CONFIG	0x5700
+#define TEESMC32_ST_FASTCALL_GET_SHM_CONFIG \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_ST_FUNCID_GET_SHM_CONFIG)
+
+/*
+ * Get Log Memory Config
+ *
+ * Returns the Secure/Non-secure log memory config.
+ *
+ * Call register usage:
+ * r0	SMC Function ID, TEESMC32_ST_FASTCALL_GET_LOGM_CONFIG
+ * r1-6	Not used
+ * r7	Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * r0	TEESMC_RETURN_OK
+ * r1	Physical address of start of LOGM
+ * r2	Size of LOGM
+ * r3	1 if LOGM is cached, 0 if uncached.
+ * r4-7	Preserved
+ *
+ * Not available register usage:
+ * r0	TEESMC_ST_RETURN_NOTAVAIL
+ * r1-3 Not used
+ * r4-7	Preserved
+ */
+
+#define TEESMC_ST_FUNCID_GET_LOGM_CONFIG	0x5702
+#define TEESMC32_ST_FASTCALL_GET_LOGM_CONFIG \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_ST_FUNCID_GET_LOGM_CONFIG)
+
+#define TKCORE_FUNCID_TRACE_CONFIG		0x5703
+#define TKCORE_FASTCALL_TRACE_CONFIG \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TKCORE_FUNCID_TRACE_CONFIG)
+
+/*
+ * Configures TZ/NS shared mutex for outer cache maintenance
+ *
+ * Disables or enables usage of the outer cache mutex.
+ * Returns or sets the physical address of the outer cache mutex.
+ *
+ * Call register usage:
+ * r0	SMC Function ID, TEESMC32_ST_FASTCALL_L2CC_MUTEX
+ * r1	TEESMC_ST_L2CC_MUTEX_GET_ADDR	Get physical address of mutex
+ *	TEESMC_ST_L2CC_MUTEX_SET_ADDR	Set physical address of mutex
+ *	TEESMC_ST_L2CC_MUTEX_ENABLE	Enable usage of mutex
+ *	TEESMC_ST_L2CC_MUTEX_DISABLE	Disable usage of mutex
+ * r2	if r1 == TEESMC_ST_L2CC_MUTEX_SET_ADDR, physical address of mutex
+ * r3-6	Not used
+ * r7	Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * r0	TEESMC_RETURN_OK
+ * r1	Preserved
+ * r2	if r1 == 0, physical address of L2CC mutex
+ * r3-7	Preserved
+ *
+ * Error return register usage:
+ * r0	TEESMC_ST_RETURN_NOTAVAIL	Physical address not available
+ *	TEESMC_RETURN_EBADADDR		Bad supplied physical address
+ *	TEESMC_RETURN_EBADCMD		Unsupported value in r1
+ * r1-7	Preserved
+ */
+#define TEESMC_ST_L2CC_MUTEX_GET_ADDR	0
+#define TEESMC_ST_L2CC_MUTEX_SET_ADDR	1
+#define TEESMC_ST_L2CC_MUTEX_ENABLE	2
+#define TEESMC_ST_L2CC_MUTEX_DISABLE	3
+#define TEESMC_ST_FUNCID_L2CC_MUTEX	0x5701
+#define TEESMC32_ST_FASTCALL_L2CC_MUTEX \
+	TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+			TEESMC_ST_FUNCID_L2CC_MUTEX)
+
+/*
+ * Allocate payload memory for RPC parameter passing.
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD
+ * r1/x1	Size in bytes of required payload memory
+ * r2/x2	Not used
+ * r3-7/x3-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r1/x1	Physical pointer to allocated payload memory, 0 if size
+ *		was 0 or if memory can't be allocated
+ * r2/x2	Shared memory cookie used when freeing the memory
+ * r3-7/x3-7	Preserved
+ */
+#define TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD	0x5700
+#define TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD	\
+		TEESMC_RPC_VAL(TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD)
+
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD
+ *
+ * "Call" register usage:
+ * r0/x0	This value, TEESMC_RETURN_ST_RPC_FREE_PAYLOAD
+ * r1/x1	Shared memory cookie belonging to this payload memory
+ * r2-7/x2-7	Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0	SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ *		AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ *		AArch64 SMC return
+ * r2-7/x2-7	Preserved
+ */
+#define TEESMC_ST_RPC_FUNC_FREE_PAYLOAD		0x5701
+#define TEESMC_RETURN_ST_RPC_FREE_PAYLOAD	\
+		TEESMC_RPC_VAL(TEESMC_ST_RPC_FUNC_FREE_PAYLOAD)
+
+#endif /*TEESMC_ST_H*/
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_client_api.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_client_api.h
new file mode 100644
index 0000000..70ada52
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_client_api.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_CLIENT_API_H
+#define TEE_CLIENT_API_H
+
+#define TEEC_CONFIG_PAYLOAD_REF_COUNT 4
+
+#define TEEC_CONFIG_SHAREDMEM_MAX_SIZE 0x8000
+
+#define TEEC_NONE					0x00000000
+#define TEEC_VALUE_INPUT			0x00000001
+#define TEEC_VALUE_OUTPUT			0x00000002
+#define TEEC_VALUE_INOUT			0x00000003
+#define TEEC_MEMREF_TEMP_INPUT		0x00000005
+#define TEEC_MEMREF_TEMP_OUTPUT		0x00000006
+#define TEEC_MEMREF_TEMP_INOUT		0x00000007
+#define TEEC_MEMREF_WHOLE			0x0000000C
+#define TEEC_MEMREF_PARTIAL_INPUT	0x0000000D
+#define TEEC_MEMREF_PARTIAL_OUTPUT	0x0000000E
+#define TEEC_MEMREF_PARTIAL_INOUT	0x0000000F
+
+#define TEEC_MEMREF_PERMANENT		0x00000008
+
+#define TEEC_MEM_INPUT	0x00000001
+#define TEEC_MEM_OUTPUT	0x00000002
+#define TEEC_MEM_DMABUF	0x00010000
+#define TEEC_MEM_KAPI	0x00020000
+
+#define TEEC_MEM_NONSECURE	0x00040000
+
+#define TEEC_SUCCESS				0x00000000
+#define TEEC_ERROR_GENERIC			0xFFFF0000
+#define TEEC_ERROR_ACCESS_DENIED	0xFFFF0001
+#define TEEC_ERROR_CANCEL			0xFFFF0002
+#define TEEC_ERROR_ACCESS_CONFLICT	0xFFFF0003
+#define TEEC_ERROR_EXCESS_DATA		0xFFFF0004
+#define TEEC_ERROR_BAD_FORMAT		0xFFFF0005
+#define TEEC_ERROR_BAD_PARAMETERS	0xFFFF0006
+#define TEEC_ERROR_BAD_STATE		0xFFFF0007
+#define TEEC_ERROR_ITEM_NOT_FOUND	0xFFFF0008
+#define TEEC_ERROR_NOT_IMPLEMENTED	0xFFFF0009
+#define TEEC_ERROR_NOT_SUPPORTED	0xFFFF000A
+#define TEEC_ERROR_NO_DATA			0xFFFF000B
+#define TEEC_ERROR_OUT_OF_MEMORY	0xFFFF000C
+#define TEEC_ERROR_BUSY				0xFFFF000D
+#define TEEC_ERROR_COMMUNICATION	0xFFFF000E
+#define TEEC_ERROR_SECURITY			0xFFFF000F
+#define TEEC_ERROR_SHORT_BUFFER		0xFFFF0010
+#define TEEC_ERROR_TARGET_DEAD		0xFFFF3024
+
+#define TEEC_ORIGIN_API			0x00000001
+#define TEEC_ORIGIN_COMMS		0x00000002
+#define TEEC_ORIGIN_TEE			0x00000003
+#define TEEC_ORIGIN_TRUSTED_APP	0x00000004
+
+#define TEEC_LOGIN_PUBLIC		0x00000000
+#define TEEC_LOGIN_USER			0x00000001
+#define TEEC_LOGIN_GROUP		0x00000002
+#define TEEC_LOGIN_APPLICATION	0x00000004
+
+#define TEEC_PARAM_TYPES(p0, p1, p2, p3) \
+	((p0) | ((p1) << 4) | ((p2) << 8) | ((p3) << 12))
+
+#define TEEC_PARAM_TYPE_GET(p, i)\
+	(((p) >> (i * 4)) & 0xF)
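+
+/*
+ * Illustrative sketch (not part of the original header): composing and
+ * decoding paramTypes for a struct TEEC_Operation.
+ *
+ *   op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_INPUT,
+ *                                    TEEC_MEMREF_TEMP_OUTPUT,
+ *                                    TEEC_NONE, TEEC_NONE);
+ *
+ *   TEEC_PARAM_TYPE_GET(op.paramTypes, 1) then yields
+ *   TEEC_MEMREF_TEMP_OUTPUT.
+ */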
+
+typedef uint32_t TEEC_Result;
+
+struct TEEC_Context {
+	char devname[256];
+	union {
+		struct tee_context *ctx;
+		int fd;
+	};
+};
+
+struct TEEC_UUID {
+	uint32_t timeLow;
+	uint16_t timeMid;
+	uint16_t timeHiAndVersion;
+	uint8_t clockSeqAndNode[8];
+};
+
+struct TEEC_SharedMemory {
+	union {
+		void *buffer;
+		uint64_t padding_ptr;
+	};
+	union {
+		size_t size;
+		uint64_t padding_sz;
+	};
+	uint32_t flags;
+
+	uint32_t reserved;
+	union {
+		int fd;
+		void *ptr;
+		uint64_t padding_d;
+	} d;
+	uint64_t registered;
+};
+
+struct TEEC_TempMemoryReference {
+	union {
+		void *buffer;
+		uint64_t padding_ptr;
+	};
+	union {
+		size_t size;
+		uint64_t padding_sz;
+	};
+};
+
+struct TEEC_RegisteredMemoryReference {
+	union {
+		struct TEEC_SharedMemory *parent;
+		uint64_t padding_ptr;
+	};
+	union {
+		size_t size;
+		uint64_t padding_sz;
+	};
+	union {
+		size_t offset;
+		uint64_t padding_off;
+	};
+};
+
+struct TEEC_Value {
+	uint32_t a;
+	uint32_t b;
+};
+
+union TEEC_Parameter {
+	struct TEEC_TempMemoryReference tmpref;
+	struct TEEC_RegisteredMemoryReference memref;
+	struct TEEC_Value value;
+};
+
+struct TEEC_Session {
+	int fd;
+};
+
+struct TEEC_Operation {
+	uint32_t started;
+	uint32_t paramTypes;
+	union TEEC_Parameter params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+	/* Implementation-Defined */
+	union {
+		struct TEEC_Session *session;
+		uint64_t padding_ptr;
+	};
+	struct TEEC_SharedMemory memRefs[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+	uint64_t flags;
+};
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_clkmgr.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_clkmgr.h
new file mode 100644
index 0000000..e4be3ac
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_clkmgr.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_CLKMGR_H
+#define TEE_CLKMGR_H
+
+/* The TKCORE clock manager framework assumes that
+ * the clock enable/disable operations are
+ * symmetric, that is, a clock enable operation
+ * takes the same number of arguments as
+ * the corresponding disable operation.
+ *
+ * The TKCORE clock manager supports at most 3 arguments
+ * per clock operation.
+ */
+
+int tee_clkmgr_register(const char *clkname, int master_id,
+			void *enable_fn, void *disable_fn,
+			void *p1, void *p2, void *p3, size_t argnum);
+
+#define tee_clkmgr_register0(clkname, id, e, d) \
+	tee_clkmgr_register((clkname), (id), \
+		(void *) (e), (void *) (d), \
+		NULL, NULL, NULL, 0)
+
+#define tee_clkmgr_register1(clkname, id, e, d, p0) \
+	tee_clkmgr_register((clkname), (id), \
+		(void *) (e), (void *) (d), \
+		(void *) (p0), NULL, NULL, 1)
+
+#define tee_clkmgr_register2(clkname, id, e, d, p0, p1)\
+	tee_clkmgr_register((clkname), (id), \
+		(void *) (e), (void *) (d), \
+		(void *) (p0), (void *) (p1), NULL, 2)
+
+#define tee_clkmgr_register3(clkname, id, e, d, p0, p1, p2)\
+	tee_clkmgr_register((clkname), (id), \
+		(void *) (e), (void *) (d), \
+		(void *) (p0), (void *) (p1), (void *) (p2), 3)
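+
+/*
+ * Illustrative usage sketch (not part of the original header), assuming a
+ * driver that owns a common-clock-framework "struct clk *spi_clk"; the
+ * clock name "spi" and master_id 0 are placeholders:
+ *
+ *   tee_clkmgr_register1("spi", 0,
+ *                        clk_prepare_enable, clk_disable_unprepare,
+ *                        spi_clk);
+ *
+ * Both operations take the single argument spi_clk, so argnum is 1, which
+ * satisfies the symmetry assumption described above.
+ */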
+
+#define TEE_CLKMGR_TOKEN_NOT_LEGACY	(0x1)
+
+#define TEE_CLKMGR_TOKEN_ID_SHIFT	(1)
+#define TEE_CLKMGR_TOKEN_TYPE_MASK	(0xffffu)
+#define TEE_CLKMGR_TOKEN_TYPE_SHIFT	(16)
+#define TEE_CLKMGR_TOKEN(type, id)	\
+	(((type) << TEE_CLKMGR_TOKEN_TYPE_SHIFT) | \
+	((id) << TEE_CLKMGR_TOKEN_ID_SHIFT) | \
+	TEE_CLKMGR_TOKEN_NOT_LEGACY)
+
+#define TEE_CLKMGR_OP_ENABLE		(0x1)
+
+int tee_clkmgr_handle(uint32_t token, uint32_t op);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_core.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_core.h
new file mode 100644
index 0000000..3969e62
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_core.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef __TEE_CORE_DRV_H__
+#define __TEE_CORE_DRV_H__
+
+#include <linux/klist.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/cdev.h>
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/scatterlist.h>
+
+#include <linux/types.h>
+#include <linux/tee_client_api.h>
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+struct tee_cmd_io;
+struct tee_shm_io;
+struct tee_rpc;
+
+enum tee_state {
+	TEE_OFFLINE = 0,
+	TEE_ONLINE = 1,
+	TEE_SUSPENDED = 2,
+	TEE_RUNNING = 3,
+	TEE_CRASHED = 4,
+	TEE_LAST = 5,
+};
+
+#define TEE_CONF_TEST_MODE		0x01000000
+#define TEE_CONF_FW_NOT_CAPABLE		0x00000001
+
+struct tee_stats_entry {
+	int count;
+	int max;
+};
+
+#define TEE_STATS_CONTEXT_IDX	0
+#define TEE_STATS_SESSION_IDX	1
+#define TEE_STATS_SHM_IDX		2
+
+#define TEE_MAX_TEE_DEV_NAME (64)
+struct tee {
+	struct klist_node node;
+	char name[TEE_MAX_TEE_DEV_NAME];
+	int id;
+	void *priv;
+	const struct tee_ops *ops;
+	struct device *dev;
+	struct miscdevice miscdev;
+	struct tee_rpc *rpc;
+
+	atomic_t refcount;
+	int max_refcount;
+	struct tee_stats_entry stats[3];
+	struct list_head list_ctx;
+	struct list_head list_rpc_shm;
+	struct mutex lock;
+	unsigned int state;
+	uint32_t shm_flags;	/* supported flags for shm allocation */
+	uint32_t conf;
+	uint32_t test;
+};
+
+#define _DEV(tee) (tee->miscdev.this_device)
+
+#define TEE_MAX_CLIENT_NAME (128)
+
+/**
+ * struct tee_context - internal structure to store a TEE context.
+ *
+ * @tee: tee attached to the tee_context
+ * @usr_client: flag to know if the client is a user-side client
+ * @entry: list of tee_context
+ * @list_sess: list of tee_session that denotes all tee_session attached
+ * @list_shm: list of tee_shm that denotes all tee_shm attached
+ * @refcount: number of objects which reference it (including itself)
+ */
+struct tee_context {
+	struct tee *tee;
+	char name[TEE_MAX_CLIENT_NAME];
+	int tgid;
+	int usr_client;
+	struct list_head entry;
+	struct list_head list_sess;
+	struct list_head list_shm;
+	struct kref refcount;
+};
+
+/**
+ * struct tee_session - internal structure to store a TEE session.
+ *
+ * @entry: entry in the tee_context list of sessions
+ * @ctx: tee_context attached to the tee_session
+ * @sessid: session ID returned by the secure world
+ * @priv: TEE implementation specific private data for this session
+ */
+struct tee_session {
+	struct list_head entry;
+	struct tee_context *ctx;
+	uint32_t sessid;
+	void *priv;
+};
+
+struct tee_shm_dma_buf {
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	bool tee_allocated;
+};
+
+struct tee_shm_resv {
+	void *kaddr;
+	dma_addr_t paddr;
+	struct sg_table sgt;
+	struct tee_shm_dma_buf *sdb;
+};
+
+struct tee_shm_ns {
+	uint32_t token;
+	struct page **pages;
+	size_t nr_pages;
+	atomic_t ref;
+};
+
+/**
+ * struct tee_shm - internal structure to store a shm object.
+ *
+ * @ctx: tee_context attached to the buffer.
+ * @tee: tee attached to the buffer.
+ * @dev: device attached to the buffer.
+ * @size_req: requested size for the buffer
+ * @size_alloc: effective size of the buffer
+ * @kaddr: kernel address if mapped kernel side
+ * @paddr: physical address
+ * @flags: flags which denote the type of the buffer
+ * @entry: list of tee_shm
+ */
+struct tee_shm {
+	struct list_head entry;
+
+	struct tee_context *ctx;
+	struct tee *tee;
+	struct device *dev;
+
+	size_t size_req;
+	size_t size_alloc;
+
+	uint32_t flags;
+
+	union {
+		struct tee_shm_resv resv;
+		struct tee_shm_ns ns;
+	};
+};
+
+#define TEE_SHM_MAPPED			0x01000000
+#define TEE_SHM_TEMP			0x02000000
+#define TEE_SHM_FROM_RPC		0x04000000
+#define TEE_SHM_REGISTERED		0x08000000
+#define TEE_SHM_MEMREF			0x10000000
+#define TEE_SHM_CACHED			0x20000000
+
+#define TEE_SHM_DRV_PRIV_MASK		0xFF000000
+
+struct tee_data {
+	uint32_t type;
+	uint32_t type_original;
+	struct TEEC_SharedMemory c_shm[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+	union {
+		struct tee_shm *shm;
+		struct TEEC_Value value;
+	} params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+};
+
+struct tee_cmd {
+	TEEC_Result err;
+	uint32_t origin;
+	uint32_t cmd;
+	struct tee_shm *uuid;
+	struct tee_shm *ta;
+	struct tee_data param;
+};
+
+void *tee_map_cached_shm(unsigned long pa, size_t len);
+void tee_unmap_cached_shm(void *va);
+
+struct tee_shm *tee_shm_alloc_from_rpc(struct tee *tee,
+	size_t size, uint32_t extra_flags);
+void tee_shm_free_from_rpc(struct tee_shm *shm);
+
+int tee_core_add(struct tee *tee);
+int tee_core_del(struct tee *tee);
+
+int __tee_get(struct tee *tee);
+
+struct tee *tee_core_alloc(struct device *dev, char *name, int id,
+			const struct tee_ops *ops, size_t len);
+int tee_core_free(struct tee *tee);
+
+#include <linux/tee_kernel_lowlevel_api.h>
+
+struct tee_ops {
+	struct module *owner;
+	const char *type;
+
+	int (*start)(struct tee *tee);
+	int (*stop)(struct tee *tee);
+	int (*open)(struct tee_session *sess, struct tee_cmd *cmd);
+	int (*close)(struct tee_session *sess);
+	int (*invoke)(struct tee_session *sess, struct tee_cmd *cmd);
+	int (*cancel)(struct tee_session *sess, struct tee_cmd *cmd);
+	struct tee_shm *(*alloc)(struct tee *tee, size_t size,
+				  uint32_t flags);
+	void (*free)(struct tee_shm *shm);
+	int (*shm_inc_ref)(struct tee_shm *shm);
+
+	void (*call_tee)(struct smc_param *p);
+	void (*raw_call_tee)(struct smc_param *p);
+};
+
+
+#endif /* __TEE_CORE_DRV_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_fp.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_fp.h
new file mode 100644
index 0000000..3522d56
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_fp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_FP_H
+#define TEE_FP_H
+
+int tee_spi_cfg_padsel(uint32_t padsel);
+
+int tee_spi_transfer(void *conf, uint32_t conf_size, void *inbuf, void *outbuf,
+		     uint32_t size);
+
+int tee_spi_transfer_disable(void);
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_ioc.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_ioc.h
new file mode 100644
index 0000000..b3cd047f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_ioc.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef _TEE_IOC_H
+#define _TEE_IOC_H
+
+#include <linux/tee_client_api.h>
+
+#ifndef __KERNEL__
+#define __user
+#endif
+
+/**
+ * struct tee_cmd_io - The command sent to an open tee device.
+ * @err: Error code (as in Global Platform TEE Client API spec)
+ * @origin: Origin for the error code (also from spec).
+ * @cmd: The command to be executed in the trusted application.
+ * @uuid: The uuid for the trusted application.
+ * @data: The trusted application or memory block.
+ * @data_size: The size of the trusted application or memory block.
+ * @op: The cmd payload operation for the trusted application.
+ *
+ * This structure is mainly used in the Linux kernel for communication
+ * with the user space.
+ */
+struct tee_cmd_io {
+	TEEC_Result err;
+	uint32_t origin;
+	uint32_t cmd;
+	int fd_sess;
+	/*
+	 * Here fd_sess is a 32-bit variable. Since TEEC_Result is also defined
+	 * as "uint32_t", this structure stays aligned.
+	 */
+	union {
+		struct TEEC_UUID __user *uuid;
+		uint64_t padding_uuid;
+	};
+	union {
+		void __user *data;
+		uint64_t padding_data;
+	};
+	union {
+		struct TEEC_Operation __user *op;
+		uint64_t padding_op;
+	};
+	uint32_t data_size;
+	/* Use this field to indicate
+	 * whether there is a difference
+	 * in ABI.
+	 */
+	int32_t reserved;
+};
+
+struct tee_shm_io {
+	union {
+		void __user *buffer;
+		void *paddr;
+		uint64_t padding_buf;
+	};
+	uint32_t size;
+	uint32_t flags;
+	/*
+	 * Here fd_shm is 32-bit. To follow the usual convention for file
+	 * descriptors, fd_shm is defined as "int" rather than "int32_t",
+	 * even though "int32_t" would make it more obvious that this
+	 * structure is meant to stay aligned.
+	 */
+	int fd_shm;
+	uint32_t registered;
+};
+
+struct tee_ta_inst_desc {
+	union {
+		struct TEEC_UUID __user *uuid;
+		uint64_t pad1;
+	};
+	union {
+		void __user *ta_buf;
+		uint64_t pad2;
+	};
+	uint32_t ta_buf_size;
+	union {
+		uint32_t *resp_size;
+		uint64_t pad3;
+	};
+};
+
+struct tee_spta_inst_desc {
+	union {
+		void *ta_binary;
+		uint64_t padding_buf;
+	};
+	uint32_t ta_size;
+	union {
+		uint32_t *response_len;
+		uint64_t paddind_response_len;
+	};
+};
+
+#define TEE_OPEN_SESSION_IOC \
+	_IOWR('t', 161, struct tee_cmd_io)
+#define TEE_INVOKE_COMMAND_IOC \
+	_IOWR('t', 163, struct tee_cmd_io)
+#define TEE_REQUEST_CANCELLATION_IOC \
+	_IOWR('t', 164, struct tee_cmd_io)
+
+#define TEE_ALLOC_SHM_IOC \
+	_IOWR('t', 165, struct tee_shm_io)
+#define TEE_ALLOC_SHM_PERM_IOC \
+	_IOWR('t', 166, struct tee_shm_io)
+#define TEE_GET_FD_FOR_RPC_SHM_IOC \
+	_IOWR('t', 167, struct tee_shm_io)
+
+#define TEE_TUI_OPEN_SESSION_IOC \
+	_IOWR('t', 168, struct tee_cmd_io)
+#define TEE_TUI_CLOSE_SESSION_IOC \
+	_IOWR('t', 169, struct tee_cmd_io)
+#define TEE_TUI_NOTIFY_IOC \
+	_IOWR('t', 170, uint32_t)
+#define TEE_TUI_WAITCMD_IOC \
+	_IOWR('t', 171, uint32_t)
+
+#define TEE_INSTALL_TA_IOC \
+	_IOWR('t', 174, struct tee_spta_inst_desc)
+#define TEE_INSTALL_TA_RESP_IOC \
+	_IO('t', 175)
+#define TEE_DELETE_TA_IOC \
+	_IOWR('t', 176, struct TEEC_UUID)
+
+#define TEE_QUERY_DRV_FEATURE_IOC \
+	_IOR('t', 183, uint32_t)
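+
+/*
+ * Illustrative user-space sketch (not part of the original header); the
+ * device node path and the TA UUID are assumptions, and the exact meaning
+ * of fd_sess is driver-defined:
+ *
+ *   struct tee_cmd_io cmd = { 0 };
+ *   int fd = open("/dev/tkcoredrv", O_RDWR);    (assumed device node)
+ *
+ *   cmd.uuid = &ta_uuid;                         (struct TEEC_UUID of a TA)
+ *   cmd.op = &op;                                (struct TEEC_Operation)
+ *   if (ioctl(fd, TEE_OPEN_SESSION_IOC, &cmd) == 0 &&
+ *       cmd.err == TEEC_SUCCESS)
+ *           ... invoke commands with TEE_INVOKE_COMMAND_IOC ...
+ */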
+
+/* Command IDs */
+#define TEEC_TUI_CMD_NONE				0
+#define TEEC_TUI_CMD_START_ACTIVITY		1
+#define TEEC_TUI_CMD_STOP_ACTIVITY		2
+
+#endif /* _TEE_IOC_H */
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_api.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_api.h
new file mode 100644
index 0000000..be85672
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_api.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef _TEE_KERNEL_API_H
+#define _TEE_KERNEL_API_H
+
+#include <linux/tee_client_api.h>
+
+TEEC_Result TEEC_InitializeContext(const char *name,
+	struct TEEC_Context *context);
+
+void TEEC_FinalizeContext(struct TEEC_Context *context);
+
+TEEC_Result TEEC_OpenSession(struct TEEC_Context *context,
+			     struct TEEC_Session *session,
+			     const struct TEEC_UUID *destination,
+			     uint32_t connectionMethod,
+			     const void *connectionData,
+			     struct TEEC_Operation *operation,
+			     uint32_t *returnOrigin);
+
+void TEEC_CloseSession(struct TEEC_Session *session);
+
+TEEC_Result TEEC_InvokeCommand(struct TEEC_Session *session,
+			       uint32_t commandID,
+			       struct TEEC_Operation *operation,
+			       uint32_t *returnOrigin);
+
+TEEC_Result TEEC_RegisterSharedMemory(struct TEEC_Context *context,
+				      struct TEEC_SharedMemory *sharedMem);
+
+TEEC_Result TEEC_AllocateSharedMemory(struct TEEC_Context *context,
+				      struct TEEC_SharedMemory *sharedMem);
+
+void TEEC_ReleaseSharedMemory(struct TEEC_SharedMemory *sharedMemory);
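+
+/*
+ * Illustrative kernel-side usage sketch (not part of the original header);
+ * the TA UUID and the command ID 0 are placeholders:
+ *
+ *   struct TEEC_Context ctx;
+ *   struct TEEC_Session sess;
+ *   struct TEEC_Operation op = { 0 };
+ *   uint32_t origin;
+ *
+ *   if (TEEC_InitializeContext(NULL, &ctx) != TEEC_SUCCESS)
+ *           return;
+ *   if (TEEC_OpenSession(&ctx, &sess, &ta_uuid, TEEC_LOGIN_PUBLIC,
+ *                        NULL, NULL, &origin) == TEEC_SUCCESS) {
+ *           op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_INOUT, TEEC_NONE,
+ *                                            TEEC_NONE, TEEC_NONE);
+ *           TEEC_InvokeCommand(&sess, 0, &op, &origin);
+ *           TEEC_CloseSession(&sess);
+ *   }
+ *   TEEC_FinalizeContext(&ctx);
+ */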
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_lowlevel_api.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_lowlevel_api.h
new file mode 100644
index 0000000..53e2836
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_kernel_lowlevel_api.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef _TEE_KERNEL_LOWLEVEL_API_H
+#define _TEE_KERNEL_LOWLEVEL_API_H
+
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM
+struct smc_param {
+	uint32_t a0;
+	uint32_t a1;
+	uint32_t a2;
+	uint32_t a3;
+	uint32_t a4;
+	uint32_t a5;
+	uint32_t a6;
+	uint32_t a7;
+};
+#endif
+
+#ifdef CONFIG_ARM64
+struct smc_param {
+	uint64_t a0;
+	uint64_t a1;
+	uint64_t a2;
+	uint64_t a3;
+	uint64_t a4;
+	uint64_t a5;
+	uint64_t a6;
+	uint64_t a7;
+};
+#endif
+
+#define tee_smc_call(p) do {\
+		struct arm_smccc_res res; \
+		arm_smccc_smc(p->a0, p->a1, p->a2, p->a3, \
+			p->a4, p->a5, p->a6, p->a7, &res); \
+		p->a0 = res.a0; \
+		p->a1 = res.a1; \
+		p->a2 = res.a2; \
+		p->a3 = res.a3; \
+	} while (0)
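+
+/*
+ * Illustrative usage sketch (not part of the original header): issuing the
+ * CALLS_UID fast call from arm_common/teesmc.h. Note that the macro
+ * dereferences its argument, so pass a pointer variable:
+ *
+ *   struct smc_param param = { .a0 = TEESMC32_CALLS_UID };
+ *   struct smc_param *p = &param;
+ *
+ *   tee_smc_call(p);
+ *   (param.a0..param.a3 now hold the UID words)
+ */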
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_pm_sec_call.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_pm_sec_call.h
new file mode 100644
index 0000000..44a95f4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_pm_sec_call.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_PM_SEC_CALL_H_
+#define TEE_PM_SEC_CALL_H_
+
+#ifdef CONFIG_ARM
+
+#define TKCORE_SET_NS_BOOT_ADDR	(0xBF000103U)
+#define TKCORE_PREPARE_CPU_OFF	(0xBF000104U)
+#define TKCORE_ERRATA_802022	(0xBF000105U)
+
+#define TKCORE_SET_WDT_KADDR    (0xBF000300U)
+
+static void tee_pm_sec_call(u32 cmd, u32 p0, u32 p1, u32 p2)
+{
+	__asm__ __volatile__(
+		".arch_extension sec\n"
+		"mov r0, %0\n"
+		"mov r1, %1\n"
+		"mov r2, %2\n"
+		"mov r3, %3\n"
+		"smc #0\n" :
+		: "r"(cmd), "r"(p0), "r"(p1), "r"(p2)
+		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7");
+}
+
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_rpmb.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_rpmb.h
new file mode 100644
index 0000000..025c62d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/include/linux/tee_rpmb.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TEE_RPMB_H
+#define TEE_RPMB_H
+
+struct tkcore_rpmb_request {
+	uint16_t type;
+	uint16_t blk_cnt;
+	uint16_t addr;
+	uint8_t *data_frame;
+};
+
+#define TEE_RPMB_EMMC_CID_SIZE 16
+
+struct tee_rpmb_dev_info {
+	uint8_t cid[TEE_RPMB_EMMC_CID_SIZE];
+	uint8_t rpmb_size_mult;
+	uint8_t rel_wr_sec_c;
+	uint8_t ret_code;
+};
+
+int tkcore_emmc_rpmb_execute(struct tkcore_rpmb_request *req);
+
+#define TEE_RPMB_GET_DEV_INFO		0x10
+#define TEE_RPMB_PROGRAM_KEY		0x11
+#define TEE_RPMB_GET_WRITE_COUNTER	0x12
+#define TEE_RPMB_WRITE_DATA			0x13
+#define TEE_RPMB_READ_DATA			0x14
+#define TEE_RPMB_SWITCH_NORMAL		0x15
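+
+/*
+ * Illustrative usage sketch (not part of the original header): querying the
+ * eMMC RPMB device info. Pointing data_frame at a struct tee_rpmb_dev_info
+ * for TEE_RPMB_GET_DEV_INFO is an assumption for illustration only.
+ *
+ *   struct tee_rpmb_dev_info info = { 0 };
+ *   struct tkcore_rpmb_request req = {
+ *           .type = TEE_RPMB_GET_DEV_INFO,
+ *           .blk_cnt = 1,
+ *           .addr = 0,
+ *           .data_frame = (uint8_t *) &info,
+ *   };
+ *
+ *   if (tkcore_emmc_rpmb_execute(&req) == 0 && info.ret_code == 0)
+ *           (info.cid and info.rpmb_size_mult are now valid)
+ */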
+
+#endif
+
diff --git a/src/kernel/linux/v4.19/drivers/tee/tkcore/version.h b/src/kernel/linux/v4.19/drivers/tee/tkcore/version.h
new file mode 100644
index 0000000..4f3a008
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/tee/tkcore/version.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2015-2019 TrustKernel Incorporated
+ */
+
+#ifndef TKCORE_VERSION_H
+#define TKCORE_VERSION_H
+
+static const char tkcore_nsdrv_version[] = "3.4p0\n";
+
+#endif