[Feature]add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/Kconfig b/src/kernel/linux/v4.14/drivers/devfreq/Kconfig
new file mode 100644
index 0000000..4c4ec68
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/Kconfig
@@ -0,0 +1,119 @@
+menuconfig PM_DEVFREQ
+	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+	select SRCU
+	select PM_OPP
+	help
+	  A device may have a list of frequencies and voltages available.
+	  devfreq, a generic DVFS framework can be registered for a device
+	  in order to let the governor provided to devfreq choose an
+	  operating frequency based on the device driver's policy.
+
+	  Each device may have its own governor and policy. Devfreq can
+	  reevaluate the device state periodically and/or based on the
+	  notification to "nb", a notifier block, of devfreq.
+
+	  Like some CPUs with CPUfreq, a device may have multiple clocks.
+	  However, because the clock frequencies of a single device are
+	  determined by the single device's state, an instance of devfreq
+	  is attached to a single device and returns a "representative"
+	  clock frequency of the device, which is also attached
+	  to a device by 1-to-1. The device registering devfreq takes the
+	  responsibility to "interpret" the representative frequency and
+	  to set its every clock accordingly with the "target" callback
+	  given to devfreq.
+
+	  When OPP is used with the devfreq device, it is recommended to
+	  register devfreq's nb to the OPP's notifier head.  If OPP is
+	  used with the devfreq device, you may use OPP helper
+	  functions defined in devfreq.h.
+
+if PM_DEVFREQ
+
+comment "DEVFREQ Governors"
+
+config DEVFREQ_GOV_SIMPLE_ONDEMAND
+	tristate "Simple Ondemand"
+	help
+	  Chooses frequency based on the recent load on the device. Works
+	  similar as ONDEMAND governor of CPUFREQ does. A device with
+	  Simple-Ondemand should be able to provide busy/total counter
+	  values that imply the usage rate. A device may provide tuned
+	  values to the governor with data field at devfreq_add_device().
+
+config DEVFREQ_GOV_PERFORMANCE
+	tristate "Performance"
+	help
+	  Sets the frequency at the maximum available frequency.
+	  This governor always returns UINT_MAX as frequency so that
+	  the DEVFREQ framework returns the highest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_POWERSAVE
+	tristate "Powersave"
+	help
+	  Sets the frequency at the minimum available frequency.
+	  This governor always returns 0 as frequency so that
+	  the DEVFREQ framework returns the lowest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_USERSPACE
+	tristate "Userspace"
+	help
+	  Sets the frequency at the user specified one.
+	  This governor returns the user configured frequency if there
+	  has been an input to /sys/devices/.../power/devfreq_set_freq.
+	  Otherwise, the governor does not change the frequency
+	  given at the initialization.
+
+config DEVFREQ_GOV_PASSIVE
+	tristate "Passive"
+	help
+	  Sets the frequency based on the frequency of its parent devfreq
+	  device. This governor does not change the frequency by itself
+	  through sysfs entries. The passive governor recommends that
+	  devfreq device uses the OPP table to get the frequency/voltage.
+
+comment "DEVFREQ Drivers"
+
+config ARM_EXYNOS_BUS_DEVFREQ
+	tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select DEVFREQ_GOV_PASSIVE
+	select DEVFREQ_EVENT_EXYNOS_PPMU
+	select PM_DEVFREQ_EVENT
+	select PM_OPP
+	help
+	  This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
+	  Memory bus has one more group of memory bus (e.g, MIF and INT block).
+	  Each memory bus group could contain many memory bus blocks. It reads
+	  PPMU counters of memory controllers by using DEVFREQ-event device
+	  and adjusts the operating frequencies and voltages with OPP support.
+	  This does not yet operate with optimal voltages.
+
+config ARM_TEGRA_DEVFREQ
+	tristate "Tegra DEVFREQ Driver"
+	depends on ARCH_TEGRA_124_SOC
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select PM_OPP
+	help
+	  This adds the DEVFREQ driver for the Tegra family of SoCs.
+	  It reads ACTMON counters of memory controllers and adjusts the
+	  operating frequencies and voltages with OPP support.
+
+config ARM_RK3399_DMC_DEVFREQ
+	tristate "ARM RK3399 DMC DEVFREQ Driver"
+	depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+		(COMPILE_TEST && HAVE_ARM_SMCCC)
+	select DEVFREQ_EVENT_ROCKCHIP_DFI
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select PM_DEVFREQ_EVENT
+	select PM_OPP
+	help
+	  This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
+	  It sets the frequency for the memory controller and reads the usage counts
+	  from hardware.
+
+source "drivers/devfreq/event/Kconfig"
+
+endif # PM_DEVFREQ
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/Makefile b/src/kernel/linux/v4.14/drivers/devfreq/Makefile
new file mode 100644
index 0000000..d115e55
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PM_DEVFREQ)	+= devfreq.o
+obj-$(CONFIG_PM_DEVFREQ_EVENT)	+= devfreq-event.o
+obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)	+= governor_simpleondemand.o
+obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
+obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
+obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
+obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)	+= rk3399_dmc.o
+obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+obj-$(CONFIG_MTK_BASE_POWER)		+= mediatek/
+
+# DEVFREQ Event Drivers
+obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/devfreq-event.c b/src/kernel/linux/v4.14/drivers/devfreq/devfreq-event.c
new file mode 100644
index 0000000..d67242d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/devfreq-event.c
@@ -0,0 +1,483 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/devfreq.c.
+ */
+
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+static struct class *devfreq_event_class;
+
+/* The list of all devfreq event list */
+static LIST_HEAD(devfreq_event_list);
+static DEFINE_MUTEX(devfreq_event_list_lock);
+
+#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
+
+/**
+ * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
+ *				 the enable_count of devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function increases the enable_count and enables the
+ * devfreq-event device. The devfreq-event device should be enabled before
+ * it is used by a devfreq device.
+ */
+int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	mutex_lock(&edev->lock);
+	if (edev->desc->ops && edev->desc->ops->enable
+			&& edev->enable_count == 0) {
+		ret = edev->desc->ops->enable(edev);
+		if (ret < 0)
+			goto err;
+	}
+	edev->enable_count++;
+err:
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
+
+/**
+ * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
+ *				  the enable_count of the devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function decreases the enable_count and disables the
+ * devfreq-event device. After the devfreq-event device is disabled,
+ * devfreq device can't use the devfreq-event device for get/set/reset
+ * operations.
+ */
+int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	mutex_lock(&edev->lock);
+	if (edev->enable_count <= 0) {
+		dev_warn(&edev->dev, "unbalanced enable_count\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	if (edev->desc->ops && edev->desc->ops->disable
+			&& edev->enable_count == 1) {
+		ret = edev->desc->ops->disable(edev);
+		if (ret < 0)
+			goto err;
+	}
+	edev->enable_count--;
+err:
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
+
+/**
+ * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
+ *				not.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function checks whether devfreq-event dev is enabled or not.
+ * If it returns true, the devfreq-event dev is enabled. If it returns false,
+ * the devfreq-event dev is disabled.
+ */
+bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+	bool enabled = false;
+
+	if (!edev || !edev->desc)
+		return enabled;
+
+	mutex_lock(&edev->lock);
+
+	if (edev->enable_count > 0)
+		enabled = true;
+
+	mutex_unlock(&edev->lock);
+
+	return enabled;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
+
+/**
+ * devfreq_event_set_event() - Set event to devfreq-event dev to start.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function set the event to the devfreq-event device to start
+ * for getting the event data which could be various event type.
+ */
+int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+	int ret;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!edev->desc->ops || !edev->desc->ops->set_event)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EPERM;
+
+	mutex_lock(&edev->lock);
+	ret = edev->desc->ops->set_event(edev);
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_set_event);
+
+/**
+ * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
+ * @edev	: the devfreq-event device
+ * @edata	: the calculated data of devfreq-event device
+ *
+ * Note that this function gets the calculated event data from devfreq-event
+ * dev after stopping the progress of whole sequence of devfreq-event dev.
+ */
+int devfreq_event_get_event(struct devfreq_event_dev *edev,
+			    struct devfreq_event_data *edata)
+{
+	int ret;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!edev->desc->ops || !edev->desc->ops->get_event)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EINVAL;
+
+	edata->total_count = edata->load_count = 0;
+
+	mutex_lock(&edev->lock);
+	ret = edev->desc->ops->get_event(edev, edata);
+	if (ret < 0)
+		edata->total_count = edata->load_count = 0;
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_event);
+
+/**
+ * devfreq_event_reset_event() - Reset all operations of devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function stops all operations of devfreq-event dev and resets
+ * the current event data to put the devfreq-event device into its initial state.
+ */
+int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EPERM;
+
+	mutex_lock(&edev->lock);
+	if (edev->desc->ops && edev->desc->ops->reset)
+		ret = edev->desc->ops->reset(edev);
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
+
+/**
+ * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
+ *					 devicetree.
+ * @dev		: the pointer to the given device
+ * @index	: the index into list of devfreq-event device
+ *
+ * Note that this function return the pointer of devfreq-event device.
+ */
+struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
+						      int index)
+{
+	struct device_node *node;
+	struct devfreq_event_dev *edev;
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, "devfreq-events", index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_for_each_entry(edev, &devfreq_event_list, node) {
+		if (edev->dev.parent && edev->dev.parent->of_node == node)
+			goto out;
+	}
+
+	list_for_each_entry(edev, &devfreq_event_list, node) {
+		if (!strcmp(edev->desc->name, node->name))
+			goto out;
+	}
+	edev = NULL;
+out:
+	mutex_unlock(&devfreq_event_list_lock);
+
+	if (!edev) {
+		of_node_put(node);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(node);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
+
+/**
+ * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
+ * @dev		: the pointer to the given device
+ *
+ * Note that this function return the count of devfreq-event devices.
+ */
+int devfreq_event_get_edev_count(struct device *dev)
+{
+	int count;
+
+	if (!dev->of_node) {
+		dev_err(dev, "device does not have a device node entry\n");
+		return -EINVAL;
+	}
+
+	count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
+						sizeof(u32));
+	if (count < 0) {
+		dev_err(dev,
+			"failed to get the count of devfreq-event in %pOF node\n",
+			dev->of_node);
+		return count;
+	}
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
+
+static void devfreq_event_release_edev(struct device *dev)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	kfree(edev);
+}
+
+/**
+ * devfreq_event_add_edev() - Add new devfreq-event device.
+ * @dev		: the device owning the devfreq-event device being created
+ * @desc	: the devfreq-event device's descriptor which includes essential
+ *		  data for devfreq-event device.
+ *
+ * Note that this function adds a new devfreq-event device to the devfreq-event
+ * class list and registers the device of the devfreq-event device.
+ */
+struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+						struct devfreq_event_desc *desc)
+{
+	struct devfreq_event_dev *edev;
+	static atomic_t event_no = ATOMIC_INIT(-1);
+	int ret;
+
+	if (!dev || !desc)
+		return ERR_PTR(-EINVAL);
+
+	if (!desc->name || !desc->ops)
+		return ERR_PTR(-EINVAL);
+
+	if (!desc->ops->set_event || !desc->ops->get_event)
+		return ERR_PTR(-EINVAL);
+
+	edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
+	if (!edev)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&edev->lock);
+	edev->desc = desc;
+	edev->enable_count = 0;
+	edev->dev.parent = dev;
+	edev->dev.class = devfreq_event_class;
+	edev->dev.release = devfreq_event_release_edev;
+
+	dev_set_name(&edev->dev, "event%d", atomic_inc_return(&event_no));
+	ret = device_register(&edev->dev);
+	if (ret < 0) {
+		put_device(&edev->dev);
+		return ERR_PTR(ret);
+	}
+	dev_set_drvdata(&edev->dev, edev);
+
+	INIT_LIST_HEAD(&edev->node);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_add(&edev->node, &devfreq_event_list);
+	mutex_unlock(&devfreq_event_list_lock);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
+
+/**
+ * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function removes the registered devfreq-event device.
+ */
+int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+	if (!edev)
+		return -EINVAL;
+
+	WARN_ON(edev->enable_count);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_del(&edev->node);
+	mutex_unlock(&devfreq_event_list_lock);
+
+	device_unregister(&edev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
+
+static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
+{
+	struct devfreq_event_dev **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+static void devm_devfreq_event_release(struct device *dev, void *res)
+{
+	devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
+}
+
+/**
+ * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
+ * @dev		: the device owning the devfreq-event device being created
+ * @desc	: the devfreq-event device's descriptor which includes essential
+ *		  data for devfreq-event device.
+ *
+ * Note that this function manages automatically the memory of devfreq-event
+ * device using device resource management and simplifies the free operation
+ * for memory of devfreq-event device.
+ */
+struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+						struct devfreq_event_desc *desc)
+{
+	struct devfreq_event_dev **ptr, *edev;
+
+	ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr),
+				GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	edev = devfreq_event_add_edev(dev, desc);
+	if (IS_ERR(edev)) {
+		devres_free(ptr);
+		return ERR_CAST(edev);	/* propagate the real error code */
+	}
+
+	*ptr = edev;
+	devres_add(dev, ptr);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
+
+/**
+ * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
+ * @dev		: the device owning the devfreq-event device being created
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function manages automatically the memory of devfreq-event
+ * device using device resource management.
+ */
+void devm_devfreq_event_remove_edev(struct device *dev,
+				struct devfreq_event_dev *edev)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_event_release,
+			       devm_devfreq_event_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
+
+/*
+ * Device attributes for devfreq-event class.
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", edev->desc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t enable_count_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	return sprintf(buf, "%d\n", edev->enable_count);
+}
+static DEVICE_ATTR_RO(enable_count);
+
+static struct attribute *devfreq_event_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_enable_count.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(devfreq_event);
+
+static int __init devfreq_event_init(void)
+{
+	devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+	if (IS_ERR(devfreq_event_class)) {
+		pr_err("%s: couldn't create class\n", __FILE__);
+		return PTR_ERR(devfreq_event_class);
+	}
+
+	devfreq_event_class->dev_groups = devfreq_event_groups;
+
+	return 0;
+}
+subsys_initcall(devfreq_event_init);
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/devfreq.c b/src/kernel/linux/v4.14/drivers/devfreq/devfreq.c
new file mode 100644
index 0000000..b05e6a1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/devfreq.c
@@ -0,0 +1,1482 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ *	    for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_opp.h>
+#include <linux/devfreq.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include <linux/of.h>
+#include "governor.h"
+
+static struct class *devfreq_class;
+
+/*
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
+ */
+static struct workqueue_struct *devfreq_wq;
+
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
+/* The list of all device-devfreq */
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(devfreq_list_lock);
+
+/**
+ * find_device_devfreq() - find devfreq struct using device pointer
+ * @dev:	device pointer used to lookup device devfreq.
+ *
+ * Search the list of device devfreqs and return the matched device's
+ * devfreq info. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq *find_device_devfreq(struct device *dev)
+{
+	struct devfreq *tmp_devfreq;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
+		if (tmp_devfreq->dev.parent == dev)
+			return tmp_devfreq;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+/**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq:	the devfreq instance
+ * @freq:	the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+/**
+ * devfreq_set_freq_table() - Initialize freq_table for the frequency
+ * @devfreq:	the devfreq instance
+ */
+static void devfreq_set_freq_table(struct devfreq *devfreq)
+{
+	struct devfreq_dev_profile *profile = devfreq->profile;
+	struct dev_pm_opp *opp;
+	unsigned long freq;
+	int i, count;
+
+	/* Initialize the freq_table from OPP table */
+	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
+	if (count <= 0)
+		return;
+
+	profile->max_state = count;
+	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
+					profile->max_state,
+					sizeof(*profile->freq_table),
+					GFP_KERNEL);
+	if (!profile->freq_table) {
+		profile->max_state = 0;
+		return;
+	}
+
+	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
+		if (IS_ERR(opp)) {
+			devm_kfree(devfreq->dev.parent, profile->freq_table);
+			profile->max_state = 0;
+			return;
+		}
+		dev_pm_opp_put(opp);
+		profile->freq_table[i] = freq;
+	}
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq:	the devfreq instance
+ * @freq:	the update target frequency
+ */
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev, prev_lev, ret = 0;
+	unsigned long cur_time;
+
+	lockdep_assert_held(&devfreq->lock);
+	cur_time = jiffies;
+
+	/* Immediately exit if previous_freq is not initialized yet. */
+	if (!devfreq->previous_freq)
+		goto out;
+
+	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+	if (prev_lev < 0) {
+		ret = prev_lev;
+		goto out;
+	}
+
+	devfreq->time_in_state[prev_lev] +=
+			 cur_time - devfreq->last_stat_updated;
+
+	lev = devfreq_get_freq_level(devfreq, freq);
+	if (lev < 0) {
+		ret = lev;
+		goto out;
+	}
+
+	if (lev != prev_lev) {
+		devfreq->trans_table[(prev_lev *
+				devfreq->profile->max_state) + lev]++;
+		devfreq->total_trans++;
+	}
+
+out:
+	devfreq->last_stat_updated = cur_time;
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_update_status);
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name:	name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+	struct devfreq_governor *tmp_governor;
+
+	if (IS_ERR_OR_NULL(name)) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+			return tmp_governor;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static int devfreq_notify_transition(struct devfreq *devfreq,
+		struct devfreq_freqs *freqs, unsigned int state)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (state) {
+	case DEVFREQ_PRECHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_PRECHANGE, freqs);
+		break;
+
+	case DEVFREQ_POSTCHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_POSTCHANGE, freqs);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency.
+ * @devfreq:	the devfreq instance.
+ *
+ * Note: Lock devfreq->lock before calling update_devfreq
+ *	 This function is exported for governors.
+ */
+int update_devfreq(struct devfreq *devfreq)
+{
+	struct devfreq_freqs freqs;
+	unsigned long freq, cur_freq;
+	int err = 0;
+	u32 flags = 0;
+
+	if (!mutex_is_locked(&devfreq->lock)) {
+		WARN(true, "devfreq->lock must be locked by the caller.\n");
+		return -EINVAL;
+	}
+
+	if (!devfreq->governor)
+		return -EINVAL;
+
+	/* Reevaluate the proper frequency */
+	err = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (err)
+		return err;
+
+	/*
+	 * Adjust the frequency with user freq and QoS.
+	 *
+	 * List from the highest priority
+	 * max_freq
+	 * min_freq
+	 */
+
+	if (devfreq->min_freq && freq < devfreq->min_freq) {
+		freq = devfreq->min_freq;
+		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
+	}
+	if (devfreq->max_freq && freq > devfreq->max_freq) {
+		freq = devfreq->max_freq;
+		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
+	}
+
+	if (devfreq->profile->get_cur_freq)
+		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+	else
+		cur_freq = devfreq->previous_freq;
+
+	freqs.old = cur_freq;
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
+	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
+	if (err) {
+		freqs.new = cur_freq;
+		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+		return err;
+	}
+
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
+	if (devfreq->profile->freq_table)
+		if (devfreq_update_status(devfreq, freq))
+			dev_err(&devfreq->dev,
+				"Couldn't update frequency transition information.\n");
+
+	devfreq->previous_freq = freq;
+	return err;
+}
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work:	the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+	int err;
+	struct devfreq *devfreq = container_of(work,
+					struct devfreq, work.work);
+
+	mutex_lock(&devfreq->lock);
+	err = update_devfreq(devfreq);
+	if (err)
+		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+	queue_delayed_work(devfreq_wq, &devfreq->work,
+				msecs_to_jiffies(devfreq->profile->polling_ms));
+	mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+	if (devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is same as devfreq_monitor_stop(),
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (devfreq->stop_polling) {
+		mutex_unlock(&devfreq->lock);
+		return;
+	}
+
+	devfreq_update_status(devfreq, devfreq->previous_freq);
+	devfreq->stop_polling = true;
+	mutex_unlock(&devfreq->lock);
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq:    the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+	unsigned long freq;
+
+	mutex_lock(&devfreq->lock);
+	if (!devfreq->stop_polling)
+		goto out;
+
+	if (!delayed_work_pending(&devfreq->work) &&
+			devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+
+	devfreq->last_stat_updated = jiffies;
+	devfreq->stop_polling = false;
+
+	if (devfreq->profile->get_cur_freq &&
+		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+		devfreq->previous_freq = freq;
+
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq:    the devfreq instance.
+ * @delay:      new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+	unsigned int cur_delay = devfreq->profile->polling_ms;
+	unsigned int new_delay = *delay;
+
+	mutex_lock(&devfreq->lock);
+	devfreq->profile->polling_ms = new_delay;
+
+	if (devfreq->stop_polling)
+		goto out;
+
+	/* if new delay is zero, stop polling */
+	if (!new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		return;
+	}
+
+	/* if current delay is zero, start polling with new delay */
+	if (!cur_delay) {
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+		goto out;
+	}
+
+	/* if current delay is greater than new delay, restart polling */
+	if (cur_delay > new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		mutex_lock(&devfreq->lock);
+		if (!devfreq->stop_polling)
+			queue_delayed_work(devfreq_wq, &devfreq->work,
+			      msecs_to_jiffies(devfreq->profile->polling_ms));
+	}
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
+
+/**
+ * devfreq_notifier_call() - Notify that the device frequency requirements
+ *			   have been changed out of devfreq framework.
+ * @nb:		the notifier_block (supposed to be devfreq->nb)
+ * @type:	not used
+ * @devp:	not used
+ *
+ * Called by a notifier that uses devfreq->nb.
+ */
+static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
+				 void *devp)
+{
+	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+	int ret;
+
+	mutex_lock(&devfreq->lock);
+	ret = update_devfreq(devfreq);
+	mutex_unlock(&devfreq->lock);
+
+	return ret;
+}
+
+/**
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev:	the devfreq device
+ *
+ * Remove devfreq from the list and release its resources.
+ * Runs when the last reference to the embedded struct device is dropped
+ * (e.g. after device_unregister()); it frees the devfreq instance itself,
+ * so nothing may touch it afterwards.
+ */
+static void devfreq_dev_release(struct device *dev)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+
+	/* Unlink from the global device list under its lock. */
+	mutex_lock(&devfreq_list_lock);
+	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
+
+	/* Stop the governor before the instance disappears. */
+	if (devfreq->governor)
+		devfreq->governor->event_handler(devfreq,
+						 DEVFREQ_GOV_STOP, NULL);
+
+	/* Let the driver release its own resources. */
+	if (devfreq->profile->exit)
+		devfreq->profile->exit(devfreq->dev.parent);
+
+	mutex_destroy(&devfreq->lock);
+	kfree(devfreq);
+}
+
+/**
+ * devfreq_add_device() - Add devfreq feature to the device
+ * @dev:	the device to add devfreq feature.
+ * @profile:	device-specific profile to run devfreq.
+ * @governor_name:	name of the policy to choose frequency.
+ * @data:	private data for the governor. The devfreq framework does not
+ *		touch this value.
+ *
+ * Returns the new devfreq instance, or an ERR_PTR() value on failure.
+ */
+struct devfreq *devfreq_add_device(struct device *dev,
+				   struct devfreq_dev_profile *profile,
+				   const char *governor_name,
+				   void *data)
+{
+	struct devfreq *devfreq;
+	struct devfreq_governor *governor;
+	int err = 0;
+
+	if (!dev || !profile || !governor_name) {
+		dev_err(dev, "%s: Invalid parameters.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Refuse to attach devfreq twice to the same device. */
+	mutex_lock(&devfreq_list_lock);
+	devfreq = find_device_devfreq(dev);
+	mutex_unlock(&devfreq_list_lock);
+	if (!IS_ERR(devfreq)) {
+		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+			__func__);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
+	if (!devfreq) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mutex_init(&devfreq->lock);
+	mutex_lock(&devfreq->lock);
+	devfreq->dev.parent = dev;
+	devfreq->dev.class = devfreq_class;
+	devfreq->dev.release = devfreq_dev_release;
+	INIT_LIST_HEAD(&devfreq->node);
+	devfreq->profile = profile;
+	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+	devfreq->previous_freq = profile->initial_freq;
+	devfreq->last_status.current_frequency = profile->initial_freq;
+	devfreq->data = data;
+	devfreq->nb.notifier_call = devfreq_notifier_call;
+
+	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+		/* Build a frequency table from the OPP library. */
+		mutex_unlock(&devfreq->lock);
+		devfreq_set_freq_table(devfreq);
+		mutex_lock(&devfreq->lock);
+	}
+
+	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+	err = device_register(&devfreq->dev);
+	if (err) {
+		mutex_unlock(&devfreq->lock);
+		goto err_dev;
+	}
+
+	devfreq->trans_table =	devm_kzalloc(&devfreq->dev,
+						sizeof(unsigned int) *
+						devfreq->profile->max_state *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	if (!devfreq->trans_table) {
+		mutex_unlock(&devfreq->lock);
+		err = -ENOMEM;
+		goto err_devfreq;
+	}
+
+	devfreq->time_in_state = devm_kzalloc(&devfreq->dev,
+						sizeof(unsigned long) *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	if (!devfreq->time_in_state) {
+		mutex_unlock(&devfreq->lock);
+		err = -ENOMEM;
+		goto err_devfreq;
+	}
+
+	devfreq->last_stat_updated = jiffies;
+
+	srcu_init_notifier_head(&devfreq->transition_notifier_list);
+
+	mutex_unlock(&devfreq->lock);
+
+	mutex_lock(&devfreq_list_lock);
+	list_add(&devfreq->node, &devfreq_list);
+
+	governor = find_devfreq_governor(devfreq->governor_name);
+	if (IS_ERR(governor)) {
+		dev_err(dev, "%s: Unable to find governor for the device\n",
+			__func__);
+		err = PTR_ERR(governor);
+		goto err_init;
+	}
+
+	devfreq->governor = governor;
+	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+						NULL);
+	if (err) {
+		dev_err(dev, "%s: Unable to start governor for the device\n",
+			__func__);
+		goto err_init;
+	}
+	mutex_unlock(&devfreq_list_lock);
+
+	return devfreq;
+
+err_init:
+	mutex_unlock(&devfreq_list_lock);
+
+	/*
+	 * devfreq_dev_release() performs the list_del() (under
+	 * devfreq_list_lock, which we just dropped) and frees the
+	 * instance; clear the pointer so we neither list_del() a node
+	 * twice nor double-free devfreq below.
+	 */
+	device_unregister(&devfreq->dev);
+	devfreq = NULL;
+	goto err_out;
+err_devfreq:
+	/* Registered but not yet on the list: unregistering frees it. */
+	devfreq_remove_device(devfreq);
+	devfreq = NULL;
+err_dev:
+	kfree(devfreq);		/* kfree(NULL) is a no-op */
+err_out:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(devfreq_add_device);
+
+/**
+ * devfreq_remove_device() - Remove devfreq feature from a device.
+ * @devfreq:	the devfreq instance to be removed
+ *
+ * The opposite of devfreq_add_device(). The instance itself is freed by
+ * devfreq_dev_release() once the last device reference is dropped.
+ */
+int devfreq_remove_device(struct devfreq *devfreq)
+{
+	if (devfreq == NULL)
+		return -EINVAL;
+
+	device_unregister(&devfreq->dev);
+	return 0;
+}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/* devres match callback: true when the devres payload points at @data. */
+static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
+{
+	struct devfreq **this = res;
+
+	if (WARN_ON(!this || !*this))
+		return 0;
+
+	return *this == data;
+}
+
+/* devres release callback: tear down the managed devfreq instance. */
+static void devm_devfreq_dev_release(struct device *dev, void *res)
+{
+	struct devfreq *devfreq = *(struct devfreq **)res;
+
+	devfreq_remove_device(devfreq);
+}
+
+/**
+ * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
+ * @dev:	the device to add devfreq feature.
+ * @profile:	device-specific profile to run devfreq.
+ * @governor_name:	name of the policy to choose frequency.
+ * @data:	private data for the governor. The devfreq framework does not
+ *		touch this value.
+ *
+ * Same as devfreq_add_device(), but the instance is removed automatically
+ * through device resource management when @dev goes away.
+ */
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+					struct devfreq_dev_profile *profile,
+					const char *governor_name,
+					void *data)
+{
+	struct devfreq **dr;
+	struct devfreq *devfreq;
+
+	dr = devres_alloc(devm_devfreq_dev_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return ERR_PTR(-ENOMEM);
+
+	devfreq = devfreq_add_device(dev, profile, governor_name, data);
+	if (IS_ERR(devfreq)) {
+		devres_free(dr);
+	} else {
+		*dr = devfreq;
+		devres_add(dev, dr);
+	}
+
+	return devfreq;
+}
+EXPORT_SYMBOL(devm_devfreq_add_device);
+
+#ifdef CONFIG_OF
+/*
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev - instance to the given device
+ * @index - index into list of devfreq
+ *
+ * Returns the devfreq instance whose parent matches the @index-th
+ * "devfreq" phandle of @dev, or ERR_PTR(-EPROBE_DEFER) when the target
+ * node has not registered a devfreq yet.
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	struct device_node *node;
+	struct devfreq *devfreq;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, "devfreq", index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	/* Match the phandle target against the parents of known devfreqs. */
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		if (devfreq->dev.parent
+			&& devfreq->dev.parent->of_node == node) {
+			mutex_unlock(&devfreq_list_lock);
+			of_node_put(node);
+			return devfreq;
+		}
+	}
+	mutex_unlock(&devfreq_list_lock);
+	of_node_put(node);
+
+	/* Target exists in DT but is not registered yet: let caller retry. */
+	return ERR_PTR(-EPROBE_DEFER);
+}
+#else
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
+
+/**
+ * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
+ * @dev:	the device that owns the managed devfreq instance.
+ * @devfreq:	the devfreq instance to be removed
+ */
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
+{
+	int err;
+
+	/* Find, invoke and drop the matching devres entry. */
+	err = devres_release(dev, devm_devfreq_dev_release,
+			     devm_devfreq_dev_match, devfreq);
+	WARN_ON(err);
+}
+EXPORT_SYMBOL(devm_devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ *
+ * Intended for the PM callbacks (e.g. runtime_suspend, suspend) of the
+ * driver that owns @devfreq. Without a governor there is nothing to stop.
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
+	if (devfreq == NULL)
+		return -EINVAL;
+	if (devfreq->governor == NULL)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ *
+ * Intended for the PM callbacks (e.g. runtime_resume, resume) of the
+ * driver that owns @devfreq. Without a governor there is nothing to start.
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+	if (devfreq == NULL)
+		return -EINVAL;
+	if (devfreq->governor == NULL)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor:	the devfreq governor to be added
+ *
+ * Registers @governor in the global governor list and immediately starts
+ * it on every already-registered devfreq device whose governor_name
+ * matches (devices may register before their governor module loads).
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	/* Reject duplicate registration by name. */
+	if (!IS_ERR(g)) {
+		pr_err("%s: governor %s already registered\n", __func__,
+		       g->name);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	list_add(&governor->node, &devfreq_governor_list);
+
+	/* Attach the new governor to devices that were waiting for it. */
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret = 0;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* The following should never occur */
+			if (devfreq->governor) {
+				dev_warn(dev,
+					 "%s: Governor %s already present\n",
+					 __func__, devfreq->governor->name);
+				ret = devfreq->governor->event_handler(devfreq,
+							DEVFREQ_GOV_STOP, NULL);
+				if (ret) {
+					dev_warn(dev,
+						 "%s: Governor %s stop = %d\n",
+						 __func__,
+						 devfreq->governor->name, ret);
+				}
+				/* Fall through */
+			}
+			devfreq->governor = governor;
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_START, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s start=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+		}
+	}
+
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
+
+/**
+ * devfreq_remove_governor() - Remove devfreq feature from a device.
+ * @governor:	the devfreq governor to be removed
+ *
+ * Stops @governor on every device currently using it (leaving those
+ * devices governor-less) and unlinks it from the global governor list.
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (IS_ERR(g)) {
+		pr_err("%s: governor %s not registered\n", __func__,
+		       governor->name);
+		err = PTR_ERR(g);
+		goto err_out;
+	}
+	/* Detach the governor from every device that references it. */
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* we should have a devfreq governor! */
+			if (!devfreq->governor) {
+				dev_warn(dev, "%s: Governor %s NOT present\n",
+					 __func__, governor->name);
+				continue;
+				/* Fall through */
+			}
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_STOP, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s stop=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+			devfreq->governor = NULL;
+		}
+	}
+
+	list_del(&governor->node);
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_remove_governor);
+
+/* sysfs "name": expose the parent (i.e. the real hardware) device name. */
+static ssize_t name_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%s\n", dev_name(df->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
+/* sysfs "governor" (read): name of the currently attached governor. */
+static ssize_t governor_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	if (!df->governor)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", df->governor->name);
+}
+
+/*
+ * sysfs "governor" (write): switch the device to another registered
+ * governor by name. The whole transition runs under devfreq_list_lock.
+ */
+static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	int ret;
+	char str_governor[DEVFREQ_NAME_LEN + 1];
+	struct devfreq_governor *governor;
+
+	/* Width-limited scan: at most DEVFREQ_NAME_LEN chars plus NUL. */
+	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&devfreq_list_lock);
+	governor = find_devfreq_governor(str_governor);
+	if (IS_ERR(governor)) {
+		ret = PTR_ERR(governor);
+		goto out;
+	}
+	if (df->governor == governor) {
+		/* Already active: nothing to do. */
+		ret = 0;
+		goto out;
+	} else if ((df->governor && df->governor->immutable) ||
+					governor->immutable) {
+		/* Immutable governors (e.g. passive) cannot be switched. */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Stop the old governor before starting the new one. */
+	if (df->governor) {
+		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+		if (ret) {
+			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+				 __func__, df->governor->name, ret);
+			goto out;
+		}
+	}
+	df->governor = governor;
+	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+	if (ret)
+		dev_warn(dev, "%s: Governor %s not started(%d)\n",
+			 __func__, df->governor->name, ret);
+out:
+	mutex_unlock(&devfreq_list_lock);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+static DEVICE_ATTR_RW(governor);
+
+/*
+ * sysfs "available_governors": space-separated list of governors the
+ * device could use, terminated by a newline.
+ */
+static ssize_t available_governors_show(struct device *d,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	ssize_t count = 0;
+
+	mutex_lock(&devfreq_list_lock);
+
+	/*
+	 * The devfreq with immutable governor (e.g., passive) shows
+	 * only own governor.
+	 */
+	if (df->governor && df->governor->immutable) {
+		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
+				   "%s ", df->governor_name);
+	/*
+	 * The devfreq device shows the registered governor except for
+	 * immutable governors such as passive governor .
+	 */
+	} else {
+		struct devfreq_governor *governor;
+
+		/* "- 2" keeps room for the final "\n" and the NUL. */
+		list_for_each_entry(governor, &devfreq_governor_list, node) {
+			if (governor->immutable)
+				continue;
+			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+					   "%s ", governor->name);
+		}
+	}
+
+	mutex_unlock(&devfreq_list_lock);
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+static DEVICE_ATTR_RO(available_governors);
+
+/* sysfs "cur_freq": current frequency, queried from the driver if possible. */
+static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned long freq;
+
+	/* Prefer the driver-reported frequency when available. */
+	if (df->profile->get_cur_freq &&
+	    !df->profile->get_cur_freq(df->dev.parent, &freq))
+		return sprintf(buf, "%lu\n", freq);
+
+	/* Fall back to the last frequency set through the framework. */
+	return sprintf(buf, "%lu\n", df->previous_freq);
+}
+static DEVICE_ATTR_RO(cur_freq);
+
+/* sysfs "target_freq": last frequency the framework asked for. */
+static ssize_t target_freq_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%lu\n", df->previous_freq);
+}
+static DEVICE_ATTR_RO(target_freq);
+
+/* sysfs "polling_interval" (read): monitoring period in milliseconds. */
+static ssize_t polling_interval_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%d\n", df->profile->polling_ms);
+}
+
+/*
+ * sysfs "polling_interval" (write): hand the new period to the governor
+ * via DEVFREQ_GOV_INTERVAL; the governor decides how to apply it.
+ */
+static ssize_t polling_interval_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned int interval;
+
+	if (!df->governor)
+		return -EINVAL;
+
+	if (sscanf(buf, "%u", &interval) != 1)
+		return -EINVAL;
+
+	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &interval);
+
+	return count;
+}
+static DEVICE_ATTR_RW(polling_interval);
+
+/*
+ * sysfs "min_freq" (write): set the lower frequency bound (0 = no bound)
+ * and re-evaluate the target under the instance lock.
+ */
+static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned long value;
+	int ret;
+	unsigned long max;
+
+	ret = sscanf(buf, "%lu", &value);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&df->lock);
+	max = df->max_freq;
+	/* A non-zero min must not exceed a non-zero max. */
+	if (value && max && value > max) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	df->min_freq = value;
+	update_devfreq(df);
+	ret = count;
+unlock:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+/*
+ * sysfs "max_freq" (write): set the upper frequency bound (0 = no bound)
+ * and re-evaluate the target under the instance lock.
+ */
+static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned long value;
+	int ret;
+	unsigned long min;
+
+	ret = sscanf(buf, "%lu", &value);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&df->lock);
+	min = df->min_freq;
+	/* A non-zero max must not fall below a non-zero min. */
+	if (value && min && value < min) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	df->max_freq = value;
+	update_devfreq(df);
+	ret = count;
+unlock:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+/* Generate the read side of min_freq/max_freq from one template. */
+#define show_one(name)						\
+static ssize_t name##_show					\
+(struct device *dev, struct device_attribute *attr, char *buf)	\
+{								\
+	return sprintf(buf, "%lu\n", to_devfreq(dev)->name);	\
+}
+show_one(min_freq);
+show_one(max_freq);
+
+static DEVICE_ATTR_RW(min_freq);
+static DEVICE_ATTR_RW(max_freq);
+
+/*
+ * sysfs "available_frequencies": all OPP frequencies of the parent device
+ * in ascending order, space-separated, newline-terminated.
+ */
+static ssize_t available_frequencies_show(struct device *d,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	struct device *dev = df->dev.parent;
+	struct dev_pm_opp *opp;
+	unsigned long freq = 0;
+	ssize_t count = 0;
+
+	/* Each ceil lookup writes the found rate back into freq. */
+	while (1) {
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+		if (IS_ERR(opp))
+			break;
+		dev_pm_opp_put(opp);
+
+		/* "- 2" keeps room for the final "\n" and the NUL. */
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				   "%lu ", freq);
+		freq++;	/* step past this OPP for the next lookup */
+	}
+
+	/* Replace the trailing space with a newline. */
+	if (count)
+		count--;
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+static DEVICE_ATTR_RO(available_frequencies);
+
+/*
+ * sysfs "trans_stat": matrix of transition counts between frequency
+ * states plus time spent in each state.
+ * NOTE(review): output is built with plain sprintf(); assumes the whole
+ * table fits in one PAGE_SIZE buffer — confirm for very large max_state.
+ */
+static ssize_t trans_stat_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	ssize_t len;
+	int i, j;
+	unsigned int max_state = devfreq->profile->max_state;
+
+	if (max_state == 0)
+		return sprintf(buf, "Not Supported.\n");
+
+	/* Fold the time since the last update into the counters first. */
+	mutex_lock(&devfreq->lock);
+	if (!devfreq->stop_polling &&
+			devfreq_update_status(devfreq, devfreq->previous_freq)) {
+		mutex_unlock(&devfreq->lock);
+		return 0;
+	}
+	mutex_unlock(&devfreq->lock);
+
+	len = sprintf(buf, "     From  :   To\n");
+	len += sprintf(buf + len, "           :");
+	for (i = 0; i < max_state; i++)
+		len += sprintf(buf + len, "%10lu",
+				devfreq->profile->freq_table[i]);
+
+	len += sprintf(buf + len, "   time(ms)\n");
+
+	for (i = 0; i < max_state; i++) {
+		/* Mark the row of the currently active frequency. */
+		if (devfreq->profile->freq_table[i]
+					== devfreq->previous_freq) {
+			len += sprintf(buf + len, "*");
+		} else {
+			len += sprintf(buf + len, " ");
+		}
+		len += sprintf(buf + len, "%10lu:",
+				devfreq->profile->freq_table[i]);
+		for (j = 0; j < max_state; j++)
+			len += sprintf(buf + len, "%10u",
+				devfreq->trans_table[(i * max_state) + j]);
+		len += sprintf(buf + len, "%10u\n",
+			jiffies_to_msecs(devfreq->time_in_state[i]));
+	}
+
+	len += sprintf(buf + len, "Total transition : %u\n",
+					devfreq->total_trans);
+	return len;
+}
+static DEVICE_ATTR_RO(trans_stat);
+
+/* sysfs attributes attached to every devfreq device through the class. */
+static struct attribute *devfreq_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_governor.attr,
+	&dev_attr_available_governors.attr,
+	&dev_attr_cur_freq.attr,
+	&dev_attr_available_frequencies.attr,
+	&dev_attr_target_freq.attr,
+	&dev_attr_polling_interval.attr,
+	&dev_attr_min_freq.attr,
+	&dev_attr_max_freq.attr,
+	&dev_attr_trans_stat.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(devfreq);
+
+/* Framework init: create the devfreq class and its polling workqueue. */
+static int __init devfreq_init(void)
+{
+	devfreq_class = class_create(THIS_MODULE, "devfreq");
+	if (IS_ERR(devfreq_class)) {
+		pr_err("%s: couldn't create class\n", __FILE__);
+		return PTR_ERR(devfreq_class);
+	}
+	/* No device is registered yet, so this is safe to set here. */
+	devfreq_class->dev_groups = devfreq_groups;
+
+	/* Freezable so polling stops across system suspend. */
+	devfreq_wq = create_freezable_workqueue("devfreq_wq");
+	if (devfreq_wq == NULL) {
+		pr_err("%s: couldn't create workqueue\n", __FILE__);
+		class_destroy(devfreq_class);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+subsys_initcall(devfreq_init);
+
+/*
+ * The following are helper functions for devfreq user device drivers with
+ * OPP framework.
+ */
+
+/**
+ * devfreq_recommended_opp() - Helper function to get proper OPP for the
+ *			     freq value given to target callback.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @freq:	The frequency given to target function
+ * @flags:	Flags handed from devfreq framework.
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+					   unsigned long *freq,
+					   u32 flags)
+{
+	struct dev_pm_opp *opp;
+	bool upper_bound = flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND;
+
+	/* Primary direction depends on whether *freq is an upper bound. */
+	opp = upper_bound ? dev_pm_opp_find_freq_floor(dev, freq)
+			  : dev_pm_opp_find_freq_ceil(dev, freq);
+
+	/* Nothing on that side of *freq: take the closest OPP instead. */
+	if (opp == ERR_PTR(-ERANGE))
+		opp = upper_bound ? dev_pm_opp_find_freq_ceil(dev, freq)
+				  : dev_pm_opp_find_freq_floor(dev, freq);
+
+	return opp;
+}
+EXPORT_SYMBOL(devfreq_recommended_opp);
+
+/**
+ * devfreq_register_opp_notifier() - Helper function to get devfreq notified
+ *				   for any changes in the OPP availability
+ *				   changes
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ *
+ * Hooks devfreq->nb (devfreq_notifier_call) into the OPP core's chain so
+ * the target frequency is re-evaluated when OPPs are enabled/disabled.
+ */
+int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
+}
+EXPORT_SYMBOL(devfreq_register_opp_notifier);
+
+/**
+ * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
+ *				     notified for any changes in the OPP
+ *				     availability changes anymore.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ *
+ * At exit() callback of devfreq_dev_profile, this must be included if
+ * devfreq_recommended_opp is used.
+ */
+int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
+}
+EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
+
+/* devres release callback: drop the managed OPP notifier registration. */
+static void devm_devfreq_opp_release(struct device *dev, void *res)
+{
+	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
+}
+
+/**
+ * devm_devfreq_register_opp_notifier()
+ *		- Resource-managed devfreq_register_opp_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ *
+ * The notifier is unregistered automatically when @dev is released.
+ */
+int devm_devfreq_register_opp_notifier(struct device *dev,
+				       struct devfreq *devfreq)
+{
+	struct devfreq **dr;
+	int err;
+
+	dr = devres_alloc(devm_devfreq_opp_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	err = devfreq_register_opp_notifier(dev, devfreq);
+	if (err) {
+		devres_free(dr);
+		return err;
+	}
+
+	*dr = devfreq;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
+
+/**
+ * devm_devfreq_unregister_opp_notifier()
+ *		- Resource-managed devfreq_unregister_opp_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ *
+ * Releases the devres entry early; the devres payload is a plain
+ * struct devfreq *, so devm_devfreq_dev_match() applies directly.
+ */
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+					 struct devfreq *devfreq)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
+
+/**
+ * devfreq_register_notifier() - Register a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to register.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_register_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		return srcu_notifier_chain_register(
+				&devfreq->transition_notifier_list, nb);
+	default:
+		/* Unknown notifier chain. */
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(devfreq_register_notifier);
+
+/*
+ * devfreq_unregister_notifier() - Unregister a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		return srcu_notifier_chain_unregister(
+				&devfreq->transition_notifier_list, nb);
+	default:
+		/* Unknown notifier chain. */
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(devfreq_unregister_notifier);
+
+/*
+ * Payload tracked by devres for devm_devfreq_register_notifier().
+ * NOTE(review): devm_devfreq_unregister_notifier() matches this via
+ * devm_devfreq_dev_match(), which reads the payload as a struct devfreq *;
+ * that only works while @devfreq stays the FIRST member of this struct.
+ */
+struct devfreq_notifier_devres {
+	struct devfreq *devfreq;	/* instance the notifier is attached to */
+	struct notifier_block *nb;	/* caller's notifier block */
+	unsigned int list;		/* which chain: DEVFREQ_*_NOTIFIER */
+};
+
+/* devres release callback: drop the managed transition notifier. */
+static void devm_devfreq_notifier_release(struct device *dev, void *res)
+{
+	struct devfreq_notifier_devres *dr = res;
+
+	devfreq_unregister_notifier(dr->devfreq, dr->nb, dr->list);
+}
+
+/**
+ * devm_devfreq_register_notifier()
+ *		- Resource-managed devfreq_register_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be registered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ *
+ * The notifier is unregistered automatically when @dev is released.
+ */
+int devm_devfreq_register_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	struct devfreq_notifier_devres *dr;
+	int err;
+
+	dr = devres_alloc(devm_devfreq_notifier_release, sizeof(*dr),
+				GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	err = devfreq_register_notifier(devfreq, nb, list);
+	if (err) {
+		devres_free(dr);
+		return err;
+	}
+
+	/* Record what must be unregistered when @dev goes away. */
+	dr->devfreq = devfreq;
+	dr->nb = nb;
+	dr->list = list;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+/**
+ * devm_devfreq_unregister_notifier()
+ *		- Resource-managed devfreq_unregister_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ *
+ * NOTE(review): matching reuses devm_devfreq_dev_match(), which treats
+ * the devres payload as a struct devfreq *. For the notifier devres this
+ * works only because @devfreq is the first member of
+ * struct devfreq_notifier_devres — confirm before reordering that struct.
+ */
+void devm_devfreq_unregister_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/Kconfig b/src/kernel/linux/v4.14/drivers/devfreq/event/Kconfig
new file mode 100644
index 0000000..8851bc4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/Kconfig
@@ -0,0 +1,41 @@
+menuconfig PM_DEVFREQ_EVENT
+	bool "DEVFREQ-Event device Support"
+	help
+	  The devfreq-event device provides the raw data and events which
+	  indicate the current state of devfreq-event device. The provided
+	  data from devfreq-event device is used to monitor the state of
+	  device and determine the suitable size of resource to reduce the
+	  wasted resource.
+
+	  The devfreq-event device can support the various type of events
+	  (e.g., raw data, utilization, latency, bandwidth). The events
+	  may be used by devfreq governor and other subsystem.
+
+if PM_DEVFREQ_EVENT
+
+config DEVFREQ_EVENT_EXYNOS_NOCP
+	tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select PM_OPP
+	select REGMAP_MMIO
+	help
+	  This adds the devfreq-event driver for Exynos SoC. It provides NoC
+	  (Network on Chip) Probe counters to measure the bandwidth of AXI bus.
+
+config DEVFREQ_EVENT_EXYNOS_PPMU
+	tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select PM_OPP
+	help
+	  This adds the devfreq-event driver for Exynos SoC. It provides PPMU
+	  (Platform Performance Monitoring Unit) counters to estimate the
+	  utilization of each module.
+
+config DEVFREQ_EVENT_ROCKCHIP_DFI
+	tristate "ROCKCHIP DFI DEVFREQ event Driver"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	help
+	  This adds the devfreq-event driver for Rockchip SoC. It provides DFI
+	  (DDR Monitor Module) driver to count ddr load.
+
+endif # PM_DEVFREQ_EVENT
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/Makefile b/src/kernel/linux/v4.14/drivers/devfreq/event/Makefile
new file mode 100644
index 0000000..dda7090
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/Makefile
@@ -0,0 +1,5 @@
+# DEVFREQ Event Drivers (Exynos, Rockchip)
+
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
+obj-$(CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI) += rockchip-dfi.o
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.c b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.c
new file mode 100644
index 0000000..f6e7956
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.c
@@ -0,0 +1,303 @@
+/*
+ * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "exynos-nocp.h"
+
+/* Per-instance state for one NoC probe device. */
+struct exynos_nocp {
+	struct devfreq_event_dev *edev;	/* registered devfreq-event device */
+	struct devfreq_event_desc desc;	/* descriptor passed at registration */
+
+	struct device *dev;		/* backing platform device */
+
+	struct regmap *regmap;		/* MMIO regmap over the probe registers */
+	struct clk *clk;		/* optional "nocp" clock (may be NULL) */
+};
+
+/*
+ * The devfreq-event ops structure for nocp probe.
+ */
+/*
+ * exynos_nocp_set_event - program and arm the NoC probe counters.
+ *
+ * The sequence is order-sensitive: the probe is first disabled, the dump
+ * period and counter sources are configured, alarms are set up so that a
+ * StatALARM fires continuously (min/max both 0), and only then are the
+ * stat/alarm enables and GlobalEN set. On any failure the probe is
+ * disabled again before returning the regmap error code.
+ */
+static int exynos_nocp_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	int ret;
+
+	/* Disable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0);
+	if (ret < 0) {
+		dev_err(nocp->dev, "failed to disable the NoC probe device\n");
+		return ret;
+	}
+
+	/* Set a statistics dump period to 0 */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set the IntEvent fields of *_SRC */
+	/* counter0 = byte count; counter1 chains to it (high half) */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* counter2 = cycle count; counter3 chains to it (high half) */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CYCLE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+
+	/* Set an alarm with a max/min value of 0 to generate StatALARM */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set AlarmMode */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable the measurements by setting AlarmEn and StatEn */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Set GlobalEN */
+	ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL,
+				NOCP_CFG_CTL_GLOBALEN_MASK,
+				NOCP_CFG_CTL_GLOBALEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK,
+				NOCP_MAIN_CTL_STATEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	/* Reset NoC probe */
+	if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0)) {
+		dev_err(nocp->dev, "Failed to reset NoC probe device\n");
+	}
+
+	return ret;
+}
+
+/*
+ * exynos_nocp_get_event - read the four 16-bit counters and report load.
+ *
+ * Counters 0/1 are the low/high halves of the byte count (load_count) and
+ * counters 2/3 the low/high halves of the cycle count (total_count); each
+ * pair is combined as (high << 16) | low. Returns 0 on success or the
+ * regmap error code if any read fails.
+ */
+static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
+				struct devfreq_event_data *edata)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	unsigned int counter[4];
+	int ret;
+
+	/* Read cycle count */
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]);
+	if (ret < 0)
+		goto out;
+
+	edata->load_count = ((counter[1] << 16) | counter[0]);
+	edata->total_count = ((counter[3] << 16) | counter[2]);
+
+	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+
+	return 0;
+
+out:
+	dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
+
+	return ret;
+}
+
+static const struct devfreq_event_ops exynos_nocp_ops = {
+	.set_event = exynos_nocp_set_event,
+	.get_event = exynos_nocp_get_event,
+};
+
+static const struct of_device_id exynos_nocp_id_match[] = {
+	{ .compatible = "samsung,exynos5420-nocp", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_nocp_id_match);
+
+static struct regmap_config exynos_nocp_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = NOCP_COUNTERS_3_VAL,
+};
+
+/*
+ * exynos_nocp_parse_dt - acquire DT-described resources for a NoC probe.
+ *
+ * Gets the optional "nocp" clock (cleared to NULL when absent so later
+ * clk_* calls become no-ops), maps the first MEM resource, and creates an
+ * MMIO regmap whose max_register is derived from the resource size.
+ * Returns 0 on success or a negative errno.
+ */
+static int exynos_nocp_parse_dt(struct platform_device *pdev,
+				struct exynos_nocp *nocp)
+{
+	struct device *dev = nocp->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	/* Clock is optional: treat a lookup failure as "no clock". */
+	nocp->clk = devm_clk_get(dev, "nocp");
+	if (IS_ERR(nocp->clk))
+		nocp->clk = NULL;
+
+	/* Maps the memory mapped IO to control nocp register */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* Last valid 32-bit register lies 4 bytes before the region end. */
+	exynos_nocp_regmap_config.max_register = resource_size(res) - 4;
+
+	nocp->regmap = devm_regmap_init_mmio(dev, base,
+					&exynos_nocp_regmap_config);
+	if (IS_ERR(nocp->regmap)) {
+		dev_err(dev, "failed to initialize regmap\n");
+		return PTR_ERR(nocp->regmap);
+	}
+
+	return 0;
+}
+
+/*
+ * exynos_nocp_probe - bind a NoC probe platform device.
+ *
+ * Allocates the driver state, parses DT resources, registers the
+ * devfreq-event device (named after the DT node), then enables the
+ * optional clock. Returns 0 on success or a negative errno.
+ */
+static int exynos_nocp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct exynos_nocp *nocp;
+	int ret;
+
+	nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL);
+	if (!nocp)
+		return -ENOMEM;
+
+	nocp->dev = &pdev->dev;
+
+	/* Parse dt data to get resource */
+	ret = exynos_nocp_parse_dt(pdev, nocp);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"failed to parse devicetree for resource\n");
+		return ret;
+	}
+
+	/* Add devfreq-event device to measure the bandwidth of NoC */
+	nocp->desc.ops = &exynos_nocp_ops;
+	nocp->desc.driver_data = nocp;
+	nocp->desc.name = np->full_name;
+	nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc);
+	if (IS_ERR(nocp->edev)) {
+		dev_err(&pdev->dev,
+			"failed to add devfreq-event device\n");
+		return PTR_ERR(nocp->edev);
+	}
+	platform_set_drvdata(pdev, nocp);
+
+	/* nocp->clk may be NULL (optional clock); that is a no-op here. */
+	ret = clk_prepare_enable(nocp->clk);
+	if (ret) {
+		/* Fixed copy-paste from the PPMU driver: this is the nocp clock. */
+		dev_err(&pdev->dev, "failed to prepare nocp clock\n");
+		return ret;
+	}
+
+	pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
+			dev_name(dev));
+
+	return 0;
+}
+
+/*
+ * exynos_nocp_remove - unbind hook; everything else is devm-managed,
+ * so only the clock needs explicit disabling here.
+ */
+static int exynos_nocp_remove(struct platform_device *pdev)
+{
+	struct exynos_nocp *nocp = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(nocp->clk);
+
+	return 0;
+}
+
+static struct platform_driver exynos_nocp_driver = {
+	.probe	= exynos_nocp_probe,
+	.remove	= exynos_nocp_remove,
+	.driver = {
+		.name	= "exynos-nocp",
+		.of_match_table = exynos_nocp_id_match,
+	},
+};
+module_platform_driver(exynos_nocp_driver);
+
+MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.h b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.h
new file mode 100644
index 0000000..28564db
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-nocp.h
@@ -0,0 +1,78 @@
+/*
+ * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_NOCP_H__
+#define __EXYNOS_NOCP_H__
+
+enum nocp_reg {
+	NOCP_ID_REVISION_ID		= 0x04,
+	NOCP_MAIN_CTL			= 0x08,
+	NOCP_CFG_CTL			= 0x0C,
+
+	NOCP_STAT_PERIOD		= 0x24,
+	NOCP_STAT_GO			= 0x28,
+	NOCP_STAT_ALARM_MIN		= 0x2C,
+	NOCP_STAT_ALARM_MAX		= 0x30,
+	NOCP_STAT_ALARM_STATUS		= 0x34,
+	NOCP_STAT_ALARM_CLR		= 0x38,
+
+	NOCP_COUNTERS_0_SRC		= 0x138,
+	NOCP_COUNTERS_0_ALARM_MODE	= 0x13C,
+	NOCP_COUNTERS_0_VAL		= 0x140,
+
+	NOCP_COUNTERS_1_SRC		= 0x14C,
+	NOCP_COUNTERS_1_ALARM_MODE	= 0x150,
+	NOCP_COUNTERS_1_VAL		= 0x154,
+
+	NOCP_COUNTERS_2_SRC		= 0x160,
+	NOCP_COUNTERS_2_ALARM_MODE	= 0x164,
+	NOCP_COUNTERS_2_VAL		= 0x168,
+
+	NOCP_COUNTERS_3_SRC		= 0x174,
+	NOCP_COUNTERS_3_ALARM_MODE	= 0x178,
+	NOCP_COUNTERS_3_VAL		= 0x17C,
+};
+
+/* NOCP_MAIN_CTL register */
+#define NOCP_MAIN_CTL_ERREN_MASK		BIT(0)
+#define NOCP_MAIN_CTL_TRACEEN_MASK		BIT(1)
+#define NOCP_MAIN_CTL_PAYLOADEN_MASK		BIT(2)
+#define NOCP_MAIN_CTL_STATEN_MASK		BIT(3)
+#define NOCP_MAIN_CTL_ALARMEN_MASK		BIT(4)
+#define NOCP_MAIN_CTL_STATCONDDUMP_MASK	BIT(5)
+#define NOCP_MAIN_CTL_INTRUSIVEMODE_MASK	BIT(6)
+
+/* NOCP_CFG_CTL register */
+#define NOCP_CFG_CTL_GLOBALEN_MASK		BIT(0)
+#define NOCP_CFG_CTL_ACTIVE_MASK		BIT(1)
+
+/* NOCP_COUNTERS_x_SRC register */
+#define NOCP_CNT_SRC_INTEVENT_SHIFT		0
+#define NOCP_CNT_SRC_INTEVENT_MASK		(0x1F << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_OFF_MASK		(0x0 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CYCLE_MASK	(0x1 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_IDLE_MASK		(0x2 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_XFER_MASK		(0x3 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BUSY_MASK		(0x4 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_WAIT_MASK		(0x5 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_PKT_MASK		(0x6 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BYTE_MASK		(0x8 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CHAIN_MASK	(0x10 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+
+/* NOCP_COUNTERS_x_ALARM_MODE register */
+#define NOCP_CNT_ALARM_MODE_SHIFT		0
+#define NOCP_CNT_ALARM_MODE_MASK		(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_OFF_MASK		(0x0 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MASK		(0x1 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MAX_MASK		(0x2 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MAX_MASK	(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+
+#endif /* __EXYNOS_NOCP_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.c b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644
index 0000000..d96e3dc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.c
@@ -0,0 +1,681 @@
+/*
+ * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ *
+ * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/suspend.h>
+#include <linux/devfreq-event.h>
+
+#include "exynos-ppmu.h"
+
+/* Clock handle wrapper kept separate for future per-IP data. */
+struct exynos_ppmu_data {
+	struct clk *clk;	/* optional "ppmu" clock (may be NULL) */
+};
+
+/* Per-instance state: one PPMU exposes several devfreq-event devices. */
+struct exynos_ppmu {
+	struct devfreq_event_dev **edev;	/* array of registered event devices */
+	struct devfreq_event_desc *desc;	/* descriptors parsed from DT "events" */
+	unsigned int num_events;		/* number of child "events" nodes */
+
+	struct device *dev;
+	struct regmap *regmap;			/* MMIO regmap over PPMU registers */
+
+	struct exynos_ppmu_data ppmu;
+};
+
+#define PPMU_EVENT(name)			\
+	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
+	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
+	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
+	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
+
+static struct __exynos_ppmu_events {
+	char *name;
+	int id;
+} ppmu_events[] = {
+	/* For Exynos3250, Exynos4 and Exynos5260 */
+	PPMU_EVENT(g3d),
+	PPMU_EVENT(fsys),
+
+	/* For Exynos4 SoCs and Exynos3250 */
+	PPMU_EVENT(dmc0),
+	PPMU_EVENT(dmc1),
+	PPMU_EVENT(cpu),
+	PPMU_EVENT(rightbus),
+	PPMU_EVENT(leftbus),
+	PPMU_EVENT(lcd0),
+	PPMU_EVENT(camif),
+
+	/* Only for Exynos3250 and Exynos5260 */
+	PPMU_EVENT(mfc),
+
+	/* Only for Exynos4 SoCs */
+	PPMU_EVENT(mfc-left),
+	PPMU_EVENT(mfc-right),
+
+	/* Only for Exynos5260 SoCs */
+	PPMU_EVENT(drex0-s0),
+	PPMU_EVENT(drex0-s1),
+	PPMU_EVENT(drex1-s0),
+	PPMU_EVENT(drex1-s1),
+	PPMU_EVENT(eagle),
+	PPMU_EVENT(kfc),
+	PPMU_EVENT(isp),
+	PPMU_EVENT(fimc),
+	PPMU_EVENT(gscl),
+	PPMU_EVENT(mscl),
+	PPMU_EVENT(fimd0x),
+	PPMU_EVENT(fimd1x),
+
+	/* Only for Exynos5433 SoCs */
+	PPMU_EVENT(d0-cpu),
+	PPMU_EVENT(d0-general),
+	PPMU_EVENT(d0-rt),
+	PPMU_EVENT(d1-cpu),
+	PPMU_EVENT(d1-general),
+	PPMU_EVENT(d1-rt),
+};
+
+/*
+ * Map a devfreq-event device name to its PPMU counter id by looking it
+ * up in the static ppmu_events table. Returns the counter id, or
+ * -EINVAL when the name is not a known event.
+ */
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ppmu_events); idx++) {
+		if (strcmp(edev->desc->name, ppmu_events[idx].name) == 0)
+			return ppmu_events[idx].id;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * The devfreq-event ops structure for PPMU v1.1
+ */
+/*
+ * exynos_ppmu_disable - stop all PPMU v1.1 counting.
+ *
+ * Clears every counter enable (cycle counter + the four performance
+ * counters) via CNTENC, then clears the global enable bit in PMNC.
+ * Returns 0 on success or the first regmap error.
+ */
+static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int ret;
+	u32 pmnc;
+
+	/* Disable all counters */
+	ret = regmap_write(info->regmap, PPMU_CNTENC,
+				PPMU_CCNT_MASK |
+				PPMU_PMCNT0_MASK |
+				PPMU_PMCNT1_MASK |
+				PPMU_PMCNT2_MASK |
+				PPMU_PMCNT3_MASK);
+	if (ret < 0)
+		return ret;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_set_event - arm one PPMU v1.1 counter for R/W data counting.
+ *
+ * Looks up the counter id from the event name (bails out with the
+ * negative id on unknown events), enables the cycle counter plus that
+ * counter, selects the read+write data-count event, then resets both
+ * counters and sets the global enable in PMNC.
+ */
+static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+	u32 pmnc, cntens;
+
+	if (id < 0)
+		return id;
+
+	/* Enable specific counter */
+	ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
+	if (ret < 0)
+		return ret;
+
+	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
+	if (ret < 0)
+		return ret;
+
+	/* Set the event of Read/Write data count  */
+	ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
+				PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT);
+	if (ret < 0)
+		return ret;
+
+	/* Reset cycle counter/performance counter and enable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+			| PPMU_PMNC_COUNTER_RESET_MASK
+			| PPMU_PMNC_CC_RESET_MASK);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_get_event - stop counting and report load/total counts.
+ *
+ * Disables the PPMU, reads the cycle counter into total_count and the
+ * selected performance counter into load_count, then disables that
+ * counter. PMNCNT3 is split across HIGH/LOW registers and recombined.
+ */
+static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
+				struct devfreq_event_data *edata)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	unsigned int total_count, load_count;
+	unsigned int pmcnt3_high, pmcnt3_low;
+	unsigned int pmnc, cntenc;
+	int ret;
+
+	if (id < 0)
+		return -EINVAL;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	/* Read cycle count */
+	ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
+	if (ret < 0)
+		return ret;
+	edata->total_count = total_count;
+
+	/* Read performance count */
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
+		if (ret < 0)
+			return ret;
+		edata->load_count = load_count;
+		break;
+	case PPMU_PMNCNT3:
+		ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
+		if (ret < 0)
+			return ret;
+
+		ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * NOTE(review): high part shifted by only 8 bits here, while
+		 * the v2 path shifts by 32 — confirm against the PPMU v1.1
+		 * register layout before relying on large PMNCNT3 values.
+		 */
+		edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Disable specific counter */
+	ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
+	if (ret < 0)
+		return ret;
+
+	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+
+	return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_ops = {
+	.disable = exynos_ppmu_disable,
+	.set_event = exynos_ppmu_set_event,
+	.get_event = exynos_ppmu_get_event,
+};
+
+/*
+ * The devfreq-event ops structure for PPMU v2.0
+ */
+/*
+ * exynos_ppmu_v2_disable - fully quiesce a PPMU v2.0 instance.
+ *
+ * Clears all counter enables/interrupts/flags, zeroes every event-type,
+ * CIG and system-monitor configuration register, then clears the global
+ * enable bit in PMNC. Each write is checked; the first regmap error is
+ * returned, otherwise 0.
+ */
+static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int ret;
+	u32 pmnc, clear;
+
+	/* Disable all counters */
+	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
+		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+	ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
+	if (ret < 0)
+		return ret;
+
+	/* Reset every configuration register to its inert (zero) state. */
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
+	if (ret < 0)
+		return ret;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_v2_set_event - arm one PPMU v2.0 counter.
+ *
+ * Mirrors the v1.1 path: enables the cycle counter plus the counter for
+ * this event, selects the R/W data-count event type (PMNCNT3 uses the
+ * dedicated combined-RW event), then resets the counters and starts the
+ * PPMU in manual mode.
+ */
+static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	unsigned int pmnc, cntens;
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+
+	/*
+	 * Unknown event names must be rejected here: the v1.1 path already
+	 * does this, and "PPMU_ENABLE << id" below is undefined behavior
+	 * for a negative id.
+	 */
+	if (id < 0)
+		return id;
+
+	/* Enable all counters */
+	ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
+	if (ret < 0)
+		return ret;
+
+	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
+	if (ret < 0)
+		return ret;
+
+	/* Set the event of Read/Write data count  */
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+				PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT);
+		if (ret < 0)
+			return ret;
+		break;
+	case PPMU_PMNCNT3:
+		ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+				PPMU_V2_EVT3_RW_DATA_CNT);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	/* Reset cycle counter/performance counter and enable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+			| PPMU_PMNC_COUNTER_RESET_MASK
+			| PPMU_PMNC_CC_RESET_MASK
+			| PPMU_PMNC_CC_DIVIDER_MASK
+			| PPMU_V2_PMNC_START_MODE_MASK);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
+
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_v2_get_event - stop a PPMU v2.0 counter and report counts.
+ *
+ * Disables the PPMU, reads the 32-bit cycle counter into total_count and
+ * the selected performance counter into load_count (PMNCNT3 is a 40-bit
+ * value split across HIGH/LOW and recombined as (high & 0xff) << 32 | low),
+ * then disables the counter again.
+ */
+static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
+				    struct devfreq_event_data *edata)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+	unsigned int pmnc, cntenc;
+	unsigned int pmcnt_high, pmcnt_low;
+	unsigned int total_count, count;
+	unsigned long load_count = 0;
+
+	/*
+	 * Reject unknown events up front (as the v1.1 path does):
+	 * "PPMU_ENABLE << id" below is undefined for a negative id.
+	 */
+	if (id < 0)
+		return -EINVAL;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	/* Read cycle count and performance count */
+	ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
+	if (ret < 0)
+		return ret;
+	edata->total_count = total_count;
+
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
+		if (ret < 0)
+			return ret;
+		load_count = count;
+		break;
+	case PPMU_PMNCNT3:
+		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
+						&pmcnt_high);
+		if (ret < 0)
+			return ret;
+
+		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
+		if (ret < 0)
+			return ret;
+
+		load_count = ((u64)((pmcnt_high & 0xff)) << 32)+ (u64)pmcnt_low;
+		break;
+	}
+	edata->load_count = load_count;
+
+	/* Disable all counters */
+	ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
+	if (ret < 0)
+		return ret;	/* was "return 0": don't swallow the read error */
+
+	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+	return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
+	.disable = exynos_ppmu_v2_disable,
+	.set_event = exynos_ppmu_v2_set_event,
+	.get_event = exynos_ppmu_v2_get_event,
+};
+
+static const struct of_device_id exynos_ppmu_id_match[] = {
+	{
+		.compatible = "samsung,exynos-ppmu",
+		.data = (void *)&exynos_ppmu_ops,
+	}, {
+		.compatible = "samsung,exynos-ppmu-v2",
+		.data = (void *)&exynos_ppmu_v2_ops,
+	},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
+
+/*
+ * Return the devfreq-event ops bound to this node's compatible string.
+ * NOTE(review): assumes @np always matches exynos_ppmu_id_match (true for
+ * nodes probed through this driver); of_match_node() would return NULL
+ * otherwise — confirm if callers ever pass other nodes.
+ */
+static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
+{
+	const struct of_device_id *match;
+
+	match = of_match_node(exynos_ppmu_id_match, np);
+	return (struct devfreq_event_ops *)match->data;
+}
+
+/*
+ * of_get_devfreq_events - build event descriptors from the DT "events" node.
+ *
+ * Allocates one devfreq_event_desc per child of "events", skipping (with
+ * a warning) children whose node name is not in ppmu_events. Each kept
+ * descriptor gets the ops for this compatible and its "event-name"
+ * property as name. Returns 0 on success or a negative errno.
+ */
+static int of_get_devfreq_events(struct device_node *np,
+				 struct exynos_ppmu *info)
+{
+	struct devfreq_event_desc *desc;
+	struct devfreq_event_ops *event_ops;
+	struct device *dev = info->dev;
+	struct device_node *events_np, *node;
+	int i, j, count;
+
+	events_np = of_get_child_by_name(np, "events");
+	if (!events_np) {
+		dev_err(dev,
+			"failed to get child node of devfreq-event devices\n");
+		return -EINVAL;
+	}
+	event_ops = exynos_bus_get_ops(np);
+
+	count = of_get_child_count(events_np);
+	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
+	if (!desc) {
+		/* Drop the reference taken by of_get_child_by_name(). */
+		of_node_put(events_np);
+		return -ENOMEM;
+	}
+	info->num_events = count;
+
+	j = 0;
+	for_each_child_of_node(events_np, node) {
+		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
+			if (!ppmu_events[i].name)
+				continue;
+
+			if (!of_node_cmp(node->name, ppmu_events[i].name))
+				break;
+		}
+
+		if (i == ARRAY_SIZE(ppmu_events)) {
+			dev_warn(dev,
+				"don't know how to configure events : %s\n",
+				node->name);
+			continue;
+		}
+
+		desc[j].ops = event_ops;
+		desc[j].driver_data = info;
+
+		of_property_read_string(node, "event-name", &desc[j].name);
+
+		j++;
+	}
+	info->desc = desc;
+
+	of_node_put(events_np);
+
+	return 0;
+}
+
+static struct regmap_config exynos_ppmu_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+/*
+ * exynos_ppmu_parse_dt - acquire DT-described resources for a PPMU.
+ *
+ * Maps the first MEM resource into an MMIO regmap (max_register derived
+ * from the resource size), gets the optional "ppmu" clock (NULL when
+ * absent), and parses the "events" children. Returns 0 or negative errno.
+ */
+static int exynos_ppmu_parse_dt(struct platform_device *pdev,
+				struct exynos_ppmu *info)
+{
+	struct device *dev = info->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+	int ret = 0;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	/* Maps the memory mapped IO to control PPMU register */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* Last valid 32-bit register lies 4 bytes before the region end. */
+	exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
+	info->regmap = devm_regmap_init_mmio(dev, base,
+					&exynos_ppmu_regmap_config);
+	if (IS_ERR(info->regmap)) {
+		dev_err(dev, "failed to initialize regmap\n");
+		return PTR_ERR(info->regmap);
+	}
+
+	/* Clock is optional: warn and continue without it. */
+	info->ppmu.clk = devm_clk_get(dev, "ppmu");
+	if (IS_ERR(info->ppmu.clk)) {
+		info->ppmu.clk = NULL;
+		dev_warn(dev, "cannot get PPMU clock\n");
+	}
+
+	ret = of_get_devfreq_events(np, info);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse exynos ppmu dt node\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_probe - bind a PPMU platform device.
+ *
+ * Allocates driver state, parses DT resources and event descriptors,
+ * registers one devfreq-event device per parsed descriptor, then enables
+ * the optional clock. Returns 0 on success or a negative errno.
+ */
+static int exynos_ppmu_probe(struct platform_device *pdev)
+{
+	struct exynos_ppmu *info;
+	struct devfreq_event_dev **edev;
+	struct devfreq_event_desc *desc;
+	int i, ret = 0, size;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = &pdev->dev;
+
+	/* Parse dt data to get resource */
+	ret = exynos_ppmu_parse_dt(pdev, info);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"failed to parse devicetree for resource\n");
+		return ret;
+	}
+	desc = info->desc;
+
+	size = sizeof(struct devfreq_event_dev *) * info->num_events;
+	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!info->edev) {
+		dev_err(&pdev->dev,
+			"failed to allocate memory devfreq-event devices\n");
+		return -ENOMEM;
+	}
+	edev = info->edev;
+	platform_set_drvdata(pdev, info);
+
+	for (i = 0; i < info->num_events; i++) {
+		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
+		if (IS_ERR(edev[i])) {
+			ret = PTR_ERR(edev[i]);
+			dev_err(&pdev->dev,
+				"failed to add devfreq-event device\n");
+			return ret;	/* same value; avoid re-deriving PTR_ERR */
+		}
+
+		pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
+			dev_name(&pdev->dev), desc[i].name);
+	}
+
+	/* info->ppmu.clk may be NULL (optional clock); that is a no-op. */
+	ret = clk_prepare_enable(info->ppmu.clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * exynos_ppmu_remove - unbind hook; event devices and memory are
+ * devm-managed, so only the clock needs explicit disabling.
+ */
+static int exynos_ppmu_remove(struct platform_device *pdev)
+{
+	struct exynos_ppmu *info = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(info->ppmu.clk);
+
+	return 0;
+}
+
+static struct platform_driver exynos_ppmu_driver = {
+	.probe	= exynos_ppmu_probe,
+	.remove	= exynos_ppmu_remove,
+	.driver = {
+		.name	= "exynos-ppmu",
+		.of_match_table = exynos_ppmu_id_match,
+	},
+};
+module_platform_driver(exynos_ppmu_driver);
+
+MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.h b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644
index 0000000..05774c4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/exynos-ppmu.h
@@ -0,0 +1,163 @@
+/*
+ * exynos_ppmu.h - EXYNOS PPMU header file
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PPMU_H__
+#define __EXYNOS_PPMU_H__
+
+enum ppmu_state {
+	PPMU_DISABLE = 0,
+	PPMU_ENABLE,
+};
+
+enum ppmu_counter {
+	PPMU_PMNCNT0 = 0,
+	PPMU_PMNCNT1,
+	PPMU_PMNCNT2,
+	PPMU_PMNCNT3,
+
+	PPMU_PMNCNT_MAX,
+};
+
+/***
+ * PPMUv1.1 Definitions
+ */
+enum ppmu_event_type {
+	PPMU_RO_BUSY_CYCLE_CNT	= 0x0,
+	PPMU_WO_BUSY_CYCLE_CNT	= 0x1,
+	PPMU_RW_BUSY_CYCLE_CNT	= 0x2,
+	PPMU_RO_REQUEST_CNT	= 0x3,
+	PPMU_WO_REQUEST_CNT	= 0x4,
+	PPMU_RO_DATA_CNT	= 0x5,
+	PPMU_WO_DATA_CNT	= 0x6,
+	PPMU_RO_LATENCY		= 0x12,
+	PPMU_WO_LATENCY		= 0x16,
+};
+
+enum ppmu_reg {
+	/* PPC control register */
+	PPMU_PMNC		= 0x00,
+	PPMU_CNTENS		= 0x10,
+	PPMU_CNTENC		= 0x20,
+	PPMU_INTENS		= 0x30,
+	PPMU_INTENC		= 0x40,
+	PPMU_FLAG		= 0x50,
+
+	/* Cycle Counter and Performance Event Counter Register */
+	PPMU_CCNT		= 0x100,
+	PPMU_PMCNT0		= 0x110,
+	PPMU_PMCNT1		= 0x120,
+	PPMU_PMCNT2		= 0x130,
+	PPMU_PMCNT3_HIGH	= 0x140,
+	PPMU_PMCNT3_LOW		= 0x150,
+
+	/* Bus Event Generator */
+	PPMU_BEVT0SEL		= 0x1000,
+	PPMU_BEVT1SEL		= 0x1100,
+	PPMU_BEVT2SEL		= 0x1200,
+	PPMU_BEVT3SEL		= 0x1300,
+	PPMU_COUNTER_RESET	= 0x1810,
+	PPMU_READ_OVERFLOW_CNT	= 0x1810,
+	PPMU_READ_UNDERFLOW_CNT	= 0x1814,
+	PPMU_WRITE_OVERFLOW_CNT	= 0x1850,
+	PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
+	PPMU_READ_PENDING_CNT	= 0x1880,
+	PPMU_WRITE_PENDING_CNT	= 0x1884
+};
+
+/* PMNC register */
+#define PPMU_PMNC_CC_RESET_SHIFT	2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT	1
+#define PPMU_PMNC_ENABLE_SHIFT		0
+#define PPMU_PMNC_START_MODE_MASK	BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK	BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK		BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK	BIT(1)
+#define PPMU_PMNC_ENABLE_MASK		BIT(0)
+
+/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
+#define PPMU_CCNT_MASK			BIT(31)
+#define PPMU_PMCNT3_MASK		BIT(3)
+#define PPMU_PMCNT2_MASK		BIT(2)
+#define PPMU_PMCNT1_MASK		BIT(1)
+#define PPMU_PMCNT0_MASK		BIT(0)
+
+/* PPMU_PMNCTx/PPMU_BETxSEL registers */
+#define PPMU_PMNCT(x)			(PPMU_PMCNT0 + (0x10 * x))
+#define PPMU_BEVTxSEL(x)		(PPMU_BEVT0SEL + (0x100 * x))
+
+/***
+ * PPMU_V2.0 definitions
+ */
+enum ppmu_v2_mode {
+	PPMU_V2_MODE_MANUAL = 0,
+	PPMU_V2_MODE_AUTO = 1,
+	PPMU_V2_MODE_CIG = 2,	/* CIG (Conditional Interrupt Generation) */
+};
+
+enum ppmu_v2_event_type {
+	PPMU_V2_RO_DATA_CNT	= 0x4,
+	PPMU_V2_WO_DATA_CNT	= 0x5,
+
+	PPMU_V2_EVT3_RW_DATA_CNT = 0x22,	/* Only for Event3 */
+};
+
+enum ppmu_V2_reg {
+	/* PPC control register */
+	PPMU_V2_PMNC		= 0x04,
+	PPMU_V2_CNTENS		= 0x08,
+	PPMU_V2_CNTENC		= 0x0c,
+	PPMU_V2_INTENS		= 0x10,
+	PPMU_V2_INTENC		= 0x14,
+	PPMU_V2_FLAG		= 0x18,
+
+	/* Cycle Counter and Performance Event Counter Register */
+	PPMU_V2_CCNT		= 0x48,
+	PPMU_V2_PMCNT0		= 0x34,
+	PPMU_V2_PMCNT1		= 0x38,
+	PPMU_V2_PMCNT2		= 0x3c,
+	PPMU_V2_PMCNT3_LOW	= 0x40,
+	PPMU_V2_PMCNT3_HIGH	= 0x44,
+
+	/* Bus Event Generator */
+	PPMU_V2_CIG_CFG0		= 0x1c,
+	PPMU_V2_CIG_CFG1		= 0x20,
+	PPMU_V2_CIG_CFG2		= 0x24,
+	PPMU_V2_CIG_RESULT	= 0x28,
+	PPMU_V2_CNT_RESET	= 0x2c,
+	PPMU_V2_CNT_AUTO		= 0x30,
+	PPMU_V2_CH_EV0_TYPE	= 0x200,
+	PPMU_V2_CH_EV1_TYPE	= 0x204,
+	PPMU_V2_CH_EV2_TYPE	= 0x208,
+	PPMU_V2_CH_EV3_TYPE	= 0x20c,
+	PPMU_V2_SM_ID_V		= 0x220,
+	PPMU_V2_SM_ID_A		= 0x224,
+	PPMU_V2_SM_OTHERS_V	= 0x228,
+	PPMU_V2_SM_OTHERS_A	= 0x22c,
+	PPMU_V2_INTERRUPT_RESET	= 0x260,
+};
+
+/* PMNC register */
+#define PPMU_V2_PMNC_START_MODE_SHIFT	20
+#define PPMU_V2_PMNC_START_MODE_MASK	(0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
+
+#define PPMU_PMNC_CC_RESET_SHIFT	2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT	1
+#define PPMU_PMNC_ENABLE_SHIFT		0
+#define PPMU_PMNC_START_MODE_MASK	BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK	BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK		BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK	BIT(1)
+#define PPMU_PMNC_ENABLE_MASK		BIT(0)
+
+#define PPMU_V2_PMNCT(x)		(PPMU_V2_PMCNT0 + (0x4 * x))
+#define PPMU_V2_CH_EVx_TYPE(x)		(PPMU_V2_CH_EV0_TYPE + (0x4 * x))
+
+#endif /* __EXYNOS_PPMU_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/event/rockchip-dfi.c b/src/kernel/linux/v4.14/drivers/devfreq/event/rockchip-dfi.c
new file mode 100644
index 0000000..22b1133
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/event/rockchip-dfi.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+#define RK3399_DMC_NUM_CH	2
+
+/* DDRMON_CTRL */
+#define DDRMON_CTRL	0x04
+/*
+ * NOTE(review): the high halfword of these values looks like a per-bit
+ * write-enable mask for the matching low bits (common Rockchip register
+ * convention) — confirm against the RK3399 TRM.
+ */
+#define CLR_DDRMON_CTRL	(0x1f0000 << 0)
+#define LPDDR4_EN	(0x10001 << 4)
+#define HARDWARE_EN	(0x10001 << 3)
+#define LPDDR3_EN	(0x10001 << 2)
+#define SOFTWARE_EN	(0x10001 << 1)
+#define SOFTWARE_DIS	(0x10000 << 1)
+#define TIME_CNT_EN	(0x10001 << 0)
+
+/* per-channel monitor counters; channel stride is 0x14 bytes */
+#define DDRMON_CH0_COUNT_NUM		0x28
+#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
+#define DDRMON_CH1_COUNT_NUM		0x3c
+#define DDRMON_CH1_DFI_ACCESS_NUM	0x40
+
+/* pmu grf */
+#define PMUGRF_OS_REG2	0x308
+#define DDRTYPE_SHIFT	13
+#define DDRTYPE_MASK	7
+
+/* DDR type field values read from PMUGRF_OS_REG2 */
+enum {
+	DDR3 = 3,
+	LPDDR3 = 6,
+	LPDDR4 = 7,
+	UNUSED = 0xFF
+};
+
+/* one channel's sample: busy (access) count vs. total cycle count */
+struct dmc_usage {
+	u32 access;
+	u32 total;
+};
+
+/*
+ * The dfi controller can monitor DDR load. It has an upper and lower threshold
+ * for the operating points. Whenever the usage leaves these bounds an event is
+ * generated to indicate the DDR frequency should be changed.
+ */
+struct rockchip_dfi {
+	struct devfreq_event_dev *edev;
+	struct devfreq_event_desc *desc;
+	struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
+	struct device *dev;
+	void __iomem *regs;
+	struct regmap *regmap_pmu;	/* optional PMU GRF syscon (ddr type) */
+	struct clk *clk;
+};
+
+/*
+ * Program DDRMON for the detected DDR type and start counting in
+ * software-triggered mode.
+ */
+static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	void __iomem *dfi_regs = info->regs;
+	u32 val;
+	u32 ddr_type;
+
+	/* get ddr type */
+	regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val);
+	ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK;
+
+	/* clear DDRMON_CTRL setting */
+	writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
+
+	/* set ddr type to dfi; plain DDR3 needs no type bit set */
+	if (ddr_type == LPDDR3)
+		writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
+	else if (ddr_type == LPDDR4)
+		writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
+
+	/* enable count, use software mode */
+	writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL);
+}
+
+/* Freeze the DDRMON counters so they can be read coherently. */
+static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	void __iomem *dfi_regs = info->regs;
+
+	writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL);
+}
+
+/*
+ * Stop the counters, sample both DMC channels, and return the index of
+ * the channel with the highest access count; counting is restarted
+ * before returning.  The 0x14 (20-byte) stride matches the CH0->CH1
+ * register offsets above.
+ */
+static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	u32 tmp, max = 0;
+	u32 i, busier_ch = 0;
+	void __iomem *dfi_regs = info->regs;
+
+	rockchip_dfi_stop_hardware_counter(edev);
+
+	/* Find out which channel is busier */
+	for (i = 0; i < RK3399_DMC_NUM_CH; i++) {
+		/* NOTE(review): *4 presumably scales accesses to data beats
+		 * or bytes — confirm against the TRM. */
+		info->ch_usage[i].access = readl_relaxed(dfi_regs +
+				DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4;
+		info->ch_usage[i].total = readl_relaxed(dfi_regs +
+				DDRMON_CH0_COUNT_NUM + i * 20);
+		tmp = info->ch_usage[i].access;
+		if (tmp > max) {
+			busier_ch = i;
+			max = tmp;
+		}
+	}
+	rockchip_dfi_start_hardware_counter(edev);
+
+	return busier_ch;
+}
+
+/* devfreq-event .disable: stop counting and gate the monitor clock. */
+static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+
+	rockchip_dfi_stop_hardware_counter(edev);
+	clk_disable_unprepare(info->clk);
+
+	return 0;
+}
+
+/* devfreq-event .enable: ungate the monitor clock and start counting. */
+static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	int ret;
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret) {
+		dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
+		return ret;
+	}
+
+	rockchip_dfi_start_hardware_counter(edev);
+	return 0;
+}
+
+/* No per-event configuration is needed for this monitor. */
+static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
+{
+	return 0;
+}
+
+/*
+ * devfreq-event .get_event: report load/total of the busier DMC channel
+ * (sampling both channels as a side effect via get_busier_ch()).
+ */
+static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
+				  struct devfreq_event_data *edata)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	int busier_ch;
+
+	busier_ch = rockchip_dfi_get_busier_ch(edev);
+
+	edata->load_count = info->ch_usage[busier_ch].access;
+	edata->total_count = info->ch_usage[busier_ch].total;
+
+	return 0;
+}
+
+/* devfreq-event callbacks exposed by this driver */
+static const struct devfreq_event_ops rockchip_dfi_ops = {
+	.disable = rockchip_dfi_disable,
+	.enable = rockchip_dfi_enable,
+	.get_event = rockchip_dfi_get_event,
+	.set_event = rockchip_dfi_set_event,
+};
+
+static const struct of_device_id rockchip_dfi_id_match[] = {
+	{ .compatible = "rockchip,rk3399-dfi" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
+
+/*
+ * Map the DDRMON registers, grab the monitor clock, resolve the optional
+ * "rockchip,pmu" syscon (used to read the DDR type), and register a
+ * devfreq-event device.  All resources are devm-managed.
+ */
+static int rockchip_dfi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_dfi *data;
+	struct resource *res;
+	struct devfreq_event_desc *desc;
+	struct device_node *np = pdev->dev.of_node, *node;
+
+	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->regs))
+		return PTR_ERR(data->regs);
+
+	data->clk = devm_clk_get(dev, "pclk_ddr_mon");
+	if (IS_ERR(data->clk)) {
+		dev_err(dev, "Cannot get the clk dmc_clk\n");
+		return PTR_ERR(data->clk);
+	}
+
+	/* try to find the optional reference to the pmu syscon */
+	node = of_parse_phandle(np, "rockchip,pmu", 0);
+	if (node) {
+		data->regmap_pmu = syscon_node_to_regmap(node);
+		/* drop the reference taken by of_parse_phandle() */
+		of_node_put(node);
+		if (IS_ERR(data->regmap_pmu))
+			return PTR_ERR(data->regmap_pmu);
+	}
+	data->dev = dev;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->ops = &rockchip_dfi_ops;
+	desc->driver_data = data;
+	desc->name = np->name;
+	data->desc = desc;
+
+	data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
+	if (IS_ERR(data->edev)) {
+		dev_err(&pdev->dev,
+			"failed to add devfreq-event device\n");
+		return PTR_ERR(data->edev);
+	}
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+}
+
+static struct platform_driver rockchip_dfi_driver = {
+	.probe	= rockchip_dfi_probe,
+	.driver = {
+		.name	= "rockchip-dfi",
+		.of_match_table = rockchip_dfi_id_match,
+	},
+};
+module_platform_driver(rockchip_dfi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip DFI driver");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/exynos-bus.c b/src/kernel/linux/v4.14/drivers/devfreq/exynos-bus.c
new file mode 100644
index 0000000..25ff31e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/exynos-bus.c
@@ -0,0 +1,571 @@
+/*
+ * Generic Exynos Bus frequency driver with DEVFREQ Framework
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This driver support Exynos Bus frequency feature by using
+ * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define DEFAULT_SATURATION_RATIO	40
+#define DEFAULT_VOLTAGE_TOLERANCE	2
+
+/* Per-bus state shared by the parent (ondemand) and passive code paths. */
+struct exynos_bus {
+	struct device *dev;
+
+	struct devfreq *devfreq;
+	struct devfreq_event_dev **edev;	/* utilization monitors */
+	unsigned int edev_count;
+	struct mutex lock;		/* serializes freq/volt changes */
+
+	unsigned long curr_freq;
+
+	struct regulator *regulator;	/* parent bus only; NULL for passive */
+	struct clk *clk;
+	unsigned int voltage_tolerance;	/* percent */
+	unsigned int ratio;		/* saturation ratio, percent */
+};
+
+/*
+ * Control the devfreq-event device to get the current state of bus.
+ * Generates exynos_bus_enable_edev()/_disable_edev()/_set_event(), each
+ * applying the corresponding devfreq_event_* call to every edev and
+ * stopping at the first error.
+ */
+#define exynos_bus_ops_edev(ops)				\
+static int exynos_bus_##ops(struct exynos_bus *bus)		\
+{								\
+	int i, ret;						\
+								\
+	for (i = 0; i < bus->edev_count; i++) {			\
+		if (!bus->edev[i])				\
+			continue;				\
+		ret = devfreq_event_##ops(bus->edev[i]);	\
+		if (ret < 0)					\
+			return ret;				\
+	}							\
+								\
+	return 0;						\
+}
+exynos_bus_ops_edev(enable_edev);
+exynos_bus_ops_edev(disable_edev);
+exynos_bus_ops_edev(set_event);
+
+/*
+ * Poll every devfreq-event device and report the sample with the highest
+ * load_count (the busiest monitor wins; its total_count goes with it).
+ */
+static int exynos_bus_get_event(struct exynos_bus *bus,
+				struct devfreq_event_data *edata)
+{
+	struct devfreq_event_data event_data;
+	unsigned long load_count = 0, total_count = 0;
+	int i, ret = 0;
+
+	for (i = 0; i < bus->edev_count; i++) {
+		if (!bus->edev[i])
+			continue;
+
+		ret = devfreq_event_get_event(bus->edev[i], &event_data);
+		if (ret < 0)
+			return ret;
+
+		/* i == 0 seeds the result even if its load is zero */
+		if (i == 0 || event_data.load_count > load_count) {
+			load_count = event_data.load_count;
+			total_count = event_data.total_count;
+		}
+	}
+
+	edata->load_count = load_count;
+	edata->total_count = total_count;
+
+	return ret;
+}
+
+/*
+ * Must necessary function for devfreq simple-ondemand governor
+ */
+/*
+ * Must necessary function for devfreq simple-ondemand governor.
+ * Scale the parent bus to *freq: voltage is raised before a frequency
+ * increase and lowered after a decrease, so the bus never runs faster
+ * than its current voltage allows.
+ */
+static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq, new_volt, tol;
+	int ret = 0;
+
+	/* Get new opp-bus instance according to new bus clock */
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	new_volt = dev_pm_opp_get_voltage(new_opp);
+	dev_pm_opp_put(new_opp);
+
+	old_freq = bus->curr_freq;
+
+	if (old_freq == new_freq)
+		return 0;
+	tol = new_volt * bus->voltage_tolerance / 100;
+
+	/* Change voltage and frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	if (old_freq < new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to change clock of bus\n");
+		/* best-effort restore of the previous rate */
+		clk_set_rate(bus->clk, old_freq);
+		goto out;
+	}
+
+	if (old_freq > new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+	bus->curr_freq = new_freq;
+
+	dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+			old_freq, new_freq, clk_get_rate(bus->clk));
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+/*
+ * devfreq .get_dev_status: report busy/total time derived from the
+ * devfreq-event counters, scaled by the saturation ratio.
+ */
+static int exynos_bus_get_dev_status(struct device *dev,
+				     struct devfreq_dev_status *stat)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct devfreq_event_data edata;
+	int ret;
+
+	stat->current_frequency = bus->curr_freq;
+
+	ret = exynos_bus_get_event(bus, &edata);
+	if (ret < 0) {
+		stat->total_time = stat->busy_time = 0;
+		goto err;
+	}
+
+	stat->busy_time = (edata.load_count * 100) / bus->ratio;
+	stat->total_time = edata.total_count;
+
+	dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
+							stat->total_time);
+
+	/* fall through: re-arm the counters on success AND failure */
+err:
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+/* devfreq .exit for the parent bus: stop monitors, drop OPPs, clk, power. */
+static void exynos_bus_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0)
+		dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+	if (bus->regulator)
+		regulator_disable(bus->regulator);
+}
+
+/*
+ * Must necessary function for devfreq passive governor
+ */
+/*
+ * Must necessary function for devfreq passive governor.
+ * Like exynos_bus_target() but clock-only: passive buses have no
+ * regulator of their own.
+ */
+static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
+					u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq;
+	int ret = 0;
+
+	/* Get new opp-bus instance according to new bus clock */
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	dev_pm_opp_put(new_opp);
+
+	old_freq = bus->curr_freq;
+
+	if (old_freq == new_freq)
+		return 0;
+
+	/* Change the frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to set the clock of bus\n");
+		goto out;
+	}
+
+	*freq = new_freq;
+	bus->curr_freq = new_freq;
+
+	dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+			old_freq, new_freq, clk_get_rate(bus->clk));
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+/* devfreq .exit for a passive bus: no monitors or regulator to release. */
+static void exynos_bus_passive_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+}
+
+/*
+ * Parent-bus-only DT setup: acquire and enable the "vdd" regulator,
+ * resolve all devfreq-event phandles, and read the optional tuning
+ * properties.  On any failure after regulator_enable() the regulator is
+ * disabled again.
+ */
+static int exynos_bus_parent_parse_of(struct device_node *np,
+					struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	int i, ret, count, size;
+
+	/* Get the regulator to provide each bus with the power */
+	bus->regulator = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(bus->regulator)) {
+		dev_err(dev, "failed to get VDD regulator\n");
+		return PTR_ERR(bus->regulator);
+	}
+
+	ret = regulator_enable(bus->regulator);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable VDD regulator\n");
+		return ret;
+	}
+
+	/*
+	 * Get the devfreq-event devices to get the current utilization of
+	 * buses. This raw data will be used in devfreq ondemand governor.
+	 */
+	count = devfreq_event_get_edev_count(dev);
+	if (count < 0) {
+		dev_err(dev, "failed to get the count of devfreq-event dev\n");
+		ret = count;
+		goto err_regulator;
+	}
+	bus->edev_count = count;
+
+	size = sizeof(*bus->edev) * count;
+	bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!bus->edev) {
+		ret = -ENOMEM;
+		goto err_regulator;
+	}
+
+	for (i = 0; i < count; i++) {
+		bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+		if (IS_ERR(bus->edev[i])) {
+			/* NOTE(review): every lookup error is mapped to
+			 * -EPROBE_DEFER, masking real failures. */
+			ret = -EPROBE_DEFER;
+			goto err_regulator;
+		}
+	}
+
+	/*
+	 * Optionally, Get the saturation ratio according to Exynos SoC
+	 * When measuring the utilization of each AXI bus with devfreq-event
+	 * devices, the measured real cycle might be much lower than the
+	 * total cycle of bus during sampling rate. In result, the devfreq
+	 * simple-ondemand governor might not decide to change the current
+	 * frequency due to too utilization (= real cycle/total cycle).
+	 * So, this property is used to adjust the utilization when calculating
+	 * the busy_time in exynos_bus_get_dev_status().
+	 */
+	if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
+		bus->ratio = DEFAULT_SATURATION_RATIO;
+
+	if (of_property_read_u32(np, "exynos,voltage-tolerance",
+					&bus->voltage_tolerance))
+		bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
+
+	return 0;
+
+err_regulator:
+	regulator_disable(bus->regulator);
+
+	return ret;
+}
+
+/*
+ * Common DT setup for parent and passive buses: enable the "bus" clock,
+ * register the OPP table, and initialize curr_freq from the current
+ * clock rate rounded to the nearest OPP.
+ */
+static int exynos_bus_parse_of(struct device_node *np,
+			      struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	struct dev_pm_opp *opp;
+	unsigned long rate;
+	int ret;
+
+	/* Get the clock to provide each bus with source clock */
+	bus->clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(bus->clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(bus->clk);
+	}
+
+	ret = clk_prepare_enable(bus->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to get enable clock\n");
+		return ret;
+	}
+
+	/* Get the freq and voltage from OPP table to scale the bus freq */
+	ret = dev_pm_opp_of_add_table(dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to get OPP table\n");
+		goto err_clk;
+	}
+
+	rate = clk_get_rate(bus->clk);
+
+	opp = devfreq_recommended_opp(dev, &rate, 0);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "failed to find dev_pm_opp\n");
+		ret = PTR_ERR(opp);
+		goto err_opp;
+	}
+	bus->curr_freq = dev_pm_opp_get_freq(opp);
+	dev_pm_opp_put(opp);
+
+	return 0;
+
+err_opp:
+	dev_pm_opp_of_remove_table(dev);
+err_clk:
+	clk_disable_unprepare(bus->clk);
+
+	return ret;
+}
+
+/*
+ * Probe one exynos-bus node.  A node with a "devfreq" phandle is a
+ * passive bus following its parent's frequency; otherwise it is a
+ * parent bus using the simple-ondemand governor with devfreq-event
+ * monitors.  Cleanup on failure is via the err/err_reg labels.
+ */
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node, *node;
+	struct devfreq_dev_profile *profile;
+	struct devfreq_simple_ondemand_data *ondemand_data;
+	struct devfreq_passive_data *passive_data;
+	struct devfreq *parent_devfreq;
+	struct exynos_bus *bus;
+	int ret, max_state;
+	unsigned long min_freq, max_freq;
+	bool passive = false;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		return -ENOMEM;
+	mutex_init(&bus->lock);
+	bus->dev = &pdev->dev;
+	platform_set_drvdata(pdev, bus);
+
+	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+	if (!profile)
+		return -ENOMEM;
+
+	/* the presence of a "devfreq" phandle selects the passive path */
+	node = of_parse_phandle(dev->of_node, "devfreq", 0);
+	if (node) {
+		of_node_put(node);
+		passive = true;
+	} else {
+		ret = exynos_bus_parent_parse_of(np, bus);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Parse the device-tree to get the resource information */
+	ret = exynos_bus_parse_of(np, bus);
+	if (ret < 0)
+		goto err_reg;
+
+	if (passive)
+		goto passive;
+
+	/* Initialize the struct profile and governor data for parent device */
+	profile->polling_ms = 50;
+	profile->target = exynos_bus_target;
+	profile->get_dev_status = exynos_bus_get_dev_status;
+	profile->exit = exynos_bus_exit;
+
+	ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
+	if (!ondemand_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	ondemand_data->upthreshold = 40;
+	ondemand_data->downdifferential = 5;
+
+	/* Add devfreq device to monitor and handle the exynos bus */
+	bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+						ondemand_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev, "failed to add devfreq device\n");
+		ret = PTR_ERR(bus->devfreq);
+		goto err;
+	}
+
+	/* Register opp_notifier to catch the change of OPP  */
+	ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to register opp notifier\n");
+		goto err;
+	}
+
+	/*
+	 * Enable devfreq-event to get raw data which is used to determine
+	 * current bus load.
+	 */
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable devfreq-event devices\n");
+		goto err;
+	}
+
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		goto err;
+	}
+
+	goto out;
+passive:
+	/* Initialize the struct profile and governor data for passive device */
+	profile->target = exynos_bus_passive_target;
+	profile->exit = exynos_bus_passive_exit;
+
+	/* Get the instance of parent devfreq device */
+	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+	if (IS_ERR(parent_devfreq)) {
+		/* parent may simply not be probed yet */
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+
+	passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+	if (!passive_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	passive_data->parent = parent_devfreq;
+
+	/* Add devfreq device for exynos bus with passive governor */
+	bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+						passive_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev,
+			"failed to add devfreq dev with passive governor\n");
+		ret = PTR_ERR(bus->devfreq);
+		goto err;
+	}
+
+out:
+	/* freq_table is sorted ascending, so [0] / [max_state - 1] bound it */
+	max_state = bus->devfreq->profile->max_state;
+	min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
+	max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+	pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
+			dev_name(dev), min_freq, max_freq);
+
+	return 0;
+
+err:
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+err_reg:
+	if (!passive)
+		regulator_disable(bus->regulator);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System resume: re-enable the devfreq-event monitors. */
+static int exynos_bus_resume(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* System suspend: stop the devfreq-event monitors. */
+static int exynos_bus_suspend(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to disable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_bus_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
+};
+
+static const struct of_device_id exynos_bus_of_match[] = {
+	{ .compatible = "samsung,exynos-bus", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
+
+static struct platform_driver exynos_bus_platdrv = {
+	.probe		= exynos_bus_probe,
+	.driver = {
+		.name	= "exynos-bus",
+		.pm	= &exynos_bus_pm,
+		.of_match_table = of_match_ptr(exynos_bus_of_match),
+	},
+};
+module_platform_driver(exynos_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor.h b/src/kernel/linux/v4.14/drivers/devfreq/governor.h
new file mode 100644
index 0000000..cfc50a6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor.h
@@ -0,0 +1,76 @@
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This header is for devfreq governors in drivers/devfreq/
+ */
+
+#ifndef _GOVERNOR_H
+#define _GOVERNOR_H
+
+#include <linux/devfreq.h>
+
+#define to_devfreq(DEV)	container_of((DEV), struct devfreq, dev)
+
+/* Devfreq events (passed to devfreq_governor.event_handler) */
+#define DEVFREQ_GOV_START			0x1
+#define DEVFREQ_GOV_STOP			0x2
+#define DEVFREQ_GOV_INTERVAL			0x3
+#define DEVFREQ_GOV_SUSPEND			0x4
+#define DEVFREQ_GOV_RESUME			0x5
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node:		list node - contains registered devfreq governors
+ * @name:		Governor's name
+ * @immutable:		Immutable flag for governor. If the value is 1,
+ *			this govenror is never changeable to other governor.
+ * @get_target_freq:	Returns desired operating frequency for the device.
+ *			Basically, get_target_freq will run
+ *			devfreq_dev_profile.get_dev_status() to get the
+ *			status of the device (load = busy_time / total_time).
+ *			If no_central_polling is set, this callback is called
+ *			only with update_devfreq() notified by OPP.
+ * @event_handler:      Callback for devfreq core framework to notify events
+ *                      to governors. Events include per device governor
+ *                      init and exit, opp changes out of devfreq, suspend
+ *                      and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+	struct list_head node;
+
+	const char name[DEVFREQ_NAME_LEN];
+	const unsigned int immutable;
+	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+	int (*event_handler)(struct devfreq *devfreq,
+				unsigned int event, void *data);
+};
+
+/* Caution: devfreq->lock must be locked before calling update_devfreq */
+extern int update_devfreq(struct devfreq *devfreq);
+
+/* Core helpers for governors that use the central polling machinery */
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+					unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
+extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+
+/* Refresh df->last_status via the driver's get_dev_status() callback. */
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+	return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+#endif /* _GOVERNOR_H */
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor_passive.c b/src/kernel/linux/v4.14/drivers/devfreq/governor_passive.c
new file mode 100644
index 0000000..d2ebdb7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor_passive.c
@@ -0,0 +1,210 @@
+/*
+ * linux/drivers/devfreq/governor_passive.c
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+/*
+ * Map the parent devfreq's chosen frequency (*freq on entry) onto this
+ * passive device's own OPP table: find the parent's freq_table index for
+ * *freq, then return the child's frequency at the same index (clamped to
+ * the child's highest state).  A driver-supplied get_target_freq()
+ * overrides this index mapping entirely.
+ */
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+					unsigned long *freq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
+	unsigned long child_freq = ULONG_MAX;
+	struct dev_pm_opp *opp;
+	int i, count, ret = 0;
+
+	/*
+	 * If the devfreq device with passive governor has the specific method
+	 * to determine the next frequency, should use the get_target_freq()
+	 * of struct devfreq_passive_data.
+	 */
+	if (p_data->get_target_freq) {
+		ret = p_data->get_target_freq(devfreq, freq);
+		goto out;
+	}
+
+	/*
+	 * If the parent and passive devfreq device uses the OPP table,
+	 * get the next frequency by using the OPP table.
+	 */
+
+	/*
+	 * - parent devfreq device uses the governors except for passive.
+	 * - passive devfreq device uses the passive governor.
+	 *
+	 * Each devfreq has the OPP table. After deciding the new frequency
+	 * from the governor of parent devfreq device, the passive governor
+	 * need to get the index of new frequency on OPP table of parent
+	 * device. And then the index is used for getting the suitable
+	 * new frequency for passive devfreq device.
+	 */
+	if (!devfreq->profile || !devfreq->profile->freq_table
+		|| devfreq->profile->max_state <= 0)
+		return -EINVAL;
+
+	/*
+	 * The passive governor have to get the correct frequency from OPP
+	 * list of parent device. Because in this case, *freq is temporary
+	 * value which is decided by ondemand governor.
+	 */
+	opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
+	if (IS_ERR(opp)) {
+		ret = PTR_ERR(opp);
+		goto out;
+	}
+
+	dev_pm_opp_put(opp);
+
+	/*
+	 * Get the OPP table's index of decided freqeuncy by governor
+	 * of parent device.
+	 */
+	for (i = 0; i < parent_devfreq->profile->max_state; i++)
+		if (parent_devfreq->profile->freq_table[i] == *freq)
+			break;
+
+	if (i == parent_devfreq->profile->max_state) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get the suitable frequency by using index of parent device. */
+	if (i < devfreq->profile->max_state) {
+		child_freq = devfreq->profile->freq_table[i];
+	} else {
+		count = devfreq->profile->max_state;
+		child_freq = devfreq->profile->freq_table[count - 1];
+	}
+
+	/* Return the suitable frequency for passive device. */
+	*freq = child_freq;
+
+out:
+	return ret;
+}
+
+/*
+ * Retarget the passive device to follow @freq chosen for its parent.
+ * Called from the transition notifier, hence the nested lock class.
+ * NOTE(review): errors from get_target_freq()/target() are swallowed and
+ * 0 is always returned — notifier callers ignore the value, but confirm
+ * this is intentional.
+ */
+static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
+{
+	int ret;
+
+	if (!devfreq->governor)
+		return -EINVAL;
+
+	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
+
+	ret = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (ret < 0)
+		goto out;
+
+	ret = devfreq->profile->target(devfreq->dev.parent, &freq, 0);
+	if (ret < 0)
+		goto out;
+
+	if (devfreq->profile->freq_table
+		&& (devfreq_update_status(devfreq, freq)))
+		dev_err(&devfreq->dev,
+			"Couldn't update frequency transition information.\n");
+
+	devfreq->previous_freq = freq;
+
+out:
+	mutex_unlock(&devfreq->lock);
+
+	return 0;
+}
+
+/*
+ * Parent-transition notifier: follow the parent before it speeds up
+ * (PRECHANGE on a decrease of parent freq... i.e. when the parent's
+ * previous freq is higher) and after it slows down, so the child is
+ * never faster than the parent allows mid-transition.
+ */
+static int devfreq_passive_notifier_call(struct notifier_block *nb,
+				unsigned long event, void *ptr)
+{
+	struct devfreq_passive_data *data
+			= container_of(nb, struct devfreq_passive_data, nb);
+	struct devfreq *devfreq = (struct devfreq *)data->this;
+	struct devfreq *parent = (struct devfreq *)data->parent;
+	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
+	unsigned long freq = freqs->new;
+
+	switch (event) {
+	case DEVFREQ_PRECHANGE:
+		if (parent->previous_freq > freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	case DEVFREQ_POSTCHANGE:
+		if (parent->previous_freq < freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * Governor lifecycle: on START, register this device on the parent's
+ * transition notifier chain; on STOP, unregister.
+ */
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent = (struct devfreq *)p_data->parent;
+	struct notifier_block *nb = &p_data->nb;
+	int ret = 0;
+
+	if (!parent)
+		return -EPROBE_DEFER;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		if (!p_data->this)
+			p_data->this = devfreq;
+
+		nb->notifier_call = devfreq_passive_notifier_call;
+		ret = devfreq_register_notifier(parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER);
+		break;
+	case DEVFREQ_GOV_STOP:
+		WARN_ON(devfreq_unregister_notifier(parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER));
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* immutable = 1: a passive device can never switch to another governor */
+static struct devfreq_governor devfreq_passive = {
+	.name = "passive",
+	.immutable = 1,
+	.get_target_freq = devfreq_passive_get_target_freq,
+	.event_handler = devfreq_passive_event_handler,
+};
+
+static int __init devfreq_passive_init(void)
+{
+	return devfreq_add_governor(&devfreq_passive);
+}
+subsys_initcall(devfreq_passive_init);
+
+static void __exit devfreq_passive_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_passive);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+}
+module_exit(devfreq_passive_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ Passive governor")
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor_performance.c b/src/kernel/linux/v4.14/drivers/devfreq/governor_performance.c
new file mode 100644
index 0000000..c72f942
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor_performance.c
@@ -0,0 +1,67 @@
+/*
+ *  linux/drivers/devfreq/governor_performance.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static int devfreq_performance_func(struct devfreq *df,
+				    unsigned long *freq)	/* governor target: always pick the maximum frequency */
+{
+	/*
+	 * target callback should be able to get floor value as
+	 * said in devfreq.h
+	 */
+	if (!df->max_freq)
+		*freq = UINT_MAX;	/* no user cap set: request the highest possible */
+	else
+		*freq = df->max_freq;
+	return 0;
+}
+
+static int devfreq_performance_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {	/* jump to max once at governor start; no periodic polling needed */
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_performance = {
+	.name = "performance",
+	.get_target_freq = devfreq_performance_func,
+	.event_handler = devfreq_performance_handler,
+};
+
+static int __init devfreq_performance_init(void)
+{
+	return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_performance);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+	return;	/* NOTE(review): redundant return in void function */
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor_powersave.c b/src/kernel/linux/v4.14/drivers/devfreq/governor_powersave.c
new file mode 100644
index 0000000..0c6bed5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor_powersave.c
@@ -0,0 +1,64 @@
+/*
+ *  linux/drivers/devfreq/governor_powersave.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static int devfreq_powersave_func(struct devfreq *df,
+				  unsigned long *freq)	/* governor target: always pick the minimum frequency */
+{
+	/*
+	 * target callback should be able to get ceiling value as
+	 * said in devfreq.h
+	 */
+	*freq = df->min_freq;	/* may be 0 if no floor set; target callback treats it as a ceiling hint */
+	return 0;
+}
+
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {	/* drop to min once at governor start; no periodic polling needed */
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_powersave = {
+	.name = "powersave",
+	.get_target_freq = devfreq_powersave_func,
+	.event_handler = devfreq_powersave_handler,
+};
+
+static int __init devfreq_powersave_init(void)
+{
+	return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_powersave);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+	return;	/* NOTE(review): redundant return in void function */
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor_simpleondemand.c b/src/kernel/linux/v4.14/drivers/devfreq/governor_simpleondemand.c
new file mode 100644
index 0000000..ae72ba5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor_simpleondemand.c
@@ -0,0 +1,150 @@
+/*
+ *  linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include "governor.h"
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD	(90)	/* % busy above which we go to max */
+#define DFSO_DOWNDIFFERENCTIAL	(5)	/* hysteresis band below upthreshold */
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+					unsigned long *freq)	/* pick target freq from recent busy/total load */
+{
+	int err;
+	struct devfreq_dev_status *stat;
+	unsigned long long a, b;
+	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+	struct devfreq_simple_ondemand_data *data = df->data;
+	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+
+	err = devfreq_update_stats(df);	/* refresh busy/total counters from the driver */
+	if (err)
+		return err;
+
+	stat = &df->last_status;
+
+	if (data) {	/* optional per-device tuning overrides the defaults */
+		if (data->upthreshold)
+			dfso_upthreshold = data->upthreshold;
+		if (data->downdifferential)
+			dfso_downdifferential = data->downdifferential;
+	}
+	if (dfso_upthreshold > 100 ||
+	    dfso_upthreshold < dfso_downdifferential)
+		return -EINVAL;	/* thresholds are percentages and must nest */
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat->total_time == 0) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Prevent overflow */
+	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
+		stat->busy_time >>= 7;	/* scale both counters: ratio is preserved */
+		stat->total_time >>= 7;
+	}
+
+	/* Set MAX if it's busy enough */
+	if (stat->busy_time * 100 >
+	    stat->total_time * dfso_upthreshold) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Set MAX if we do not know the initial frequency */
+	if (stat->current_frequency == 0) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Keep the current frequency */
+	if (stat->busy_time * 100 >
+	    stat->total_time * (dfso_upthreshold - dfso_downdifferential)) {
+		*freq = stat->current_frequency;
+		return 0;
+	}
+
+	/* Set the desired frequency based on the load */
+	a = stat->busy_time;
+	a *= stat->current_frequency;	/* 64-bit product: busy * cur_freq */
+	b = div_u64(a, stat->total_time);
+	b *= 100;
+	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));	/* division binds to downdifferential only: midpoint of the band */
+	*freq = (unsigned long) b;
+
+	if (df->min_freq && *freq < df->min_freq)	/* clamp into [min_freq, max_freq] */
+		*freq = df->min_freq;
+	if (df->max_freq && *freq > df->max_freq)
+		*freq = df->max_freq;
+
+	return 0;
+}
+
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)	/* drive the periodic load monitor from governor events */
+{
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq_monitor_start(devfreq);	/* begin periodic polling */
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		devfreq_monitor_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		devfreq_interval_update(devfreq, (unsigned int *)data);	/* data = new polling interval in ms */
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
+	.name = "simple_ondemand",
+	.get_target_freq = devfreq_simple_ondemand_func,
+	.event_handler = devfreq_simple_ondemand_handler,
+};
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+	return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+	return;	/* NOTE(review): redundant return in void function */
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/governor_userspace.c b/src/kernel/linux/v4.14/drivers/devfreq/governor_userspace.c
new file mode 100644
index 0000000..77028c2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,167 @@
+/*
+ *  linux/drivers/devfreq/governor_userspace.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct userspace_data {
+	unsigned long user_frequency;	/* last frequency written via the set_freq sysfs node */
+	bool valid;	/* true once userspace has set a frequency at least once */
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)	/* target = user's requested freq, clamped */
+{
+	struct userspace_data *data = df->data;
+
+	if (data->valid) {
+		unsigned long adjusted_freq = data->user_frequency;
+
+		if (df->max_freq && adjusted_freq > df->max_freq)	/* clamp into the device's [min, max] window */
+			adjusted_freq = df->max_freq;
+
+		if (df->min_freq && adjusted_freq < df->min_freq)
+			adjusted_freq = df->min_freq;
+
+		*freq = adjusted_freq;
+	} else {
+		*freq = df->previous_freq; /* No user freq specified yet */
+	}
+	return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)	/* sysfs write: record wanted freq and retarget */
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	unsigned long wanted;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	sscanf(buf, "%lu", &wanted);	/* NOTE(review): return value unchecked; malformed input leaves 'wanted' uninitialized (kstrtoul would reject it) */
+	data->user_frequency = wanted;
+	data->valid = true;
+	err = update_devfreq(devfreq);	/* re-run governor with the new request */
+	if (err == 0)
+		err = count;	/* sysfs convention: report bytes consumed on success */
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+			 char *buf)	/* sysfs read: report the user-requested freq, or "undefined" */
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	if (data->valid)
+		err = sprintf(buf, "%lu\n", data->user_frequency);
+	else
+		err = sprintf(buf, "undefined\n");
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);	/* <devfreq>/userspace/set_freq */
+static struct attribute *dev_entries[] = {
+	&dev_attr_set_freq.attr,
+	NULL,
+};
+static const struct attribute_group dev_attr_group = {
+	.name	= "userspace",
+	.attrs	= dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)	/* allocate per-device state and publish the sysfs group */
+{
+	int err = 0;
+	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+					      GFP_KERNEL);
+
+	if (!data) {
+		err = -ENOMEM;
+		goto out;
+	}
+	data->valid = false;	/* no user frequency set yet */
+	devfreq->data = data;
+
+	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+	return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)	/* tear down sysfs group and free per-device state */
+{
+	/*
+	 * Remove the sysfs entry, unless this is being called after
+	 * device_del(), which should have done this already via kobject_del().
+	 */
+	if (devfreq->dev.kobj.sd)
+		sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+
+	kfree(devfreq->data);
+	devfreq->data = NULL;	/* avoid a dangling pointer for later governors */
+}
+
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+			unsigned int event, void *data)	/* create/destroy the sysfs interface on governor start/stop */
+{
+	int ret = 0;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		ret = userspace_init(devfreq);
+		break;
+	case DEVFREQ_GOV_STOP:
+		userspace_exit(devfreq);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
+	.name = "userspace",
+	.get_target_freq = devfreq_userspace_func,
+	.event_handler = devfreq_userspace_handler,
+};
+
+static int __init devfreq_userspace_init(void)
+{
+	return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_userspace);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+	return;	/* NOTE(review): redundant return in void function */
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/Makefile b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/Makefile
new file mode 100644
index 0000000..afc3706
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/Makefile
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2018 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+obj-$(CONFIG_MACH_MT2731)	+= mt2731/
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/Makefile b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/Makefile
new file mode 100644
index 0000000..c3630db
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/Makefile
@@ -0,0 +1,26 @@
+
+#
+# Copyright (C) 2018 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+MTK_PLATFORM := $(subst ",,$(CONFIG_MTK_PLATFORM))
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat/$(MTK_PLATFORM)/include
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/dramc/$(MTK_PLATFORM)/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/emi/$(MTK_PLATFORM)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/emi/submodule/
+
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/spm/$(MTK_PLATFORM)/inc
+ccflags-y += -I$(srctree)/drivers/devfreq/
+
+obj-y	+= mtk-dvfsrc-opp.o mtk-dvfsrc-sysfs.o
+obj-y	+= mtk-dvfsrc_v2.o mtk-dvfsrc-$(MTK_PLATFORM).o mtk-dvfsrc-opp-$(MTK_PLATFORM).o
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.c b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.c
new file mode 100644
index 0000000..7d02225
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.c
@@ -0,0 +1,637 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/fb.h>
+#include <linux/notifier.h>
+#include <linux/string.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
+#include <linux/sched.h>
+
+
+#ifdef CONFIG_MTK_DRAMC
+#include <mtk_dramc.h>
+#endif
+#ifdef CONFIG_MTK_PMIC_COMMON
+#include <mt-plat/upmu_common.h>
+#endif
+#ifdef CONFIG_MTK_WATCHDOG
+#include <ext_wd_drv.h>
+#endif
+#ifdef CONFIG_MTK_EMI
+#include <mt_emi_api.h>
+#endif
+
+#include "mtk-dvfsrc.h"
+#include "mtk-dvfsrc-opp.h"
+#include "mtk-dvfsrc_reg.h"
+#include <mtk_spm_internal.h>
+#include <mtk_vcore_dvfs.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_MTK_AEE_FEATURE
+#include <mt-plat/aee.h>
+#endif
+#if 0
+#include <mmdvfs_mgr.h>
+#include <mtk_qos_sram.h>
+#endif
+
+static DEFINE_SPINLOCK(force_req_lock);	/* serializes all commit_data() / force sequences */
+static char opp_forced;	/* nonzero while the DVFSRC target is forced via DVFSRC_FORCE */
+
+static void dvfsrc_set_sw_req(int data, int mask, int shift)	/* update one field of the AP software request register */
+{
+	dvfsrc_rmw(DVFSRC_SW_REQ, data, mask, shift);
+}
+
+static int is_dvfsrc_forced(void)
+{
+	return opp_forced;
+}
+
+static void dvfsrc_set_force_start(int data)	/* force the HW to the level bitmask in 'data' */
+{
+	opp_forced = 1;
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);	/* gate output while reprogramming */
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+			FORCE_EN_TAR_MASK, FORCE_EN_TAR_SHIFT);
+	dvfsrc_rmw(DVFSRC_FORCE, data, TARGET_FORCE_MASK, TARGET_FORCE_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 1,
+			FORCE_EN_TAR_MASK, FORCE_EN_TAR_SHIFT);	/* re-arm force, then output */
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 1,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);
+}
+
+static void dvfsrc_set_force_end(void)	/* clear the forced target value; force mode itself stays armed */
+{
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+			FORCE_EN_TAR_MASK, FORCE_EN_TAR_SHIFT);
+	dvfsrc_rmw(DVFSRC_FORCE, 0, TARGET_FORCE_MASK, TARGET_FORCE_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 1,
+			FORCE_EN_TAR_MASK, FORCE_EN_TAR_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 1,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);
+}
+
+static void dvfsrc_release_force(void)	/* fully disable force mode and return to normal arbitration */
+{
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+				FORCE_EN_TAR_MASK, FORCE_EN_TAR_SHIFT);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 0,
+			FORCE_EN_CUR_MASK, FORCE_EN_CUR_SHIFT);
+
+	dvfsrc_write(DVFSRC_FORCE, 0);
+	dvfsrc_rmw(DVFSRC_BASIC_CONTROL, 1,
+				DVFSRC_OUT_EN_MASK, DVFSRC_OUT_EN_SHIFT);
+	opp_forced = 0;	/* forcing is over: waits in commit_data() resume */
+}
+
+int commit_data(int type, int data)	/* apply a DDR/Vcore/forced OPP request; ret comes from dvfsrc_wait_for_completion() */
+{
+	int ret = 0;
+	int level = 16, opp = 16;
+	unsigned long flags;
+	u32 current_level;
+
+	if (!is_dvfsrc_enabled())
+		return ret;	/* DVFSRC off: silently accept the request */
+
+	switch (type) {
+	case DVFSRC_DDR_OPP:
+		spin_lock_irqsave(&force_req_lock, flags);
+		if (data >= DDR_OPP_NUM || data < 0)
+			data = DDR_OPP_NUM - 1;	/* out-of-range request clamps to the lowest OPP */
+
+		opp = data;
+		level = DDR_OPP_NUM - data - 1;	/* OPP index -> HW level (reversed numbering) */
+
+		dvfsrc_set_sw_req(level, EMI_SW_AP_MASK, EMI_SW_AP_SHIFT);
+
+		if (!is_dvfsrc_forced()) {	/* while forced, HW ignores sw req, so don't wait */
+			ret = dvfsrc_wait_for_completion(
+					get_cur_ddr_opp() <= opp,
+					DVFSRC_TIMEOUT);	/* NOTE(review): polls with IRQs off under the spinlock — confirm DVFSRC_TIMEOUT is short */
+		}
+		spin_unlock_irqrestore(&force_req_lock, flags);
+		break;
+	case DVFSRC_VCORE_OPP:
+		spin_lock_irqsave(&force_req_lock, flags);
+		if (data >= VCORE_OPP_NUM || data < 0)
+			data = VCORE_OPP_NUM - 1;
+
+		opp = data;
+		level = VCORE_OPP_NUM - data - 1;	/* OPP index -> HW level (reversed numbering) */
+
+		dvfsrc_set_sw_req(level, VCORE_SW_AP_MASK, VCORE_SW_AP_SHIFT);
+
+		if (!is_dvfsrc_forced()) {
+			ret = dvfsrc_wait_for_completion(
+					get_cur_vcore_opp() <= opp,
+					DVFSRC_TIMEOUT);
+		}
+		spin_unlock_irqrestore(&force_req_lock, flags);
+		break;
+	case DVFSRC_VCORE_DVFS_FORCE_OPP:
+		spin_lock_irqsave(&force_req_lock, flags);
+		if (data >= VCORE_DVFS_OPP_NUM || data < 0)
+			data = VCORE_DVFS_OPP_NUM;	/* sentinel: VCORE_DVFS_OPP_NUM means "release the force" */
+
+		opp = data;
+		level = data;
+
+		if (opp == VCORE_DVFS_OPP_NUM) {
+			dvfsrc_release_force();
+			spin_unlock_irqrestore(&force_req_lock, flags);
+			break;
+		}
+		dvfsrc_set_force_start(1 << level);	/* one-hot level bitmask */
+		ret = dvfsrc_wait_for_completion(
+				get_cur_vcore_dvfs_opp() == opp,
+				DVFSRC_TIMEOUT);
+
+		dvfsrc_set_force_end();
+		spin_unlock_irqrestore(&force_req_lock, flags);
+		break;
+	case DVFSRC_VCORE_DVFS_FORCE_OPP_WORKAROUND:
+		spin_lock_irqsave(&force_req_lock, flags);
+
+		current_level = (dvfsrc_read(DVFSRC_LEVEL)
+			& CURRENT_LEVEL_MASK) >> CURRENT_LEVEL_SHIFT;
+
+		dvfsrc_set_force_start(current_level);	/* re-force to the level HW already reports */
+		ret = dvfsrc_wait_for_completion(
+				get_cur_vcore_dvfs_opp() == opp,
+				DVFSRC_TIMEOUT);	/* NOTE(review): 'opp' is still 16 here; confirm the wait condition is intended */
+
+		dvfsrc_set_force_end();
+
+		dvfsrc_release_force();
+
+		spin_unlock_irqrestore(&force_req_lock, flags);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+void dvfsrc_update_md_scenario(bool blank)	/* tell DVFSRC whether the display is off (modem turbo mask) */
+{
+	if (blank)
+		dvfsrc_write(DVFSRC_MD_TURBO, 0x1FFF0000);	/* screen off */
+	else
+		dvfsrc_write(DVFSRC_MD_TURBO, 0x00000000);	/* screen on */
+}
+
+static int dvfsrc_fb_notifier_call(struct notifier_block *self,
+		unsigned long event, void *data)	/* framebuffer blank/unblank hook */
+{
+	struct fb_event *evdata = data;
+	int blank;
+
+	if (event != FB_EVENT_BLANK)
+		return 0;	/* only interested in blank state changes */
+
+	blank = *(int *)evdata->data;
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		dvfsrc_update_md_scenario(false);
+		break;
+	case FB_BLANK_POWERDOWN:
+		dvfsrc_update_md_scenario(true);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block dvfsrc_fb_notifier = {
+	.notifier_call = dvfsrc_fb_notifier_call,
+};
+
+#ifdef AUTOK_ENABLE
+__weak int emmc_autok(void)	/* weak stubs: real autok implementations live in the MSDC driver */
+{
+	pr_info("NOT SUPPORT EMMC AUTOK(%s)\n", __func__);
+	return 0;
+}
+
+__weak int sd_autok(void)
+{
+	pr_info("NOT SUPPORT SD AUTOK(%s)\n", __func__);
+	return 0;
+}
+
+__weak int sdio_autok(void)
+{
+	pr_info("NOT SUPPORT SDIO AUTOK(%s)\n", __func__);
+	return 0;
+}
+
+
+void begin_autok_task(void)	/* pre-calibration hook; MMDVFS notification currently compiled out */
+{
+#if 0
+	struct mmdvfs_prepare_event evt_from_vcore = {
+		MMDVFS_EVENT_PREPARE_CALIBRATION_START};
+
+	/* notify MM DVFS for msdc autok start */
+	mmdvfs_notify_prepare_action(&evt_from_vcore);
+#endif
+}
+
+void finish_autok_task(void)	/* post-calibration hook: warn if a force OPP was left behind */
+{
+	int force;
+
+	/* check if dvfs force is released */
+	force = dvfsrc_get_reguest(DVFSRC_VCORE_DVFS_FORCE_OPP);
+#if 0
+	struct mmdvfs_prepare_event evt_from_vcore = {
+		MMDVFS_EVENT_PREPARE_CALIBRATION_END};
+
+	/* notify MM DVFS for msdc autok finish */
+	mmdvfs_notify_prepare_action(&evt_from_vcore);
+#endif
+	if (force >= 0 && force < 12)	/* NOTE(review): magic bound 12 — presumably VCORE_DVFS_OPP_NUM; confirm and name it */
+		pr_info("autok task not release force opp: %d\n", force);
+}
+
+void dvfsrc_autok_manager(void)	/* run eMMC/SD/SDIO tuning once DVFSRC is up */
+{
+	int r = 0;
+
+	begin_autok_task();
+
+	r = emmc_autok();
+	pr_info("EMMC autok done: %s\n", (r == 0) ? "Yes" : "No");
+
+	r = sd_autok();
+	pr_info("SD autok done: %s\n", (r == 0) ? "Yes" : "No");
+
+	r = sdio_autok();
+	pr_info("SDIO autok done: %s\n", (r == 0) ? "Yes" : "No");
+
+	finish_autok_task();
+}
+#endif
+
+
+static irqreturn_t dvfsrc_interrupt(int irq, void *dev_id)	/* unexpected-IRQ handler: dump on timeout, then clear */
+{
+	u32 val;
+
+	pr_info("[DVFSRC] IRQ HANDLER SHOULD NOT BE EXECUTED\n");
+	val = dvfsrc_read(DVFSRC_INT);
+	if (val & TIMEOUT_INT_MASK)
+		dvfsrc_dump_reg(NULL);	/* timeout interrupt: dump state for diagnosis */
+
+	dvfsrc_write(DVFSRC_INT_CLR, dvfsrc_read(DVFSRC_INT));	/* ack all pending bits */
+	dvfsrc_write(DVFSRC_INT_CLR, 0x0);
+
+	return IRQ_HANDLED;
+}
+
+int dvfsrc_platform_init(struct dvfsrc *dvfsrc)	/* MT2731 bring-up: watchdog cfg, enable, autok, fb + IRQ hookup */
+{
+	struct device_node *node;
+	int irq;
+	int ret;
+
+#ifdef CONFIG_MTK_WATCHDOG
+	mtk_rgu_cfg_dvfsrc(1);
+#endif
+
+	dvfsrc_enable(1);
+
+#ifdef AUTOK_ENABLE
+	dvfsrc_autok_manager();
+#endif
+	fb_register_client(&dvfsrc_fb_notifier);	/* track screen on/off for the MD turbo mask */
+
+	node = of_find_compatible_node(NULL, NULL, "mediatek,dvfsrc");
+	if (!node)
+		pr_info("[DVFSRC] find DVSFRC node failed\n");	/* NOTE(review): only logs — NULL node is still used below */
+
+	irq = irq_of_parse_and_map(node, 0);	/* NOTE(review): called even when node == NULL */
+	if (!irq)
+		pr_info("[DVFSRC] get DVFSRC IRQ failed\n");
+
+	ret = request_irq(irq, dvfsrc_interrupt,
+		IRQF_TRIGGER_HIGH, "DVFSRC", NULL);
+
+	if (ret)
+		pr_info("[DVFSRC] FAILED TO REQUEST IRQ(%d)\n", ret);
+
+	return 0;	/* always reports success; failures above are log-only */
+}
+
+void get_opp_info(char *p)	/* print Vcore voltage and DDR rate into caller's buffer (no bounds check) */
+{
+	int vcore_uv = 0;
+#ifdef CONFIG_MTK_PMIC_COMMON
+	int pmic_val = 0;
+
+	pmic_val = pmic_get_register_value(PMIC_VCORE_ADDR);
+	vcore_uv = vcore_pmic_to_uv(pmic_val);	/* raw PMIC code -> microvolts */
+#endif
+#if defined(CONFIG_MTK_DRAMC)
+	unsigned int ddr_hz = get_dram_data_rate();	/* NOTE(review): declaration after statements when PMIC_COMMON is set — not valid C90 */
+	int ddr_type = get_ddr_type();
+#endif
+
+	p += sprintf(p, "%-24s: %-8u uv  (PMIC: 0x%x)\n",
+			"Vcore", vcore_uv, vcore_uv_to_pmic(vcore_uv));
+
+#if defined(CONFIG_MTK_DRAMC)
+	if (ddr_type == TYPE_LPDDR4X)
+		p += sprintf(p, "%-24s: LPDDR4X %-8u hz\n", "DDR", ddr_hz);
+	else if (ddr_type == TYPE_LPDDR2)
+		p += sprintf(p, "%-24s: LPDDR2 %-8u hz\n", "DDR", ddr_hz);
+	else
+		p += sprintf(p, "%-24s: UNKNOWN DDR %-8u hz\n", "DDR", ddr_hz);
+#endif
+}
+
+void get_dvfsrc_reg(char *p)	/* dump request/control registers into caller's buffer (unbounded sprintf) */
+{
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_BASIC_CONTROL",
+			dvfsrc_read(DVFSRC_BASIC_CONTROL));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_SW_REQ",
+			dvfsrc_read(DVFSRC_SW_REQ));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_SW_REQ2",
+			dvfsrc_read(DVFSRC_SW_REQ2));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_SEC_SW_REQ",
+			dvfsrc_read(DVFSRC_SEC_SW_REQ));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x\n",
+			"DVFSRC_VCORE_REQUEST(2)",
+			dvfsrc_read(DVFSRC_VCORE_REQUEST),
+			dvfsrc_read(DVFSRC_VCORE_REQUEST2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_VCORE_MD2SPM0~2",
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM0),
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM1),
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_VCORE_MD2SPM0~2_T",
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM0_T),
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM1_T),
+			dvfsrc_read(DVFSRC_VCORE_MD2SPM2_T));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_MD_REQUEST",
+			dvfsrc_read(DVFSRC_MD_REQUEST));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_INT",
+			dvfsrc_read(DVFSRC_INT));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_INT_EN",
+			dvfsrc_read(DVFSRC_INT_EN));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_LEVEL",
+			dvfsrc_read(DVFSRC_LEVEL));
+	p += sprintf(p, "%-24s: %d, %d, %d, %d, %d\n",
+			"DVFSRC_SW_BW_0~4",
+			dvfsrc_read(DVFSRC_SW_BW_0),
+			dvfsrc_read(DVFSRC_SW_BW_1),
+			dvfsrc_read(DVFSRC_SW_BW_2),
+			dvfsrc_read(DVFSRC_SW_BW_3),
+			dvfsrc_read(DVFSRC_SW_BW_4));
+}
+
+void get_dvfsrc_record(char *p)	/* dump transition history/record registers into caller's buffer (unbounded sprintf) */
+{
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_FORCE",
+			dvfsrc_read(DVFSRC_FORCE));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_SEC_SW_REQ",
+			dvfsrc_read(DVFSRC_SEC_SW_REQ));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_LAST",
+			dvfsrc_read(DVFSRC_LAST));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_MD_SCENARIO",
+			dvfsrc_read(DVFSRC_MD_SCENARIO));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_0_0~0_2",
+			dvfsrc_read(DVFSRC_RECORD_0_0),
+			dvfsrc_read(DVFSRC_RECORD_0_1),
+			dvfsrc_read(DVFSRC_RECORD_0_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_1_0~1_2",
+			dvfsrc_read(DVFSRC_RECORD_1_0),
+			dvfsrc_read(DVFSRC_RECORD_1_1),
+			dvfsrc_read(DVFSRC_RECORD_1_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_2_0~2_2",
+			dvfsrc_read(DVFSRC_RECORD_2_0),
+			dvfsrc_read(DVFSRC_RECORD_2_1),
+			dvfsrc_read(DVFSRC_RECORD_2_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_3_0~3_2",
+			dvfsrc_read(DVFSRC_RECORD_3_0),
+			dvfsrc_read(DVFSRC_RECORD_3_1),
+			dvfsrc_read(DVFSRC_RECORD_3_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_4_0~4_2",
+			dvfsrc_read(DVFSRC_RECORD_4_0),
+			dvfsrc_read(DVFSRC_RECORD_4_1),
+			dvfsrc_read(DVFSRC_RECORD_4_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_5_0~5_2",
+			dvfsrc_read(DVFSRC_RECORD_5_0),
+			dvfsrc_read(DVFSRC_RECORD_5_1),
+			dvfsrc_read(DVFSRC_RECORD_5_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_6_0~6_2",
+			dvfsrc_read(DVFSRC_RECORD_6_0),
+			dvfsrc_read(DVFSRC_RECORD_6_1),
+			dvfsrc_read(DVFSRC_RECORD_6_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_7_0~7_2",
+			dvfsrc_read(DVFSRC_RECORD_7_0),
+			dvfsrc_read(DVFSRC_RECORD_7_1),
+			dvfsrc_read(DVFSRC_RECORD_7_2));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_MD_0~3",
+			dvfsrc_read(DVFSRC_RECORD_MD_0),
+			dvfsrc_read(DVFSRC_RECORD_MD_1),
+			dvfsrc_read(DVFSRC_RECORD_MD_2),
+			dvfsrc_read(DVFSRC_RECORD_MD_3));
+	p += sprintf(p, "%-24s: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+			"DVFSRC_RECORD_MD_4~7",
+			dvfsrc_read(DVFSRC_RECORD_MD_4),
+			dvfsrc_read(DVFSRC_RECORD_MD_5),
+			dvfsrc_read(DVFSRC_RECORD_MD_6),
+			dvfsrc_read(DVFSRC_RECORD_MD_7));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_RECORD_COUNT",
+			dvfsrc_read(DVFSRC_RECORD_COUNT));
+	p += sprintf(p, "%-24s: 0x%08x\n",
+			"DVFSRC_RSRV_0",
+			dvfsrc_read(DVFSRC_RSRV_0));
+}
+
+/* met profile table */
+unsigned int met_vcorefs_info[INFO_MAX];	/* filled by vcorefs_get_opp_info() */
+unsigned int met_vcorefs_src[SRC_MAX];	/* filled by vcorefs_get_src_req() */
+
+char *met_info_name[INFO_MAX] = {	/* labels, index-matched to enum met_info_index */
+	"OPP",
+	"FREQ",
+	"VCORE",
+	"SPM_LEVEL",
+};
+
+char *met_src_name[SRC_MAX] = {	/* labels, index-matched to enum met_src_index */
+	"MD2SPM",
+	"QOS_EMI_LEVEL",
+	"QOS_VCORE_LEVEL",
+	"CM_MGR_LEVEL",
+	"TOTAL_EMI_LEVEL_1",
+	"TOTAL_EMI_LEVEL_2",
+	"TOTAL_EMI_RESULT",
+	"QOS_BW_LEVEL1",
+	"QOS_BW_LEVEL2",
+	"QOS_BW_RESULT",
+	"SCP_VCORE_LEVEL",
+};
+
+/* met profile function */
+int vcorefs_get_num_opp(void)	/* exported accessors for the MET profiler */
+{
+	return VCORE_DVFS_OPP_NUM;
+}
+EXPORT_SYMBOL(vcorefs_get_num_opp);
+
+
+int vcorefs_get_opp_info_num(void)
+{
+	return INFO_MAX;
+}
+EXPORT_SYMBOL(vcorefs_get_opp_info_num);
+
+int vcorefs_get_src_req_num(void)
+{
+	return SRC_MAX;
+}
+EXPORT_SYMBOL(vcorefs_get_src_req_num);
+
+char **vcorefs_get_opp_info_name(void)
+{
+	return met_info_name;
+}
+EXPORT_SYMBOL(vcorefs_get_opp_info_name);
+
+char **vcorefs_get_src_req_name(void)
+{
+	return met_src_name;
+}
+EXPORT_SYMBOL(vcorefs_get_src_req_name);
+
+unsigned int *vcorefs_get_opp_info(void)	/* snapshot current OPP/freq/voltage/SPM level for MET */
+{
+	met_vcorefs_info[INFO_OPP_IDX] = get_cur_vcore_dvfs_opp();
+	met_vcorefs_info[INFO_FREQ_IDX] = get_cur_ddr_khz();
+	met_vcorefs_info[INFO_VCORE_IDX] = get_cur_vcore_uv();
+	met_vcorefs_info[INFO_SPM_LEVEL_IDX] = spm_get_dvfs_level();
+	return met_vcorefs_info;	/* shared static buffer: not reentrant */
+}
+EXPORT_SYMBOL(vcorefs_get_opp_info);
+
+__weak void dvfsrc_trace_dbg_show_request(int dvfsrc_class)	/* overridden by the trace-debug module when present */
+{
+	pr_info("NOT SUPPORT %s\n", __func__);
+}
+
+static DEFINE_RATELIMIT_STATE(tracelimit, 5 * HZ, 1);	/* at most one trace burst per 5 s */
+
+static void vcorefs_trace_qos(void)
+{
+	if (__ratelimit(&tracelimit)) {
+		dvfsrc_trace_dbg_show_request(DVFSRC_DDR_OPP);
+		dvfsrc_trace_dbg_show_request(DVFSRC_VCORE_OPP);
+	}
+}
+
+unsigned int *vcorefs_get_src_req(void)	/* snapshot all DVFS request sources into the MET source table */
+{
+	unsigned int qos_total_bw = dvfsrc_read(DVFSRC_SW_BW_0) +	/* sum of the five SW bandwidth requests */
+			   dvfsrc_read(DVFSRC_SW_BW_1) +
+			   dvfsrc_read(DVFSRC_SW_BW_2) +
+			   dvfsrc_read(DVFSRC_SW_BW_3) +
+			   dvfsrc_read(DVFSRC_SW_BW_4);
+#ifdef CONFIG_MTK_EMI
+	unsigned int total_bw_status = get_emi_bwst(0);
+	unsigned int total_bw_last = (get_emi_bwvl(0) & 0x7F) * 813;	/* NOTE(review): 813 looks like a BW unit scale factor — confirm against EMI docs */
+#endif
+	unsigned int qos0_thres = dvfsrc_read(DVFSRC_EMI_QOS0);
+	unsigned int qos1_thres = dvfsrc_read(DVFSRC_EMI_QOS1);
+	unsigned int sw_req = dvfsrc_read(DVFSRC_SW_REQ);
+
+	met_vcorefs_src[SRC_MD2SPM_IDX] =
+		spm_vcorefs_get_MD_status();
+
+	met_vcorefs_src[SRC_QOS_EMI_LEVEL_IDX] =
+		(sw_req >> EMI_SW_AP_SHIFT) & EMI_SW_AP_MASK;	/* NOTE(review): masking after shifting assumes the *_MASK macros are unshifted — verify against dvfsrc_rmw() usage */
+
+	met_vcorefs_src[SRC_QOS_VCORE_LEVEL_IDX] =
+		(sw_req >> VCORE_SW_AP_SHIFT) & VCORE_SW_AP_MASK;
+
+	met_vcorefs_src[SRC_CM_MGR_LEVEL_IDX] =
+		(dvfsrc_read(DVFSRC_SW_REQ2) >> EMI_SW_AP2_SHIFT) &
+			EMI_SW_AP2_MASK;
+
+#ifdef CONFIG_MTK_EMI
+	met_vcorefs_src[SRC_TOTAL_EMI_LEVEL_1_IDX] =
+		total_bw_status & 0x1;
+	met_vcorefs_src[SRC_TOTAL_EMI_LEVEL_2_IDX] =
+		(total_bw_status >> 1) & 0x1;
+	met_vcorefs_src[SRC_TOTAL_EMI_RESULT_IDX] =
+		total_bw_last;
+#endif
+	met_vcorefs_src[SRC_QOS_BW_LEVEL1_IDX] =
+		(qos_total_bw >= qos0_thres) ? 1 : 0;	/* SW bandwidth vs QoS thresholds */
+	met_vcorefs_src[SRC_QOS_BW_LEVEL2_IDX] =
+		(qos_total_bw >= qos1_thres) ? 1 : 0;
+	met_vcorefs_src[SRC_QOS_BW_RESUT_IDX] =
+		qos_total_bw * 100;
+
+	met_vcorefs_src[SRC_SCP_VCORE_LEVEL_IDX] =
+	(dvfsrc_read(DVFSRC_VCORE_REQUEST) >> VCORE_SCP_GEAR_SHIFT) &
+	VCORE_SCP_GEAR_MASK;
+
+	vcorefs_trace_qos();	/* rate-limited debug trace of the current requests */
+
+	return met_vcorefs_src;	/* shared static buffer: not reentrant */
+}
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.h b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.h
new file mode 100644
index 0000000..ff070e9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-mt2731.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef __MTK_DVFSRC_MT2731_H
+#define __MTK_DVFSRC_MT2731_H
+
+#ifdef CONFIG_MTK_PMIC_COMMON
+#include <mach/upmu_hw.h>
+#endif
+
+#ifdef CONFIG_MTK_PMIC_COMMON
+#define PMIC_VCORE_ADDR		PMIC_RG_BUCK_VCORE_VOSEL
+#endif
+
+/* Vcore PMIC scaling: uV = VCORE_BASE_UV + sel * VCORE_STEP_UV. */
+#define VCORE_BASE_UV		500000
+#define VCORE_STEP_UV		6250
+
+/* NOTE(review): presumably enables the MSDC autok tuning flow - confirm. */
+#define AUTOK_ENABLE
+
+/*
+ * Read-modify-write a DVFSRC register: clear the bits in @mask, then OR in
+ * @val shifted by @shift.  NOTE(review): @val is not masked before the
+ * shift, so callers must pass a value that already fits the field.
+ */
+#define dvfsrc_rmw(offset, val, mask, shift) \
+	dvfsrc_write(offset, (dvfsrc_read(offset) & ~(mask)) \
+			| (val << shift))
+
+/* met profile table index */
+enum met_info_index {
+	INFO_OPP_IDX = 0,
+	INFO_FREQ_IDX,
+	INFO_VCORE_IDX,
+	INFO_SPM_LEVEL_IDX,
+	INFO_MAX,
+};
+
+/* Index into met_vcorefs_src[] - see vcorefs_get_src_req(). */
+enum met_src_index {
+	SRC_MD2SPM_IDX = 0,
+	SRC_QOS_EMI_LEVEL_IDX,
+	SRC_QOS_VCORE_LEVEL_IDX,
+	SRC_CM_MGR_LEVEL_IDX,
+	SRC_TOTAL_EMI_LEVEL_1_IDX,
+	SRC_TOTAL_EMI_LEVEL_2_IDX,
+	SRC_TOTAL_EMI_RESULT_IDX,
+	SRC_QOS_BW_LEVEL1_IDX,
+	SRC_QOS_BW_LEVEL2_IDX,
+	SRC_QOS_BW_RESUT_IDX,
+	SRC_SCP_VCORE_LEVEL_IDX,
+	SRC_MAX
+};
+
+extern int commit_data(int type, int data);
+
+#endif /* __MTK_DVFSRC_MT2731_H */
+
+
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.c b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.c
new file mode 100644
index 0000000..3b91f33
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/kernel.h>
+#include <mtk_spm_internal.h>
+
+#include "mtk-dvfsrc-opp.h"
+#ifdef CONFIG_MTK_DRAMC
+#include <mtk_dramc.h>
+#endif
+
+/*
+ * Weak fallback used when the dramc driver is not linked in: log the miss
+ * and return -1 so callers see an invalid frequency instead of crashing.
+ */
+int __weak dram_steps_freq(unsigned int step)
+{
+	pr_info("get dram steps_freq fail\n");
+	return -1;
+}
+
+/*
+ * ddr_level_to_step - map a DDR OPP index to the dramc frequency step
+ * passed to dram_steps_freq().
+ *
+ * The step table is fixed for this platform.  Out-of-range indices now
+ * fall back to step 0 instead of reading past the end of the lookup
+ * table (the original indexed the 6-entry array unchecked).
+ */
+int ddr_level_to_step(int opp)
+{
+	static const unsigned int step[] = {0, 8, 6, 4, 2, 7};
+
+	if (opp < 0 || opp >= (int)ARRAY_SIZE(step))
+		return 0;
+
+	return step[opp];
+}
+
+/*
+ * dvfsrc_opp_level_mapping - set up the fixed OPP mappings for MT2731.
+ *
+ * Programs the pwrap command slot per vcore OPP, records the two Vcore
+ * voltages (800 mV / 750 mV), and maps every vcore-dvfs OPP to its vcore
+ * and DDR OPP.  Both DVFS OPPs use DDR_OPP_0 on this platform.
+ */
+void dvfsrc_opp_level_mapping(void)
+{
+	int vcore_opp_0_uv, vcore_opp_1_uv;
+
+	set_pwrap_cmd(VCORE_OPP_0, 1);
+	set_pwrap_cmd(VCORE_OPP_1, 0);
+	/* Hard-coded voltages; also pushed to pwrap via set_vcore_uv_table(). */
+	vcore_opp_0_uv = 800000;
+	vcore_opp_1_uv = 750000;
+	pr_info("%s: FINAL vcore_opp_uv: %d, %d\n",
+			__func__,
+			vcore_opp_0_uv,
+			vcore_opp_1_uv);
+
+	set_vcore_uv_table(VCORE_OPP_0, vcore_opp_0_uv);
+	set_vcore_uv_table(VCORE_OPP_1, vcore_opp_1_uv);
+
+	/* DVFS OPP 0 is the low-voltage point, OPP 1 the high-voltage one. */
+	set_vcore_opp(VCORE_DVFS_OPP_0, VCORE_OPP_1);
+	set_vcore_opp(VCORE_DVFS_OPP_1, VCORE_OPP_0);
+
+	set_ddr_opp(VCORE_DVFS_OPP_0, DDR_OPP_0);
+	set_ddr_opp(VCORE_DVFS_OPP_1, DDR_OPP_0);
+
+}
+
+/*
+ * dvfsrc_opp_table_init - rebuild the vcore-dvfs OPP table.
+ *
+ * For every vcore-dvfs OPP, look up its vcore and DDR OPPs and record the
+ * matching vcore voltage (uV) and DRAM frequency (kHz).  Entries whose
+ * vcore or DDR OPP is unrequested are recorded as 0/0.
+ */
+void dvfsrc_opp_table_init(void)
+{
+	int opp;
+
+	for (opp = 0; opp < VCORE_DVFS_OPP_NUM; opp++) {
+		int vcore = get_vcore_opp(opp);
+		int ddr = get_ddr_opp(opp);
+		int uv = 0;
+		int khz = 0;
+
+		if (vcore != VCORE_OPP_UNREQ && ddr != DDR_OPP_UNREQ) {
+			uv = get_vcore_uv_table(vcore);
+			khz = dram_steps_freq(ddr_level_to_step(ddr)) * 1000;
+		}
+		set_opp_table(opp, uv, khz);
+	}
+}
+
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.h b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.h
new file mode 100644
index 0000000..2bfed87
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp-mt2731.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef __MTK_DVFSRC_OPP_MT2731_H
+#define __MTK_DVFSRC_OPP_MT2731_H
+
+/* "Unrequested" sentinel value used by the DVFSRC force/default paths. */
+#define DVFSRC_DDR_OPP_DEFAULT_VALUE			16
+#define DVFSRC_VCORE_OPP_DEFAULT_VALUE			16
+#define DVFSRC_VCORE_DVFS_FORCE_OPP_DEFAULT_VALUE	16
+
+
+/* DVFSRC request classes; DVFSRC_UNREQ releases a previous request. */
+enum {
+	DVFSRC_DDR_OPP = 0,
+	DVFSRC_VCORE_OPP,
+	DVFSRC_VCORE_DVFS_FORCE_OPP,
+	DVFSRC_VCORE_DVFS_FORCE_OPP_WORKAROUND,
+	DVFSRC_NUM_CLASSES,
+	DVFSRC_UNREQ = 16,
+};
+
+/* Maps a DDR OPP to the dramc step number (mtk-dvfsrc-opp-mt2731.c). */
+int ddr_level_to_step(int opp);
+
+/* DDR operating points; MT2731 exposes a single DDR OPP. */
+enum ddr_opp {
+	DDR_OPP_0 = 0,
+	DDR_OPP_NUM,
+	DDR_OPP_UNREQ = DVFSRC_DDR_OPP_DEFAULT_VALUE,
+};
+
+/* Vcore operating points (OPP 0 = 800 mV, OPP 1 = 750 mV on MT2731). */
+enum vcore_opp {
+	VCORE_OPP_0 = 0,
+	VCORE_OPP_1,
+	VCORE_OPP_NUM,
+	VCORE_OPP_UNREQ = DVFSRC_VCORE_OPP_DEFAULT_VALUE,
+};
+
+/* Combined vcore-dvfs operating points (vcore OPP + DDR OPP pair). */
+enum vcore_dvfs_opp {
+	VCORE_DVFS_OPP_0 = 0,
+	VCORE_DVFS_OPP_1,
+	VCORE_DVFS_OPP_2,
+	VCORE_DVFS_OPP_3,
+	VCORE_DVFS_OPP_NUM,
+	VCORE_DVFS_OPP_UNREQ = DVFSRC_VCORE_DVFS_FORCE_OPP_DEFAULT_VALUE,
+};
+
+#endif /* __MTK_DVFSRC_OPP_MT2731_H */
+
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.c b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.c
new file mode 100644
index 0000000..e7ea6f2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include "mtk-dvfsrc.h"
+#include "mtk-dvfsrc-opp.h"
+
+/* vcore-dvfs OPP -> {vcore uV, DDR kHz}, built by dvfsrc_opp_table_init(). */
+static struct opp_profile opp_table[VCORE_DVFS_OPP_NUM];
+/* vcore-dvfs OPP -> vcore OPP / DDR OPP mapping tables. */
+static int vcore_dvfs_to_vcore_opp[VCORE_DVFS_OPP_NUM];
+static int vcore_dvfs_to_ddr_opp[VCORE_DVFS_OPP_NUM];
+/* vcore OPP -> voltage (uV) and pwrap command slot. */
+static int vcore_uv_table[VCORE_OPP_NUM];
+static int vcore_opp_to_pwrap_cmd[VCORE_OPP_NUM];
+/* DDR OPP -> frequency, set via set_opp_ddr_freq(). */
+static int ddr_table[DDR_OPP_NUM];
+
+
+/* ToDo: Copy Opp Table to AEE Dump */
+/*
+ * Current vcore-dvfs OPP derived from the SPM DVFS level bitmask.
+ * Returns VCORE_DVFS_OPP_NUM when no level bit is set.
+ */
+int get_cur_vcore_dvfs_opp(void)
+{
+#if defined(VCOREFS_LEVEL_POSITIVE)
+	/* Bit n set means OPP n directly. */
+	int val = __builtin_ffs(spm_get_dvfs_level());
+
+	if (val == 0)
+		return VCORE_DVFS_OPP_NUM;
+	else
+		return val - 1;
+#else
+	/* Levels run the other way: bit n set means OPP (NUM - 1 - n). */
+	return VCORE_DVFS_OPP_NUM - __builtin_ffs(spm_get_dvfs_level());
+#endif
+}
+
+/* Record the vcore voltage / DDR frequency pair for a vcore-dvfs OPP. */
+void set_opp_table(int vcore_dvfs_opp, int vcore_uv, int ddr_khz)
+{
+	opp_table[vcore_dvfs_opp].vcore_uv = vcore_uv;
+	opp_table[vcore_dvfs_opp].ddr_khz = ddr_khz;
+}
+
+/* Map a vcore-dvfs OPP to its vcore OPP. */
+void set_vcore_opp(int vcore_dvfs_opp, int vcore_opp)
+{
+	vcore_dvfs_to_vcore_opp[vcore_dvfs_opp] = vcore_opp;
+}
+
+int get_vcore_opp(int opp)
+{
+	return vcore_dvfs_to_vcore_opp[opp];
+}
+
+/* Vcore voltage (uV) recorded for a vcore-dvfs OPP. */
+int get_vcore_uv(int opp)
+{
+	return opp_table[opp].vcore_uv;
+}
+
+/*
+ * Current vcore OPP; VCORE_OPP_UNREQ when the DVFSRC is disabled or the
+ * SPM level does not map to a valid vcore-dvfs OPP.
+ */
+int get_cur_vcore_opp(void)
+{
+	int idx;
+
+	if (!is_dvfsrc_enabled())
+		return VCORE_OPP_UNREQ;
+
+	idx = get_cur_vcore_dvfs_opp();
+
+	if (idx >= VCORE_DVFS_OPP_NUM)
+		return VCORE_OPP_UNREQ;
+	return vcore_dvfs_to_vcore_opp[idx];
+}
+
+/* Current vcore voltage in uV, or 0 when it cannot be determined. */
+int get_cur_vcore_uv(void)
+{
+	int idx;
+
+	if (!is_dvfsrc_enabled())
+		return 0;
+
+	idx = get_cur_vcore_dvfs_opp();
+
+	if (idx >= VCORE_DVFS_OPP_NUM)
+		return 0;
+	return opp_table[idx].vcore_uv;
+}
+
+/* Map a vcore-dvfs OPP to its DDR OPP. */
+void set_ddr_opp(int vcore_dvfs_opp, int ddr_opp)
+{
+	vcore_dvfs_to_ddr_opp[vcore_dvfs_opp] = ddr_opp;
+}
+
+int get_ddr_opp(int opp)
+{
+	return vcore_dvfs_to_ddr_opp[opp];
+}
+
+/* DDR frequency (kHz) recorded for a vcore-dvfs OPP. */
+int get_ddr_khz(int opp)
+{
+	return opp_table[opp].ddr_khz;
+}
+
+/*
+ * Current DDR OPP; DDR_OPP_UNREQ when the DVFSRC is disabled or the SPM
+ * level does not map to a valid vcore-dvfs OPP.
+ */
+int get_cur_ddr_opp(void)
+{
+	int idx;
+
+	if (!is_dvfsrc_enabled())
+		return DDR_OPP_UNREQ;
+
+	idx = get_cur_vcore_dvfs_opp();
+
+	if (idx >= VCORE_DVFS_OPP_NUM)
+		return DDR_OPP_UNREQ;
+	return vcore_dvfs_to_ddr_opp[idx];
+}
+
+/* Current DDR frequency in kHz, or 0 when it cannot be determined. */
+int get_cur_ddr_khz(void)
+{
+	int idx;
+
+	if (!is_dvfsrc_enabled())
+		return 0;
+
+	idx = get_cur_vcore_dvfs_opp();
+
+	if (idx >= VCORE_DVFS_OPP_NUM)
+		return 0;
+	return opp_table[idx].ddr_khz;
+}
+
+/*
+ * Record the voltage for a vcore OPP and program the matching pwrap
+ * command slot with the PMIC step code derived from it.
+ */
+void set_vcore_uv_table(int vcore_opp, int vcore_uv)
+{
+	spm_dvfs_pwrap_cmd(get_pwrap_cmd(vcore_opp),
+			vcore_uv_to_pmic(vcore_uv));
+	vcore_uv_table[vcore_opp] = vcore_uv;
+}
+
+/* DDR OPP <-> frequency table accessors. */
+int get_opp_ddr_freq(int ddr_opp)
+{
+	return ddr_table[ddr_opp];
+}
+
+void set_opp_ddr_freq(int ddr_opp, int ddr_freq)
+{
+	ddr_table[ddr_opp] = ddr_freq;
+}
+
+int get_vcore_uv_table(int vcore_opp)
+{
+	return vcore_uv_table[vcore_opp];
+}
+
+/* pwrap command slot associated with a vcore OPP. */
+void set_pwrap_cmd(int vcore_opp, int pwrap_cmd)
+{
+	vcore_opp_to_pwrap_cmd[vcore_opp] = pwrap_cmd;
+}
+
+int get_pwrap_cmd(int vcore_opp)
+{
+	return vcore_opp_to_pwrap_cmd[vcore_opp];
+}
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.h b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.h
new file mode 100644
index 0000000..6968194
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-opp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef __MTK_DVFSRC_OPP_H
+#define __MTK_DVFSRC_OPP_H
+
+#if defined(CONFIG_MACH_MT2731)
+#include "mtk-dvfsrc-opp-mt2731.h"
+#endif
+
+struct opp_profile {
+	int vcore_uv;
+	int ddr_khz;
+};
+
+extern int get_cur_vcore_dvfs_opp(void);
+extern void set_opp_table(int vcore_dvfs_opp, int vcore_uv, int ddr_khz);
+
+extern int get_vcore_opp(int opp);
+extern int get_vcore_uv(int opp);
+extern int get_cur_vcore_opp(void);
+extern int get_cur_vcore_uv(void);
+extern void set_vcore_opp(int vcore_dvfs_opp, int vcore_opp);
+
+extern int get_ddr_opp(int opp);
+extern int get_ddr_khz(int opp);
+extern int get_cur_ddr_opp(void);
+extern int get_cur_ddr_khz(void);
+extern void set_ddr_opp(int vcore_dvfs_opp, int ddr_opp);
+
+extern void set_vcore_uv_table(int vcore_opp, int vcore_uv);
+extern int get_vcore_uv_table(int vcore_opp);
+
+extern void set_pwrap_cmd(int vcore_opp, int pwrap_cmd);
+extern int get_pwrap_cmd(int vcore_opp);
+extern int get_opp_ddr_freq(int ddr_opp);
+extern void set_opp_ddr_freq(int ddr_opp, int ddr_freq);
+
+#endif /* __MTK_DVFSRC_OPP_H */
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-sysfs.c b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-sysfs.c
new file mode 100644
index 0000000..7cfa70d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc-sysfs.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+
+#include "mtk-dvfsrc.h"
+#include "mtk-dvfsrc-opp.h"
+
+/* Weak fallbacks used when the frequency-hopping driver is not built in. */
+__weak void dvfsrc_enable_dvfs_freq_hopping(int gps_on)
+{
+	pr_info("dummy %s(%d)\n", __func__, gps_on);
+}
+
+__weak int dvfsrc_get_dvfs_freq_hopping_status(void)
+{
+	pr_info("dummy %s\n", __func__);
+	return 0;
+}
+/* Enable DVFSRC */
+/* dvfsrc_enable: read or toggle the DVFSRC enable state (decimal). */
+static ssize_t dvfsrc_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", is_dvfsrc_enabled());
+}
+static ssize_t dvfsrc_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_enable(val);
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_enable, 0644,
+		dvfsrc_enable_show, dvfsrc_enable_store);
+
+/* Set DVFSRC RUN FLAG */
+/* dvfsrc_enable_flag: debug flag bits, shown and parsed in hexadecimal. */
+static ssize_t dvfsrc_enable_flag_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%x\n", dvfsrc_flag_get());
+}
+static ssize_t dvfsrc_enable_flag_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 16, &val))
+		return -EINVAL;
+
+	dvfsrc_flag_set(val);
+
+	return count;
+}
+
+static DEVICE_ATTR(dvfsrc_enable_flag, 0644,
+		dvfsrc_enable_flag_show, dvfsrc_enable_flag_store);
+
+/* Request DRAM OPP */
+/* Write-only: request a DDR OPP by decimal index. */
+static ssize_t dvfsrc_req_ddr_opp_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_set_ddr_opp(val);
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_req_ddr_opp, 0200,
+		NULL, dvfsrc_req_ddr_opp_store);
+
+/* Request Vcore OPP*/
+/* Write-only: request a Vcore OPP by decimal index. */
+static ssize_t dvfsrc_req_vcore_opp_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_set_vcore_opp(val);
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_req_vcore_opp, 0200,
+		NULL, dvfsrc_req_vcore_opp_store);
+
+/* Set Vcore uv */
+/*
+ * Write "<opp> <uV>": update the voltage for a vcore OPP and rebuild the
+ * OPP table so the change is reflected everywhere.
+ */
+static ssize_t dvfsrc_set_vcore_uv_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int opp = 0, vcore_uv = 0;
+
+	if (sscanf(buf, "%d %d", &opp, &vcore_uv) != 2)
+		return -EINVAL;
+
+	set_vcore_uv_table(opp, vcore_uv);
+	dvfsrc_opp_table_init();
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_set_vcore_uv, 0200,
+		NULL, dvfsrc_set_vcore_uv_store);
+
+/* Force VCORE DVFS OPP */
+/* Write-only: force a vcore-dvfs OPP (decimal index) for debugging. */
+static ssize_t dvfsrc_force_vcore_dvfs_opp_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_set_vcore_dvfs_force_opp(val);
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_force_vcore_dvfs_opp, 0200,
+		NULL, dvfsrc_force_vcore_dvfs_opp_store);
+
+/* Get OPP Table */
+/*
+ * Dump the vcore-dvfs OPP table (vcore uV / DDR kHz per OPP).
+ *
+ * Uses scnprintf() instead of snprintf(): snprintf() returns the length
+ * that *would* have been written, so on truncation the running pointer
+ * could advance past the PAGE_SIZE buffer and the next iteration would
+ * be handed a negative size.  scnprintf() returns the number of
+ * characters actually written and never walks past the buffer end.
+ */
+static ssize_t dvfsrc_opp_table_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dvfsrc *dvfsrc;
+	char *p = buf;
+	char *buff_end = p + PAGE_SIZE;
+	int i;
+
+	dvfsrc = dev_get_drvdata(dev);
+
+	if (!dvfsrc)
+		return sprintf(buf, "Failed to access dvfsrc\n");
+
+	mutex_lock(&dvfsrc->devfreq->lock);
+	for (i = 0; i < VCORE_DVFS_OPP_NUM; i++) {
+		p += scnprintf(p, buff_end - p, "[OPP%-2d]: %-8u uv %-8u khz\n",
+				i, get_vcore_uv(i), get_ddr_khz(i));
+	}
+
+	p += scnprintf(p, buff_end - p, "\n");
+	mutex_unlock(&dvfsrc->devfreq->lock);
+
+	return p - buf;
+}
+
+static DEVICE_ATTR(dvfsrc_opp_table, 0444, dvfsrc_opp_table_show, NULL);
+
+/* Dump DVFSRC / SPM / PM QoS Register */
+static ssize_t dvfsrc_dump_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	char *p = buf;
+
+	/* dvfsrc_dump_reg() appends into buf and returns the new tail. */
+	p = dvfsrc_dump_reg(p);
+
+	return p - buf;
+}
+
+static DEVICE_ATTR(dvfsrc_dump, 0444, dvfsrc_dump_show, NULL);
+
+
+/* Show / control the DVFS frequency-hopping state (decimal on/off). */
+static ssize_t dvfsrc_freq_hopping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n",
+			dvfsrc_get_dvfs_freq_hopping_status());
+}
+
+static ssize_t dvfsrc_freq_hopping_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_enable_dvfs_freq_hopping(val);
+
+	return count;
+}
+
+static DEVICE_ATTR(dvfsrc_freq_hopping, 0644,
+		dvfsrc_freq_hopping_show, dvfsrc_freq_hopping_store);
+
+/* Dump DVFSRC latch data */
+static ssize_t dvfsrc_latch_dump_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	char *p = buf;
+
+	/* NOTE(review): the "lacth" typo is part of the exported API name. */
+	p = dvfsrc_dump_lacth_reg(p);
+
+	return p - buf;
+}
+
+static DEVICE_ATTR(dvfsrc_latch_dump, 0444, dvfsrc_latch_dump_show, NULL);
+
+/* Force VCORE DVFS OPP for Workaround */
+/*
+ * NOTE(review): the parsed value only validates that the input is
+ * numeric; dvfsrc_set_vcore_dvfs_force_workaround_opp() takes no
+ * argument - confirm whether @val was meant to be passed through.
+ */
+static ssize_t dvfsrc_force_vcore_dvfs_opp_workaround_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val = 0;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+
+	dvfsrc_set_vcore_dvfs_force_workaround_opp();
+
+	return count;
+}
+static DEVICE_ATTR(dvfsrc_force_vcore_dvfs_opp_workaround, 0200,
+		NULL, dvfsrc_force_vcore_dvfs_opp_workaround_store);
+
+/* Attribute list exported as the "dvfsrc" group under /sys/power. */
+static struct attribute *dvfsrc_attrs[] = {
+	&dev_attr_dvfsrc_enable.attr,
+	&dev_attr_dvfsrc_enable_flag.attr,
+	&dev_attr_dvfsrc_req_ddr_opp.attr,
+	&dev_attr_dvfsrc_req_vcore_opp.attr,
+	&dev_attr_dvfsrc_force_vcore_dvfs_opp.attr,
+	&dev_attr_dvfsrc_set_vcore_uv.attr,
+	&dev_attr_dvfsrc_opp_table.attr,
+	&dev_attr_dvfsrc_dump.attr,
+	&dev_attr_dvfsrc_freq_hopping.attr,
+	&dev_attr_dvfsrc_latch_dump.attr,
+	&dev_attr_dvfsrc_force_vcore_dvfs_opp_workaround.attr,
+	NULL,
+};
+
+static struct attribute_group dvfsrc_attr_group = {
+	.name = "dvfsrc",
+	.attrs = dvfsrc_attrs,
+};
+
+/*
+ * dvfsrc_add_interface - expose the dvfsrc sysfs group under /sys/power.
+ *
+ * The failure report is now a single pr_info(): the original pair of
+ * pr_info() calls split one message across two log lines (the first had
+ * no terminating newline), which garbles the kernel log.
+ */
+int dvfsrc_add_interface(struct device *dev)
+{
+	int r;
+
+	r = sysfs_create_group(power_kobj, &dvfsrc_attr_group);
+	if (r)
+		pr_info("[SPM] FAILED TO CREATE /sys/power/dvfsrc(%d)\n", r);
+
+	return r;
+}
+
+/* Tear down the /sys/power/dvfsrc group created by dvfsrc_add_interface(). */
+void dvfsrc_remove_interface(struct device *dev)
+{
+	sysfs_remove_group(power_kobj, &dvfsrc_attr_group);
+}
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc.h b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc.h
new file mode 100644
index 0000000..b35a0e0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef _MTK_DVFSRC_H
+#define _MTK_DVFSRC_H
+
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/io.h>
+
+#if defined(CONFIG_MACH_MT2731)
+#include "mtk-dvfsrc-mt2731.h"
+#endif
+
+#include "mtk-dvfsrc-opp.h"
+
+/* Driver state shared between the devfreq glue and the platform code. */
+struct dvfsrc {
+	struct devfreq		*devfreq;
+	struct device *dev;
+	bool qos_enabled;
+	bool dvfsrc_enabled;
+	int dvfsrc_flag;	/* debug flag bits (sysfs, hex) */
+	void __iomem		*regs;		/* DVFSRC register base */
+	void __iomem		*sram_regs;
+	bool opp_forced;	/* set while an OPP is being forced */
+	char			force_start[20];
+	char			force_end[20];
+	int (*suspend)(struct dvfsrc *dvfsrc_dev);
+	int (*resume)(struct dvfsrc *dvfsrc_dev);
+};
+
+/* Iterations of the udelay(1) poll loop in dvfsrc_wait_for_completion(). */
+#define DVFSRC_TIMEOUT          1000
+
+/* PMIC */
+/* Convert between Vcore microvolts and the PMIC VOSEL step code. */
+#define vcore_pmic_to_uv(pmic)	\
+	(((pmic) * VCORE_STEP_UV) + VCORE_BASE_UV)
+#define vcore_uv_to_pmic(uv)	/* pmic >= uv */	\
+	((((uv) - VCORE_BASE_UV) + (VCORE_STEP_UV - 1)) / VCORE_STEP_UV)
+
+/*
+ * Busy-wait in 1 us steps until @condition holds or @timeout iterations
+ * pass.  Evaluates to > 0 on success, 0 when the DVFSRC is disabled
+ * (loop is skipped), and -EBUSY on timeout.
+ */
+#define dvfsrc_wait_for_completion(condition, timeout)			\
+({								\
+	int ret = 0;						\
+	if (is_dvfsrc_enabled())				\
+		ret = 1;					\
+	while (!(condition) && ret > 0) {			\
+		if (ret++ >= timeout)				\
+			ret = -EBUSY;				\
+		udelay(1);					\
+	}							\
+	ret;							\
+})
+
+extern int is_qos_enabled(void);
+extern int is_dvfsrc_enabled(void);
+extern int is_opp_forced(void);
+extern int dvfsrc_get_emi_bw(int type);
+extern int get_vcore_dvfs_level(void);
+extern void mtk_spmfw_init(int dvfsrc_en, int skip_check);
+extern void dvfsrc_enable(int dvfsrc_en);
+extern void dvfsrc_flag_set(int flag);
+extern int dvfsrc_flag_get(void);
+extern char *dvfsrc_dump_reg(char *ptr);
+extern char *dvfsrc_dump_lacth_reg(char *ptr);
+extern u32 dvfsrc_read(u32 offset);
+extern void dvfsrc_write(u32 offset, u32 val);
+extern u32 dvfsrc_sram_read(u32 offset);
+extern void dvfsrc_sram_write(u32 offset, u32 val);
+extern void dvfsrc_opp_table_init(void);
+extern void dvfsrc_sram_reg_init(void);
+
+extern int dvfsrc_add_interface(struct device *dev);
+extern void dvfsrc_remove_interface(struct device *dev);
+extern void dvfsrc_opp_level_mapping(void);
+extern void dvfsrc_sspm_ipi_init(int dvfsrc_en);
+extern void get_opp_info(char *p);
+extern void get_dvfsrc_reg(char *p);
+extern void get_dvfsrc_record(char *p);
+extern void get_spm_reg(char *p);
+extern void spm_dvfs_pwrap_cmd(int pwrap_cmd, int pwrap_vcore);
+extern int dvfsrc_platform_init(struct dvfsrc *dvfsrc);
+extern u32 spm_get_dvfs_level(void);
+extern u32 spm_get_pcm_reg9_data(void);
+
+/* met profile function */
+extern int vcorefs_get_opp_info_num(void);
+extern char **vcorefs_get_opp_info_name(void);
+extern unsigned int *vcorefs_get_opp_info(void);
+extern int vcorefs_get_src_req_num(void);
+extern char **vcorefs_get_src_req_name(void);
+extern unsigned int *vcorefs_get_src_req(void);
+extern u32 vcorefs_get_md_scenario(void);
+
+/* DVFSRC Set APIs */
+extern void dvfsrc_set_ddr_opp(int level);
+extern void dvfsrc_set_vcore_opp(int level);
+extern void dvfsrc_set_vcore_dvfs_force_opp(int level);
+extern void dvfsrc_set_vcore_dvfs_force_workaround_opp(void);
+
+/*DVFSRC Get APIs */
+extern int dvfsrc_get_reguest(unsigned int id);
+
+#endif /* _MTK_DVFSRC_H */
+
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_reg.h b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_reg.h
new file mode 100644
index 0000000..7f6e174
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_reg.h
@@ -0,0 +1,1266 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef __MTK_DVFSRC_REG_H
+#define __MTK_DVFSRC_REG_H
+
+/**************************************
+ * Define and Declare
+ **************************************/
+
+/* Register map generated from dvfsrc_par.py */
+/* #define DVFSRC_APB_BASE        (0x10012000) */
+#define DVFSRC_BASIC_CONTROL      (0x0)
+#define DVFSRC_SW_REQ             (0x4)
+#define DVFSRC_SW_REQ2            (0x8)
+#define DVFSRC_EMI_REQUEST        (0xC)
+#define DVFSRC_EMI_REQUEST2       (0x10)
+#define DVFSRC_EMI_REQUEST3       (0x14)
+#define DVFSRC_EMI_HRT            (0x18)
+#define DVFSRC_EMI_HRT2           (0x1C)
+#define DVFSRC_EMI_HRT3           (0x20)
+#define DVFSRC_EMI_QOS0           (0x24)
+#define DVFSRC_EMI_QOS1           (0x28)
+#define DVFSRC_EMI_QOS2           (0x2C)
+#define DVFSRC_EMI_MD2SPM0        (0x30)
+#define DVFSRC_EMI_MD2SPM1        (0x34)
+#define DVFSRC_EMI_MD2SPM2        (0x38)
+#define DVFSRC_EMI_MD2SPM0_T      (0x3C)
+#define DVFSRC_EMI_MD2SPM1_T      (0x40)
+#define DVFSRC_EMI_MD2SPM2_T      (0x44)
+#define DVFSRC_VCORE_REQUEST      (0x48)
+#define DVFSRC_VCORE_REQUEST2     (0x4C)
+#define DVFSRC_VCORE_HRT          (0x50)
+#define DVFSRC_VCORE_HRT2         (0x54)
+#define DVFSRC_VCORE_HRT3         (0x58)
+#define DVFSRC_VCORE_QOS0         (0x5C)
+#define DVFSRC_VCORE_QOS1         (0x60)
+#define DVFSRC_VCORE_QOS2         (0x64)
+#define DVFSRC_VCORE_MD2SPM0      (0x68)
+#define DVFSRC_VCORE_MD2SPM1      (0x6C)
+#define DVFSRC_VCORE_MD2SPM2      (0x70)
+#define DVFSRC_VCORE_MD2SPM0_T    (0x74)
+#define DVFSRC_VCORE_MD2SPM1_T    (0x78)
+#define DVFSRC_VCORE_MD2SPM2_T    (0x7C)
+#define DVFSRC_MD_REQUEST         (0x80)
+#define DVFSRC_MD_SW_CONTROL      (0x84)
+#define DVFSRC_MD_VMODEM_REMAP    (0x88)
+#define DVFSRC_MD_VMD_REMAP       (0x8C)
+#define DVFSRC_MD_VSRAM_REMAP     (0x90)
+#define DVFSRC_HALT_SW_CONTROL    (0x94)
+#define DVFSRC_INT                (0x98)
+#define DVFSRC_INT_EN             (0x9C)
+#define DVFSRC_INT_CLR            (0xA0)
+#define DVFSRC_BW_MON_WINDOW      (0xA4)
+#define DVFSRC_BW_MON_THRES_1     (0xA8)
+#define DVFSRC_BW_MON_THRES_2     (0xAC)
+#define DVFSRC_MD_TURBO           (0xB0)
+#define DVFSRC_DEBOUNCE_FOUR      (0xD0)
+#define DVFSRC_DEBOUNCE_RISE_FALL (0xD4)
+#define DVFSRC_TIMEOUT_NEXTREQ    (0xD8)
+#define DVFSRC_LEVEL              (0xDC)
+#define DVFSRC_LEVEL_LABEL_0_1    (0xE0)
+#define DVFSRC_LEVEL_LABEL_2_3    (0xE4)
+#define DVFSRC_LEVEL_LABEL_4_5    (0xE8)
+#define DVFSRC_LEVEL_LABEL_6_7    (0xEC)
+#define DVFSRC_LEVEL_LABEL_8_9    (0xF0)
+#define DVFSRC_LEVEL_LABEL_10_11  (0xF4)
+#define DVFSRC_LEVEL_LABEL_12_13  (0xF8)
+#define DVFSRC_LEVEL_LABEL_14_15  (0xFC)
+#define DVFSRC_MM_BW_0            (0x100)
+#define DVFSRC_MM_BW_1            (0x104)
+#define DVFSRC_MM_BW_2            (0x108)
+#define DVFSRC_MM_BW_3            (0x10C)
+#define DVFSRC_MM_BW_4            (0x110)
+#define DVFSRC_MM_BW_5            (0x114)
+#define DVFSRC_MM_BW_6            (0x118)
+#define DVFSRC_MM_BW_7            (0x11C)
+#define DVFSRC_MM_BW_8            (0x120)
+#define DVFSRC_MM_BW_9            (0x124)
+#define DVFSRC_MM_BW_10           (0x128)
+#define DVFSRC_MM_BW_11           (0x12C)
+#define DVFSRC_MM_BW_12           (0x130)
+#define DVFSRC_MM_BW_13           (0x134)
+#define DVFSRC_MM_BW_14           (0x138)
+#define DVFSRC_MM_BW_15           (0x13C)
+#define DVFSRC_MD_BW_0            (0x140)
+#define DVFSRC_MD_BW_1            (0x144)
+#define DVFSRC_MD_BW_2            (0x148)
+#define DVFSRC_MD_BW_3            (0x14C)
+#define DVFSRC_MD_BW_4            (0x150)
+#define DVFSRC_MD_BW_5            (0x154)
+#define DVFSRC_MD_BW_6            (0x158)
+#define DVFSRC_MD_BW_7            (0x15C)
+#define DVFSRC_SW_BW_0            (0x160)
+#define DVFSRC_SW_BW_1            (0x164)
+#define DVFSRC_SW_BW_2            (0x168)
+#define DVFSRC_SW_BW_3            (0x16C)
+#define DVFSRC_SW_BW_4            (0x170)
+#define DVFSRC_QOS_EN             (0x180)
+#define DVFSRC_ISP_HRT            (0x190)
+#define DVFSRC_FORCE              (0x300)
+#define DVFSRC_SEC_SW_REQ         (0x304)
+#define DVFSRC_LAST               (0x308)
+#define DVFSRC_LAST_L             (0x30C)
+#define DVFSRC_MD_SCENARIO        (0x310)
+#define DVFSRC_RECORD_0_0         (0x400)
+#define DVFSRC_RECORD_0_1         (0x404)
+#define DVFSRC_RECORD_0_2         (0x408)
+#define DVFSRC_RECORD_1_0         (0x40C)
+#define DVFSRC_RECORD_1_1         (0x410)
+#define DVFSRC_RECORD_1_2         (0x414)
+#define DVFSRC_RECORD_2_0         (0x418)
+#define DVFSRC_RECORD_2_1         (0x41C)
+#define DVFSRC_RECORD_2_2         (0x420)
+#define DVFSRC_RECORD_3_0         (0x424)
+#define DVFSRC_RECORD_3_1         (0x428)
+#define DVFSRC_RECORD_3_2         (0x42C)
+#define DVFSRC_RECORD_4_0         (0x430)
+#define DVFSRC_RECORD_4_1         (0x434)
+#define DVFSRC_RECORD_4_2         (0x438)
+#define DVFSRC_RECORD_5_0         (0x43C)
+#define DVFSRC_RECORD_5_1         (0x440)
+#define DVFSRC_RECORD_5_2         (0x444)
+#define DVFSRC_RECORD_6_0         (0x448)
+#define DVFSRC_RECORD_6_1         (0x44C)
+#define DVFSRC_RECORD_6_2         (0x450)
+#define DVFSRC_RECORD_7_0         (0x454)
+#define DVFSRC_RECORD_7_1         (0x458)
+#define DVFSRC_RECORD_7_2         (0x45C)
+#define DVFSRC_RECORD_0_L_0       (0x460)
+#define DVFSRC_RECORD_0_L_1       (0x464)
+#define DVFSRC_RECORD_0_L_2       (0x468)
+#define DVFSRC_RECORD_1_L_0       (0x46C)
+#define DVFSRC_RECORD_1_L_1       (0x470)
+#define DVFSRC_RECORD_1_L_2       (0x474)
+#define DVFSRC_RECORD_2_L_0       (0x478)
+#define DVFSRC_RECORD_2_L_1       (0x47C)
+#define DVFSRC_RECORD_2_L_2       (0x480)
+#define DVFSRC_RECORD_3_L_0       (0x484)
+#define DVFSRC_RECORD_3_L_1       (0x488)
+#define DVFSRC_RECORD_3_L_2       (0x48C)
+#define DVFSRC_RECORD_4_L_0       (0x490)
+#define DVFSRC_RECORD_4_L_1       (0x494)
+#define DVFSRC_RECORD_4_L_2       (0x498)
+#define DVFSRC_RECORD_5_L_0       (0x49C)
+#define DVFSRC_RECORD_5_L_1       (0x4A0)
+#define DVFSRC_RECORD_5_L_2       (0x4A4)
+#define DVFSRC_RECORD_6_L_0       (0x4A8)
+#define DVFSRC_RECORD_6_L_1       (0x4AC)
+#define DVFSRC_RECORD_6_L_2       (0x4B0)
+#define DVFSRC_RECORD_7_L_0       (0x4B4)
+#define DVFSRC_RECORD_7_L_1       (0x4B8)
+#define DVFSRC_RECORD_7_L_2       (0x4BC)
+#define DVFSRC_RECORD_MD_0        (0x4C0)
+#define DVFSRC_RECORD_MD_1        (0x4C4)
+#define DVFSRC_RECORD_MD_2        (0x4C8)
+#define DVFSRC_RECORD_MD_3        (0x4CC)
+#define DVFSRC_RECORD_MD_4        (0x4D0)
+#define DVFSRC_RECORD_MD_5        (0x4D4)
+#define DVFSRC_RECORD_MD_6        (0x4D8)
+#define DVFSRC_RECORD_MD_7        (0x4DC)
+#define DVFSRC_RECORD_COUNT       (0x4F0)
+#define DVFSRC_RSRV_0             (0x600)
+#define DVFSRC_RSRV_1             (0x604)
+#define DVFSRC_RSRV_2             (0x608)
+#define DVFSRC_RSRV_3             (0x60C)
+#define DVFSRC_RSRV_4             (0x610)
+#define DVFSRC_RSRV_5             (0x614)
+
+/* DVFSRC_BASIC_CONTROL(0x10012000 + 0x0) */
+#define TARGET_LEVEL_LOCK_SHIFT            16
+#define TARGET_LEVEL_LOCK_MASK             0xffff0000
+#define FORCE_EN_TAR_SHIFT                 15
+#define FORCE_EN_TAR_MASK                  0x8000
+#define FORCE_EN_CUR_SHIFT                 14
+#define FORCE_EN_CUR_MASK                  0x4000
+#define ABORT_EN_SHIFT                     13
+/*
+ * DVFSRC register bit-field definitions (register base 0x10012000, per
+ * the per-register offset comments below).  Convention used throughout
+ * this file: <FIELD>_SHIFT is the LSB position of the field and
+ * <FIELD>_MASK is the field mask already shifted into place, i.e.
+ * MASK == (width mask) << SHIFT, verifiable by inspecting each pair.
+ * The fields immediately below belong to the control register whose
+ * header comment precedes this chunk.
+ */
+#define ABORT_EN_MASK                      0x2000
+#define CLEAR_HALT_TIMEOUT_SHIFT           12
+#define CLEAR_HALT_TIMEOUT_MASK            0x1000
+#define HALT_TIMEOUT_EN_SHIFT              11
+#define HALT_TIMEOUT_EN_MASK               0x800
+#define READY_TEST_MODE_SHIFT              10
+#define READY_TEST_MODE_MASK               0x400
+#define LOCK_HIGH_ENABLE_SHIFT             9
+#define LOCK_HIGH_ENABLE_MASK              0x200
+#define DVFSRC_OUT_EN_SHIFT                8
+#define DVFSRC_OUT_EN_MASK                 0x100
+#define RG_SW_CLR_REQ_SHIFT                7
+#define RG_SW_CLR_REQ_MASK                 0x80
+#define MDSRCCLK_CHECK_SHIFT               6
+#define MDSRCCLK_CHECK_MASK                0x40
+#define RG_COUNTER_SEL_SHIFT               4
+#define RG_COUNTER_SEL_MASK                0x30
+#define DVFS_DEBUG_SW_RST_SHIFT            3
+#define DVFS_DEBUG_SW_RST_MASK             0x8
+#define RG_FREEZE_DEBUG_RECORD_SHIFT       2
+#define RG_FREEZE_DEBUG_RECORD_MASK        0x4
+#define DEBUG_EN_SHIFT                     1
+#define DEBUG_EN_MASK                      0x2
+#define DVFSRC_EN_SHIFT                    0
+#define DVFSRC_EN_MASK                     0x1
+
+/* DVFSRC_SW_REQ(0x10012000 + 0x4) */
+/* 2-bit AP software request gears for VCORE and EMI. */
+#define VCORE_SW_AP_SHIFT                  2
+#define VCORE_SW_AP_MASK                   0xc
+#define EMI_SW_AP_SHIFT                    0
+#define EMI_SW_AP_MASK                     0x3
+
+/* DVFSRC_SW_REQ2(0x10012000 + 0x8) */
+#define VCORE_SW_AP2_SHIFT                 2
+#define VCORE_SW_AP2_MASK                  0xc
+#define EMI_SW_AP2_SHIFT                   0
+#define EMI_SW_AP2_MASK                    0x3
+
+/* DVFSRC_EMI_REQUEST(0x10012000 + 0xC) */
+/* 2-bit EMI gear request fields, one per requester source. */
+#define EMI_EX_GEAR_SHIFT                  28
+#define EMI_EX_GEAR_MASK                   0x30000000
+#define EMI_CONN_GEAR_SHIFT                26
+#define EMI_CONN_GEAR_MASK                 0xc000000
+#define EMI_MD2SPM_GEAR2_SHIFT             20
+#define EMI_MD2SPM_GEAR2_MASK              0x300000
+#define EMI_MD2SPM_GEAR1_SHIFT             18
+#define EMI_MD2SPM_GEAR1_MASK              0xc0000
+#define EMI_MD2SPM_GEAR0_SHIFT             16
+#define EMI_MD2SPM_GEAR0_MASK              0x30000
+#define EMI_MD_LATENCY_GEAR1_SHIFT         10
+#define EMI_MD_LATENCY_GEAR1_MASK          0xc00
+#define EMI_MD_LATENCY_GEAR0_SHIFT         8
+#define EMI_MD_LATENCY_GEAR0_MASK          0x300
+#define EMI_TOTAL_BW2_GEAR1_SHIFT          6
+#define EMI_TOTAL_BW2_GEAR1_MASK           0xc0
+#define EMI_TOTAL_BW2_GEAR0_SHIFT          4
+#define EMI_TOTAL_BW2_GEAR0_MASK           0x30
+#define EMI_TOTAL_BW_GEAR1_SHIFT           2
+#define EMI_TOTAL_BW_GEAR1_MASK            0xc
+#define EMI_TOTAL_BW_GEAR0_SHIFT           0
+#define EMI_TOTAL_BW_GEAR0_MASK            0x3
+
+/* DVFSRC_EMI_REQUEST2(0x10012000 + 0x10) */
+#define EMI_RSRV_BW2_GEAR1_SHIFT           14
+#define EMI_RSRV_BW2_GEAR1_MASK            0xc000
+#define EMI_RSRV_BW2_GEAR0_SHIFT           12
+#define EMI_RSRV_BW2_GEAR0_MASK            0x3000
+#define EMI_RSRV_BW_GEAR1_SHIFT            10
+#define EMI_RSRV_BW_GEAR1_MASK             0xc00
+#define EMI_RSRV_BW_GEAR0_SHIFT            8
+#define EMI_RSRV_BW_GEAR0_MASK             0x300
+#define EMI_GPU_BW_GEAR1_SHIFT             6
+#define EMI_GPU_BW_GEAR1_MASK              0xc0
+#define EMI_GPU_BW_GEAR0_SHIFT             4
+#define EMI_GPU_BW_GEAR0_MASK              0x30
+#define EMI_CPU_BW_GEAR1_SHIFT             2
+#define EMI_CPU_BW_GEAR1_MASK              0xc
+#define EMI_CPU_BW_GEAR0_SHIFT             0
+#define EMI_CPU_BW_GEAR0_MASK              0x3
+
+/* DVFSRC_EMI_REQUEST3(0x10012000 + 0x14) */
+#define EMI_QOS_GEAR2_SHIFT                28
+#define EMI_QOS_GEAR2_MASK                 0x30000000
+#define EMI_QOS_GEAR1_SHIFT                26
+#define EMI_QOS_GEAR1_MASK                 0xc000000
+#define EMI_QOS_GEAR0_SHIFT                24
+#define EMI_QOS_GEAR0_MASK                 0x3000000
+#define EMI_HRT3_GEAR2_SHIFT               20
+#define EMI_HRT3_GEAR2_MASK                0x300000
+#define EMI_HRT3_GEAR1_SHIFT               18
+#define EMI_HRT3_GEAR1_MASK                0xc0000
+#define EMI_HRT3_GEAR0_SHIFT               16
+#define EMI_HRT3_GEAR0_MASK                0x30000
+#define EMI_HRT2_GEAR2_SHIFT               12
+#define EMI_HRT2_GEAR2_MASK                0x3000
+#define EMI_HRT2_GEAR1_SHIFT               10
+#define EMI_HRT2_GEAR1_MASK                0xc00
+#define EMI_HRT2_GEAR0_SHIFT               8
+#define EMI_HRT2_GEAR0_MASK                0x300
+#define EMI_HRT_GEAR2_SHIFT                4
+#define EMI_HRT_GEAR2_MASK                 0x30
+#define EMI_HRT_GEAR1_SHIFT                2
+#define EMI_HRT_GEAR1_MASK                 0xc
+#define EMI_HRT_GEAR0_SHIFT                0
+#define EMI_HRT_GEAR0_MASK                 0x3
+
+/* DVFSRC_EMI_HRT(0x10012000 + 0x18) */
+/* Three 8-bit HRT bandwidth thresholds packed into one register. */
+#define EMI_HRT_THRESHOLD2_SHIFT           16
+#define EMI_HRT_THRESHOLD2_MASK            0xff0000
+#define EMI_HRT_THRESHOLD1_SHIFT           8
+#define EMI_HRT_THRESHOLD1_MASK            0xff00
+#define EMI_HRT_THRESHOLD0_SHIFT           0
+#define EMI_HRT_THRESHOLD0_MASK            0xff
+
+/* DVFSRC_EMI_HRT2(0x10012000 + 0x1C) */
+#define EMI_HRT2_THRESHOLD2_SHIFT          16
+#define EMI_HRT2_THRESHOLD2_MASK           0xff0000
+#define EMI_HRT2_THRESHOLD1_SHIFT          8
+#define EMI_HRT2_THRESHOLD1_MASK           0xff00
+#define EMI_HRT2_THRESHOLD0_SHIFT          0
+#define EMI_HRT2_THRESHOLD0_MASK           0xff
+
+/* DVFSRC_EMI_HRT3(0x10012000 + 0x20) */
+#define EMI_HRT3_THRESHOLD2_SHIFT          16
+#define EMI_HRT3_THRESHOLD2_MASK           0xff0000
+#define EMI_HRT3_THRESHOLD1_SHIFT          8
+#define EMI_HRT3_THRESHOLD1_MASK           0xff00
+#define EMI_HRT3_THRESHOLD0_SHIFT          0
+#define EMI_HRT3_THRESHOLD0_MASK           0xff
+
+/* DVFSRC_EMI_QOS0(0x10012000 + 0x24) */
+/* 13-bit QoS threshold, one register per threshold level. */
+#define EMI_QOS_THRESHOLD0_SHIFT           0
+#define EMI_QOS_THRESHOLD0_MASK            0x1fff
+
+/* DVFSRC_EMI_QOS1(0x10012000 + 0x28) */
+#define EMI_QOS_THRESHOLD1_SHIFT           0
+#define EMI_QOS_THRESHOLD1_MASK            0x1fff
+
+/* DVFSRC_EMI_QOS2(0x10012000 + 0x2C) */
+#define EMI_QOS_THRESHOLD2_SHIFT           0
+#define EMI_QOS_THRESHOLD2_MASK            0x1fff
+
+/* DVFSRC_EMI_MD2SPM0(0x10012000 + 0x30) */
+/* Full 32-bit MD-to-SPM scenario mask words. */
+#define EMI_MD2SPM_MASK0_SHIFT             0
+#define EMI_MD2SPM_MASK0_MASK              0xffffffff
+
+/* DVFSRC_EMI_MD2SPM1(0x10012000 + 0x34) */
+#define EMI_MD2SPM_MASK1_SHIFT             0
+#define EMI_MD2SPM_MASK1_MASK              0xffffffff
+
+/* DVFSRC_EMI_MD2SPM2(0x10012000 + 0x38) */
+#define EMI_MD2SPM_MASK2_SHIFT             0
+#define EMI_MD2SPM_MASK2_MASK              0xffffffff
+
+/* DVFSRC_EMI_MD2SPM0_T(0x10012000 + 0x3C) */
+#define EMI_MD2SPM_MASK0_T_SHIFT           0
+#define EMI_MD2SPM_MASK0_T_MASK            0xffffffff
+
+/* DVFSRC_EMI_MD2SPM1_T(0x10012000 + 0x40) */
+#define EMI_MD2SPM_MASK1_T_SHIFT           0
+#define EMI_MD2SPM_MASK1_T_MASK            0xffffffff
+
+/* DVFSRC_EMI_MD2SPM2_T(0x10012000 + 0x44) */
+#define EMI_MD2SPM_MASK2_T_SHIFT           0
+#define EMI_MD2SPM_MASK2_T_MASK            0xffffffff
+
+/* DVFSRC_VCORE_REQUEST(0x10012000 + 0x48) */
+/* VCORE counterpart of the EMI request fields above. */
+#define VCORE_SCP_GEAR_SHIFT               30
+#define VCORE_SCP_GEAR_MASK                0xc0000000
+#define VCORE_EX_GEAR_SHIFT                28
+#define VCORE_EX_GEAR_MASK                 0x30000000
+#define VCORE_CONN_GEAR_SHIFT              26
+#define VCORE_CONN_GEAR_MASK               0xc000000
+#define VCORE_MD2SPM_GEAR2_SHIFT           20
+#define VCORE_MD2SPM_GEAR2_MASK            0x300000
+#define VCORE_MD2SPM_GEAR1_SHIFT           18
+#define VCORE_MD2SPM_GEAR1_MASK            0xc0000
+#define VCORE_MD2SPM_GEAR0_SHIFT           16
+#define VCORE_MD2SPM_GEAR0_MASK            0x30000
+
+/* DVFSRC_VCORE_REQUEST2(0x10012000 + 0x4C) */
+#define VCORE_QOS_GEAR2_SHIFT              28
+#define VCORE_QOS_GEAR2_MASK               0x30000000
+#define VCORE_QOS_GEAR1_SHIFT              26
+#define VCORE_QOS_GEAR1_MASK               0xc000000
+#define VCORE_QOS_GEAR0_SHIFT              24
+#define VCORE_QOS_GEAR0_MASK               0x3000000
+#define VCORE_HRT3_GEAR2_SHIFT             20
+#define VCORE_HRT3_GEAR2_MASK              0x300000
+#define VCORE_HRT3_GEAR1_SHIFT             18
+#define VCORE_HRT3_GEAR1_MASK              0xc0000
+#define VCORE_HRT3_GEAR0_SHIFT             16
+#define VCORE_HRT3_GEAR0_MASK              0x30000
+#define VCORE_HRT2_GEAR2_SHIFT             12
+#define VCORE_HRT2_GEAR2_MASK              0x3000
+#define VCORE_HRT2_GEAR1_SHIFT             10
+#define VCORE_HRT2_GEAR1_MASK              0xc00
+#define VCORE_HRT2_GEAR0_SHIFT             8
+#define VCORE_HRT2_GEAR0_MASK              0x300
+#define VCORE_HRT_GEAR2_SHIFT              4
+#define VCORE_HRT_GEAR2_MASK               0x30
+#define VCORE_HRT_GEAR1_SHIFT              2
+#define VCORE_HRT_GEAR1_MASK               0xc
+#define VCORE_HRT_GEAR0_SHIFT              0
+#define VCORE_HRT_GEAR0_MASK               0x3
+
+/* DVFSRC_VCORE_HRT(0x10012000 + 0x50) */
+#define VCORE_HRT_THRESHOLD2_SHIFT         16
+#define VCORE_HRT_THRESHOLD2_MASK          0xff0000
+#define VCORE_HRT_THRESHOLD1_SHIFT         8
+#define VCORE_HRT_THRESHOLD1_MASK          0xff00
+#define VCORE_HRT_THRESHOLD0_SHIFT         0
+#define VCORE_HRT_THRESHOLD0_MASK          0xff
+
+/* DVFSRC_VCORE_HRT2(0x10012000 + 0x54) */
+#define VCORE_HRT2_THRESHOLD2_SHIFT        16
+#define VCORE_HRT2_THRESHOLD2_MASK         0xff0000
+#define VCORE_HRT2_THRESHOLD1_SHIFT        8
+#define VCORE_HRT2_THRESHOLD1_MASK         0xff00
+#define VCORE_HRT2_THRESHOLD0_SHIFT        0
+#define VCORE_HRT2_THRESHOLD0_MASK         0xff
+
+/* DVFSRC_VCORE_HRT3(0x10012000 + 0x58) */
+#define VCORE_HRT3_THRESHOLD2_SHIFT        16
+#define VCORE_HRT3_THRESHOLD2_MASK         0xff0000
+#define VCORE_HRT3_THRESHOLD1_SHIFT        8
+#define VCORE_HRT3_THRESHOLD1_MASK         0xff00
+#define VCORE_HRT3_THRESHOLD0_SHIFT        0
+#define VCORE_HRT3_THRESHOLD0_MASK         0xff
+
+/* DVFSRC_VCORE_QOS0(0x10012000 + 0x5C) */
+#define VCORE_QOS_THRESHOLD0_SHIFT         0
+#define VCORE_QOS_THRESHOLD0_MASK          0x1fff
+
+/* DVFSRC_VCORE_QOS1(0x10012000 + 0x60) */
+#define VCORE_QOS_THRESHOLD1_SHIFT         0
+#define VCORE_QOS_THRESHOLD1_MASK          0x1fff
+
+/* DVFSRC_VCORE_QOS2(0x10012000 + 0x64) */
+#define VCORE_QOS_THRESHOLD2_SHIFT         0
+#define VCORE_QOS_THRESHOLD2_MASK          0x1fff
+
+/* DVFSRC_VCORE_MD2SPM0(0x10012000 + 0x68) */
+#define VCORE_MD2SPM_MASK0_SHIFT           0
+#define VCORE_MD2SPM_MASK0_MASK            0xffffffff
+
+/* DVFSRC_VCORE_MD2SPM1(0x10012000 + 0x6C) */
+#define VCORE_MD2SPM_MASK1_SHIFT           0
+#define VCORE_MD2SPM_MASK1_MASK            0xffffffff
+
+/* DVFSRC_VCORE_MD2SPM2(0x10012000 + 0x70) */
+#define VCORE_MD2SPM_MASK2_SHIFT           0
+#define VCORE_MD2SPM_MASK2_MASK            0xffffffff
+
+/* DVFSRC_VCORE_MD2SPM0_T(0x10012000 + 0x74) */
+#define VCORE_MD2SPM_MASK0_T_SHIFT         0
+#define VCORE_MD2SPM_MASK0_T_MASK          0xffffffff
+
+/* DVFSRC_VCORE_MD2SPM1_T(0x10012000 + 0x78) */
+#define VCORE_MD2SPM_MASK1_T_SHIFT         0
+#define VCORE_MD2SPM_MASK1_T_MASK          0xffffffff
+
+/* DVFSRC_VCORE_MD2SPM2_T(0x10012000 + 0x7C) */
+#define VCORE_MD2SPM_MASK2_T_SHIFT         0
+#define VCORE_MD2SPM_MASK2_T_MASK          0xffffffff
+
+/* DVFSRC_MD_REQUEST(0x10012000 + 0x80) */
+/* 9-bit modem software gear request. */
+#define MD_GEAR_SW_REQUEST_SHIFT           0
+#define MD_GEAR_SW_REQUEST_MASK            0x1ff
+
+/* DVFSRC_MD_SW_CONTROL(0x10012000 + 0x84) */
+/* Software-mode overrides for the modem gear handshake signals. */
+#define MD_SRCCLKEN_GEAR_MASK_B_SHIFT      29
+#define MD_SRCCLKEN_GEAR_MASK_B_MASK       0x20000000
+#define MD_GEAR_VAL_SAMPLE_SWMODE_SHIFT    20
+#define MD_GEAR_VAL_SAMPLE_SWMODE_MASK     0x1ff00000
+#define MD_GEAR_VAL_SWMODE_SHIFT           8
+#define MD_GEAR_VAL_SWMODE_MASK            0x1ff00
+#define MD_GEAR_RDY_SWMODE_SHIFT           6
+#define MD_GEAR_RDY_SWMODE_MASK            0x40
+#define MD_GEAR_REQ_SWMODE_SHIFT           5
+#define MD_GEAR_REQ_SWMODE_MASK            0x20
+#define MD_GEAR_VAL_SAMPLE_SWMODE_EN_SHIFT 4
+#define MD_GEAR_VAL_SAMPLE_SWMODE_EN_MASK  0x10
+#define MD_GEAR_VAL_SWMODE_EN_SHIFT        3
+#define MD_GEAR_VAL_SWMODE_EN_MASK         0x8
+/* NOTE(review): bit 2 of this register has no macro here -- confirm
+ * against the datasheet whether it is reserved or simply undocumented. */
+#define MD_GEAR_RDY_SWMODE_EN_SHIFT        1
+#define MD_GEAR_RDY_SWMODE_EN_MASK         0x2
+#define MD_GEAR_REQ_SWMODE_EN_SHIFT        0
+#define MD_GEAR_REQ_SWMODE_EN_MASK         0x1
+
+/* DVFSRC_MD_VMODEM_REMAP(0x10012000 + 0x88) */
+/* Eight 3-bit remap slots plus a global enable in bit 31; the VMD and
+ * VSRAM remap registers below use the identical layout. */
+#define VMODEM_REMAP_EN_SHIFT              31
+#define VMODEM_REMAP_EN_MASK               0x80000000
+#define VMODEM_REMAP_7_SHIFT               28
+#define VMODEM_REMAP_7_MASK                0x70000000
+#define VMODEM_REMAP_6_SHIFT               24
+#define VMODEM_REMAP_6_MASK                0x7000000
+#define VMODEM_REMAP_5_SHIFT               20
+#define VMODEM_REMAP_5_MASK                0x700000
+#define VMODEM_REMAP_4_SHIFT               16
+#define VMODEM_REMAP_4_MASK                0x70000
+#define VMODEM_REMAP_3_SHIFT               12
+#define VMODEM_REMAP_3_MASK                0x7000
+#define VMODEM_REMAP_2_SHIFT               8
+#define VMODEM_REMAP_2_MASK                0x700
+#define VMODEM_REMAP_1_SHIFT               4
+#define VMODEM_REMAP_1_MASK                0x70
+#define VMODEM_REMAP_0_SHIFT               0
+#define VMODEM_REMAP_0_MASK                0x7
+
+/* DVFSRC_MD_VMD_REMAP(0x10012000 + 0x8C) */
+#define VMD_REMAP_EN_SHIFT                 31
+#define VMD_REMAP_EN_MASK                  0x80000000
+#define VMD_REMAP_7_SHIFT                  28
+#define VMD_REMAP_7_MASK                   0x70000000
+#define VMD_REMAP_6_SHIFT                  24
+#define VMD_REMAP_6_MASK                   0x7000000
+#define VMD_REMAP_5_SHIFT                  20
+#define VMD_REMAP_5_MASK                   0x700000
+#define VMD_REMAP_4_SHIFT                  16
+#define VMD_REMAP_4_MASK                   0x70000
+#define VMD_REMAP_3_SHIFT                  12
+#define VMD_REMAP_3_MASK                   0x7000
+#define VMD_REMAP_2_SHIFT                  8
+#define VMD_REMAP_2_MASK                   0x700
+#define VMD_REMAP_1_SHIFT                  4
+#define VMD_REMAP_1_MASK                   0x70
+#define VMD_REMAP_0_SHIFT                  0
+#define VMD_REMAP_0_MASK                   0x7
+
+/* DVFSRC_MD_VSRAM_REMAP(0x10012000 + 0x90) */
+#define VSRAM_REMAP_EN_SHIFT               31
+#define VSRAM_REMAP_EN_MASK                0x80000000
+#define VSRAM_REMAP_7_SHIFT                28
+#define VSRAM_REMAP_7_MASK                 0x70000000
+#define VSRAM_REMAP_6_SHIFT                24
+#define VSRAM_REMAP_6_MASK                 0x7000000
+#define VSRAM_REMAP_5_SHIFT                20
+#define VSRAM_REMAP_5_MASK                 0x700000
+#define VSRAM_REMAP_4_SHIFT                16
+#define VSRAM_REMAP_4_MASK                 0x70000
+#define VSRAM_REMAP_3_SHIFT                12
+#define VSRAM_REMAP_3_MASK                 0x7000
+#define VSRAM_REMAP_2_SHIFT                8
+#define VSRAM_REMAP_2_MASK                 0x700
+#define VSRAM_REMAP_1_SHIFT                4
+#define VSRAM_REMAP_1_MASK                 0x70
+#define VSRAM_REMAP_0_SHIFT                0
+#define VSRAM_REMAP_0_MASK                 0x7
+
+/* DVFSRC_HALT_SW_CONTROL(0x10012000 + 0x94) */
+#define RGU_HALT_INV_SHIFT                 1
+#define RGU_HALT_INV_MASK                  0x2
+#define RGU_HALT_DISABLE_SHIFT             0
+#define RGU_HALT_DISABLE_MASK              0x1
+
+/* DVFSRC_INT(0x10012000 + 0x98) */
+/* Interrupt status bits.  DVFSRC_INT_EN and DVFSRC_INT_CLR below use
+ * the same bit layout for enable and write-1-style clear macros
+ * respectively (clear semantics presumed from the _CLR name -- confirm
+ * against the datasheet). */
+#define MD_SCENARIO_INT_SHIFT              14
+#define MD_SCENARIO_INT_MASK               0x4000
+#define BW_MON_5_INT_SHIFT                 13
+#define BW_MON_5_INT_MASK                  0x2000
+#define BW_MON_4_INT_SHIFT                 12
+#define BW_MON_4_INT_MASK                  0x1000
+#define BW_MON_3_INT_SHIFT                 11
+#define BW_MON_3_INT_MASK                  0x800
+#define BW_MON_2_INT_SHIFT                 10
+#define BW_MON_2_INT_MASK                  0x400
+#define BW_MON_1_INT_SHIFT                 9
+#define BW_MON_1_INT_MASK                  0x200
+#define BW_MON_0_INT_SHIFT                 8
+#define BW_MON_0_INT_MASK                  0x100
+#define SW_4_INT_SHIFT                     7
+#define SW_4_INT_MASK                      0x80
+#define SW_3_INT_SHIFT                     6
+#define SW_3_INT_MASK                      0x40
+#define SW_2_INT_SHIFT                     5
+#define SW_2_INT_MASK                      0x20
+#define SW_1_INT_SHIFT                     4
+#define SW_1_INT_MASK                      0x10
+#define SW_0_INT_SHIFT                     3
+#define SW_0_INT_MASK                      0x8
+#define LEVEL_INT_SHIFT                    2
+#define LEVEL_INT_MASK                     0x4
+#define TIMEOUT_INT_SHIFT                  1
+#define TIMEOUT_INT_MASK                   0x2
+#define LATENCY_INT_SHIFT                  0
+#define LATENCY_INT_MASK                   0x1
+
+/* DVFSRC_INT_EN(0x10012000 + 0x9C) */
+#define MD_SCENARIO_INT_EN_SHIFT           14
+#define MD_SCENARIO_INT_EN_MASK            0x4000
+#define BW_MON_5_INT_EN_SHIFT              13
+#define BW_MON_5_INT_EN_MASK               0x2000
+#define BW_MON_4_INT_EN_SHIFT              12
+#define BW_MON_4_INT_EN_MASK               0x1000
+#define BW_MON_3_INT_EN_SHIFT              11
+#define BW_MON_3_INT_EN_MASK               0x800
+#define BW_MON_2_INT_EN_SHIFT              10
+#define BW_MON_2_INT_EN_MASK               0x400
+#define BW_MON_1_INT_EN_SHIFT              9
+#define BW_MON_1_INT_EN_MASK               0x200
+#define BW_MON_0_INT_EN_SHIFT              8
+#define BW_MON_0_INT_EN_MASK               0x100
+#define SW_4_INT_EN_SHIFT                  7
+#define SW_4_INT_EN_MASK                   0x80
+#define SW_3_INT_EN_SHIFT                  6
+#define SW_3_INT_EN_MASK                   0x40
+#define SW_2_INT_EN_SHIFT                  5
+#define SW_2_INT_EN_MASK                   0x20
+#define SW_1_INT_EN_SHIFT                  4
+#define SW_1_INT_EN_MASK                   0x10
+#define SW_0_INT_EN_SHIFT                  3
+#define SW_0_INT_EN_MASK                   0x8
+#define LEVEL_INT_EN_SHIFT                 2
+#define LEVEL_INT_EN_MASK                  0x4
+#define TIMEOUT_INT_EN_SHIFT               1
+#define TIMEOUT_INT_EN_MASK                0x2
+#define LATENCY_INT_EN_SHIFT               0
+#define LATENCY_INT_EN_MASK                0x1
+
+/* DVFSRC_INT_CLR(0x10012000 + 0xA0) */
+#define MD_SCENARIO_INT_CLR_SHIFT          14
+#define MD_SCENARIO_INT_CLR_MASK           0x4000
+#define BW_MON_5_INT_CLR_SHIFT             13
+#define BW_MON_5_INT_CLR_MASK              0x2000
+#define BW_MON_4_INT_CLR_SHIFT             12
+#define BW_MON_4_INT_CLR_MASK              0x1000
+#define BW_MON_3_INT_CLR_SHIFT             11
+#define BW_MON_3_INT_CLR_MASK              0x800
+#define BW_MON_2_INT_CLR_SHIFT             10
+#define BW_MON_2_INT_CLR_MASK              0x400
+#define BW_MON_1_INT_CLR_SHIFT             9
+#define BW_MON_1_INT_CLR_MASK              0x200
+#define BW_MON_0_INT_CLR_SHIFT             8
+#define BW_MON_0_INT_CLR_MASK              0x100
+#define SW_4_INT_CLR_SHIFT                 7
+#define SW_4_INT_CLR_MASK                  0x80
+#define SW_3_INT_CLR_SHIFT                 6
+#define SW_3_INT_CLR_MASK                  0x40
+#define SW_2_INT_CLR_SHIFT                 5
+#define SW_2_INT_CLR_MASK                  0x20
+#define SW_1_INT_CLR_SHIFT                 4
+#define SW_1_INT_CLR_MASK                  0x10
+#define SW_0_INT_CLR_SHIFT                 3
+#define SW_0_INT_CLR_MASK                  0x8
+#define LEVEL_INT_CLR_SHIFT                2
+#define LEVEL_INT_CLR_MASK                 0x4
+#define TIMEOUT_INT_CLR_SHIFT              1
+#define TIMEOUT_INT_CLR_MASK               0x2
+#define LATENCY_INT_CLR_SHIFT              0
+#define LATENCY_INT_CLR_MASK               0x1
+
+/* DVFSRC_BW_MON_WINDOW(0x10012000 + 0xA4) */
+/* 4-bit monitor window per bandwidth monitor 0..5. */
+#define BW_MON_5_WINDOW_SHIFT              20
+#define BW_MON_5_WINDOW_MASK               0xf00000
+#define BW_MON_4_WINDOW_SHIFT              16
+#define BW_MON_4_WINDOW_MASK               0xf0000
+#define BW_MON_3_WINDOW_SHIFT              12
+#define BW_MON_3_WINDOW_MASK               0xf000
+#define BW_MON_2_WINDOW_SHIFT              8
+#define BW_MON_2_WINDOW_MASK               0xf00
+#define BW_MON_1_WINDOW_SHIFT              4
+#define BW_MON_1_WINDOW_MASK               0xf0
+#define BW_MON_0_WINDOW_SHIFT              0
+#define BW_MON_0_WINDOW_MASK               0xf
+
+/* DVFSRC_BW_MON_THRES_1(0x10012000 + 0xA8) */
+/* 4-bit bit0/bit1 thresholds for monitors 0..2; THRES_2 covers 3..5. */
+#define BW_MON_2_BIT1_THRES_SHIFT          20
+#define BW_MON_2_BIT1_THRES_MASK           0xf00000
+#define BW_MON_2_BIT0_THRES_SHIFT          16
+#define BW_MON_2_BIT0_THRES_MASK           0xf0000
+#define BW_MON_1_BIT1_THRES_SHIFT          12
+#define BW_MON_1_BIT1_THRES_MASK           0xf000
+#define BW_MON_1_BIT0_THRES_SHIFT          8
+#define BW_MON_1_BIT0_THRES_MASK           0xf00
+#define BW_MON_0_BIT1_THRES_SHIFT          4
+#define BW_MON_0_BIT1_THRES_MASK           0xf0
+#define BW_MON_0_BIT0_THRES_SHIFT          0
+#define BW_MON_0_BIT0_THRES_MASK           0xf
+
+/* DVFSRC_BW_MON_THRES_2(0x10012000 + 0xAC) */
+#define BW_MON_5_BIT1_THRES_SHIFT          20
+#define BW_MON_5_BIT1_THRES_MASK           0xf00000
+#define BW_MON_5_BIT0_THRES_SHIFT          16
+#define BW_MON_5_BIT0_THRES_MASK           0xf0000
+#define BW_MON_4_BIT1_THRES_SHIFT          12
+#define BW_MON_4_BIT1_THRES_MASK           0xf000
+#define BW_MON_4_BIT0_THRES_SHIFT          8
+#define BW_MON_4_BIT0_THRES_MASK           0xf00
+#define BW_MON_3_BIT1_THRES_SHIFT          4
+#define BW_MON_3_BIT1_THRES_MASK           0xf0
+#define BW_MON_3_BIT0_THRES_SHIFT          0
+#define BW_MON_3_BIT0_THRES_MASK           0xf
+
+/* DVFSRC_MD_TURBO(0x10012000 + 0xB0) */
+#define MD_TURBO_THRES_SHIFT               16
+#define MD_TURBO_THRES_MASK                0x1fff0000
+#define MD_TURBO_SW_EN_SHIFT               1
+#define MD_TURBO_SW_EN_MASK                0x2
+#define MD_TURBO_SW_MODE_SHIFT             0
+#define MD_TURBO_SW_MODE_MASK              0x1
+
+/* DVFSRC_DEBOUNCE_FOUR(0x10012000 + 0xD0) */
+/* NOTE(review): offsets jump from 0xB0 to 0xD0 here -- registers in
+ * the 0xB4..0xCC range are not described in this header. */
+#define RG_DEBOUNCE_FOUR_SHIFT             0
+#define RG_DEBOUNCE_FOUR_MASK              0xffff
+
+/* DVFSRC_DEBOUNCE_RISE_FALL(0x10012000 + 0xD4) */
+#define RG_DEBOUNCE_FALL_SHIFT             16
+#define RG_DEBOUNCE_FALL_MASK              0xffff0000
+#define RG_DEBOUNCE_RISE_SHIFT             0
+#define RG_DEBOUNCE_RISE_MASK              0xffff
+
+/* DVFSRC_TIMEOUT_NEXTREQ(0x10012000 + 0xD8) */
+#define RG_NEXT_REQ_SHIFT                  8
+#define RG_NEXT_REQ_MASK                   0xffff00
+#define RG_REQ_TIMEOUT_SHIFT               0
+#define RG_REQ_TIMEOUT_MASK                0xff
+
+/* DVFSRC_LEVEL(0x10012000 + 0xDC) */
+/* Current vs. target DVFS level words. */
+#define CURRENT_LEVEL_SHIFT                16
+#define CURRENT_LEVEL_MASK                 0xffff0000
+#define TARGET_LEVEL_SHIFT                 0
+#define TARGET_LEVEL_MASK                  0xffff
+
+/* DVFSRC_LEVEL_LABEL_0_1(0x10012000 + 0xE0) */
+/* Each LEVEL_LABEL register packs two level descriptors; per level:
+ * 3-bit VCORE, EMI and VMODEM gear codes.  Registers 0xE0..0xFC below
+ * repeat this layout for levels 0..15. */
+#define LEVEL1_VMODEM_SHIFT                24
+#define LEVEL1_VMODEM_MASK                 0x7000000
+#define LEVEL1_EMI_SHIFT                   20
+#define LEVEL1_EMI_MASK                    0x700000
+#define LEVEL1_VCORE_SHIFT                 16
+#define LEVEL1_VCORE_MASK                  0x70000
+#define LEVEL0_VMODEM_SHIFT                8
+#define LEVEL0_VMODEM_MASK                 0x700
+#define LEVEL0_EMI_SHIFT                   4
+#define LEVEL0_EMI_MASK                    0x70
+#define LEVEL0_VCORE_SHIFT                 0
+#define LEVEL0_VCORE_MASK                  0x7
+
+/* DVFSRC_LEVEL_LABEL_2_3(0x10012000 + 0xE4) */
+#define LEVEL3_VMODEM_SHIFT                24
+#define LEVEL3_VMODEM_MASK                 0x7000000
+#define LEVEL3_EMI_SHIFT                   20
+#define LEVEL3_EMI_MASK                    0x700000
+#define LEVEL3_VCORE_SHIFT                 16
+#define LEVEL3_VCORE_MASK                  0x70000
+#define LEVEL2_VMODEM_SHIFT                8
+#define LEVEL2_VMODEM_MASK                 0x700
+#define LEVEL2_EMI_SHIFT                   4
+#define LEVEL2_EMI_MASK                    0x70
+#define LEVEL2_VCORE_SHIFT                 0
+#define LEVEL2_VCORE_MASK                  0x7
+
+/* DVFSRC_LEVEL_LABEL_4_5(0x10012000 + 0xE8) */
+#define LEVEL5_VMODEM_SHIFT                24
+#define LEVEL5_VMODEM_MASK                 0x7000000
+#define LEVEL5_EMI_SHIFT                   20
+#define LEVEL5_EMI_MASK                    0x700000
+#define LEVEL5_VCORE_SHIFT                 16
+#define LEVEL5_VCORE_MASK                  0x70000
+#define LEVEL4_VMODEM_SHIFT                8
+#define LEVEL4_VMODEM_MASK                 0x700
+#define LEVEL4_EMI_SHIFT                   4
+#define LEVEL4_EMI_MASK                    0x70
+#define LEVEL4_VCORE_SHIFT                 0
+#define LEVEL4_VCORE_MASK                  0x7
+
+/* DVFSRC_LEVEL_LABEL_6_7(0x10012000 + 0xEC) */
+#define LEVEL7_VMODEM_SHIFT                24
+#define LEVEL7_VMODEM_MASK                 0x7000000
+#define LEVEL7_EMI_SHIFT                   20
+#define LEVEL7_EMI_MASK                    0x700000
+#define LEVEL7_VCORE_SHIFT                 16
+#define LEVEL7_VCORE_MASK                  0x70000
+#define LEVEL6_VMODEM_SHIFT                8
+#define LEVEL6_VMODEM_MASK                 0x700
+#define LEVEL6_EMI_SHIFT                   4
+#define LEVEL6_EMI_MASK                    0x70
+#define LEVEL6_VCORE_SHIFT                 0
+#define LEVEL6_VCORE_MASK                  0x7
+
+/* DVFSRC_LEVEL_LABEL_8_9(0x10012000 + 0xF0) */
+#define LEVEL9_VMODEM_SHIFT                24
+#define LEVEL9_VMODEM_MASK                 0x7000000
+#define LEVEL9_EMI_SHIFT                   20
+#define LEVEL9_EMI_MASK                    0x700000
+#define LEVEL9_VCORE_SHIFT                 16
+#define LEVEL9_VCORE_MASK                  0x70000
+#define LEVEL8_VMODEM_SHIFT                8
+#define LEVEL8_VMODEM_MASK                 0x700
+#define LEVEL8_EMI_SHIFT                   4
+#define LEVEL8_EMI_MASK                    0x70
+#define LEVEL8_VCORE_SHIFT                 0
+#define LEVEL8_VCORE_MASK                  0x7
+
+/* DVFSRC_LEVEL_LABEL_10_11(0x10012000 + 0xF4) */
+#define LEVEL11_VMODEM_SHIFT               24
+#define LEVEL11_VMODEM_MASK                0x7000000
+#define LEVEL11_EMI_SHIFT                  20
+#define LEVEL11_EMI_MASK                   0x700000
+#define LEVEL11_VCORE_SHIFT                16
+#define LEVEL11_VCORE_MASK                 0x70000
+#define LEVEL10_VMODEM_SHIFT               8
+#define LEVEL10_VMODEM_MASK                0x700
+#define LEVEL10_EMI_SHIFT                  4
+#define LEVEL10_EMI_MASK                   0x70
+#define LEVEL10_VCORE_SHIFT                0
+#define LEVEL10_VCORE_MASK                 0x7
+
+/* DVFSRC_LEVEL_LABEL_12_13(0x10012000 + 0xF8) */
+#define LEVEL13_VMODEM_SHIFT               24
+#define LEVEL13_VMODEM_MASK                0x7000000
+#define LEVEL13_EMI_SHIFT                  20
+#define LEVEL13_EMI_MASK                   0x700000
+#define LEVEL13_VCORE_SHIFT                16
+#define LEVEL13_VCORE_MASK                 0x70000
+#define LEVEL12_VMODEM_SHIFT               8
+#define LEVEL12_VMODEM_MASK                0x700
+#define LEVEL12_EMI_SHIFT                  4
+#define LEVEL12_EMI_MASK                   0x70
+#define LEVEL12_VCORE_SHIFT                0
+#define LEVEL12_VCORE_MASK                 0x7
+
+/* DVFSRC_LEVEL_LABEL_14_15(0x10012000 + 0xFC) */
+#define LEVEL15_VMODEM_SHIFT               24
+#define LEVEL15_VMODEM_MASK                0x7000000
+#define LEVEL15_EMI_SHIFT                  20
+#define LEVEL15_EMI_MASK                   0x700000
+#define LEVEL15_VCORE_SHIFT                16
+#define LEVEL15_VCORE_MASK                 0x70000
+#define LEVEL14_VMODEM_SHIFT               8
+#define LEVEL14_VMODEM_MASK                0x700
+#define LEVEL14_EMI_SHIFT                  4
+#define LEVEL14_EMI_MASK                   0x70
+#define LEVEL14_VCORE_SHIFT                0
+#define LEVEL14_VCORE_MASK                 0x7
+
+/* DVFSRC_MM_BW_0(0x10012000 + 0x100) */
+/* Bandwidth value registers: MM_BW_0..15 and MD_BW_0..7 are full
+ * 32-bit words; SW_BW_0..4 expose an 8-bit value each. */
+#define MM_BW_0_SHIFT                      0
+#define MM_BW_0_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_1(0x10012000 + 0x104) */
+#define MM_BW_1_SHIFT                      0
+#define MM_BW_1_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_2(0x10012000 + 0x108) */
+#define MM_BW_2_SHIFT                      0
+#define MM_BW_2_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_3(0x10012000 + 0x10C) */
+#define MM_BW_3_SHIFT                      0
+#define MM_BW_3_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_4(0x10012000 + 0x110) */
+#define MM_BW_4_SHIFT                      0
+#define MM_BW_4_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_5(0x10012000 + 0x114) */
+#define MM_BW_5_SHIFT                      0
+#define MM_BW_5_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_6(0x10012000 + 0x118) */
+#define MM_BW_6_SHIFT                      0
+#define MM_BW_6_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_7(0x10012000 + 0x11C) */
+#define MM_BW_7_SHIFT                      0
+#define MM_BW_7_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_8(0x10012000 + 0x120) */
+#define MM_BW_8_SHIFT                      0
+#define MM_BW_8_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_9(0x10012000 + 0x124) */
+#define MM_BW_9_SHIFT                      0
+#define MM_BW_9_MASK                       0xffffffff
+
+/* DVFSRC_MM_BW_10(0x10012000 + 0x128) */
+#define MM_BW_10_SHIFT                     0
+#define MM_BW_10_MASK                      0xffffffff
+
+/* DVFSRC_MM_BW_11(0x10012000 + 0x12C) */
+#define MM_BW_11_SHIFT                     0
+#define MM_BW_11_MASK                      0xffffffff
+
+/* DVFSRC_MM_BW_12(0x10012000 + 0x130) */
+#define MM_BW_12_SHIFT                     0
+#define MM_BW_12_MASK                      0xffffffff
+
+/* DVFSRC_MM_BW_13(0x10012000 + 0x134) */
+#define MM_BW_13_SHIFT                     0
+#define MM_BW_13_MASK                      0xffffffff
+
+/* DVFSRC_MM_BW_14(0x10012000 + 0x138) */
+#define MM_BW_14_SHIFT                     0
+#define MM_BW_14_MASK                      0xffffffff
+
+/* DVFSRC_MM_BW_15(0x10012000 + 0x13C) */
+#define MM_BW_15_SHIFT                     0
+#define MM_BW_15_MASK                      0xffffffff
+
+/* DVFSRC_MD_BW_0(0x10012000 + 0x140) */
+#define MD_BW_0_SHIFT                      0
+#define MD_BW_0_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_1(0x10012000 + 0x144) */
+#define MD_BW_1_SHIFT                      0
+#define MD_BW_1_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_2(0x10012000 + 0x148) */
+#define MD_BW_2_SHIFT                      0
+#define MD_BW_2_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_3(0x10012000 + 0x14C) */
+#define MD_BW_3_SHIFT                      0
+#define MD_BW_3_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_4(0x10012000 + 0x150) */
+#define MD_BW_4_SHIFT                      0
+#define MD_BW_4_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_5(0x10012000 + 0x154) */
+#define MD_BW_5_SHIFT                      0
+#define MD_BW_5_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_6(0x10012000 + 0x158) */
+#define MD_BW_6_SHIFT                      0
+#define MD_BW_6_MASK                       0xffffffff
+
+/* DVFSRC_MD_BW_7(0x10012000 + 0x15C) */
+#define MD_BW_7_SHIFT                      0
+#define MD_BW_7_MASK                       0xffffffff
+
+/* DVFSRC_SW_BW_0(0x10012000 + 0x160) */
+#define SW_BW_0_SHIFT                      0
+#define SW_BW_0_MASK                       0xff
+
+/* DVFSRC_SW_BW_1(0x10012000 + 0x164) */
+#define SW_BW_1_SHIFT                      0
+#define SW_BW_1_MASK                       0xff
+
+/* DVFSRC_SW_BW_2(0x10012000 + 0x168) */
+#define SW_BW_2_SHIFT                      0
+#define SW_BW_2_MASK                       0xff
+
+/* DVFSRC_SW_BW_3(0x10012000 + 0x16C) */
+#define SW_BW_3_SHIFT                      0
+#define SW_BW_3_MASK                       0xff
+
+/* DVFSRC_SW_BW_4(0x10012000 + 0x170) */
+#define SW_BW_4_SHIFT                      0
+#define SW_BW_4_MASK                       0xff
+
+/* DVFSRC_QOS_EN(0x10012000 + 0x180) */
+/* Per-source enables for average (AVG) and peak (PEK) QoS accounting,
+ * plus the final combining modes in bits 14..16. */
+#define EN_FINAL_AVG_PEK_MAX_SHIFT         16
+#define EN_FINAL_AVG_PEK_MAX_MASK          0x10000
+#define EN_FINAL_PEK_SHIFT                 15
+#define EN_FINAL_PEK_MASK                  0x8000
+#define EN_FINAL_AVG_SHIFT                 14
+#define EN_FINAL_AVG_MASK                  0x4000
+#define EN_PEK_SW4_SHIFT                   13
+#define EN_PEK_SW4_MASK                    0x2000
+#define EN_PEK_SW3_SHIFT                   12
+#define EN_PEK_SW3_MASK                    0x1000
+#define EN_PEK_SW2_SHIFT                   11
+#define EN_PEK_SW2_MASK                    0x800
+#define EN_PEK_SW1_SHIFT                   10
+#define EN_PEK_SW1_MASK                    0x400
+#define EN_PEK_SW0_SHIFT                   9
+#define EN_PEK_SW0_MASK                    0x200
+#define EN_PEK_MD_SHIFT                    8
+#define EN_PEK_MD_MASK                     0x100
+#define EN_PEK_MM_SHIFT                    7
+#define EN_PEK_MM_MASK                     0x80
+#define EN_AVG_SW4_SHIFT                   6
+#define EN_AVG_SW4_MASK                    0x40
+#define EN_AVG_SW3_SHIFT                   5
+#define EN_AVG_SW3_MASK                    0x20
+#define EN_AVG_SW2_SHIFT                   4
+#define EN_AVG_SW2_MASK                    0x10
+#define EN_AVG_SW1_SHIFT                   3
+#define EN_AVG_SW1_MASK                    0x8
+#define EN_AVG_SW0_SHIFT                   2
+#define EN_AVG_SW0_MASK                    0x4
+#define EN_AVG_MD_SHIFT                    1
+#define EN_AVG_MD_MASK                     0x2
+#define EN_AVG_MM_SHIFT                    0
+#define EN_AVG_MM_MASK                     0x1
+
+/* DVFSRC_ISP_HRT(0x10012000 + 0x190) */
+#define ISP_HRT_SHIFT                      0
+#define ISP_HRT_MASK                       0xff
+
+/* DVFSRC_FORCE(0x10012000 + 0x300) */
+/* Force override of current/target level words (mirrors DVFSRC_LEVEL). */
+#define CURRENT_FORCE_SHIFT                16
+#define CURRENT_FORCE_MASK                 0xffff0000
+#define TARGET_FORCE_SHIFT                 0
+#define TARGET_FORCE_MASK                  0xffff
+
+/* DVFSRC_SEC_SW_REQ(0x10012000 + 0x304) */
+#define EMI_SEC_SW_SHIFT                   2
+#define EMI_SEC_SW_MASK                    0xc
+#define VCORE_SEC_SW_SHIFT                 0
+#define VCORE_SEC_SW_MASK                  0x3
+
+/* DVFSRC_LAST(0x10012000 + 0x308) */
+#define LAST_ONE_SHIFT                     0
+#define LAST_ONE_MASK                      0x7
+
+/* DVFSRC_LAST_L(0x10012000 + 0x30C) */
+#define LAST_ONE_L_SHIFT                   0
+#define LAST_ONE_L_MASK                    0x7
+
+/* DVFSRC_MD_SCENARIO(0x10012000 + 0x310) */
+#define MD_SCENARIO_SHIFT                  0
+#define MD_SCENARIO_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_0_0(0x10012000 + 0x400) */
+#define RECORD_0_SHIFT                     0
+#define RECORD_0_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_0_1(0x10012000 + 0x404) */
+#define RECORD_0_SHIFT                     0
+#define RECORD_0_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_0_2(0x10012000 + 0x408) */
+#define RECORD_0_SHIFT                     0
+#define RECORD_0_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_1_0(0x10012000 + 0x40C) */
+#define RECORD_1_SHIFT                     0
+#define RECORD_1_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_1_1(0x10012000 + 0x410) */
+#define RECORD_1_SHIFT                     0
+#define RECORD_1_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_1_2(0x10012000 + 0x414) */
+#define RECORD_1_SHIFT                     0
+#define RECORD_1_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_2_0(0x10012000 + 0x418) */
+#define RECORD_2_SHIFT                     0
+#define RECORD_2_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_2_1(0x10012000 + 0x41C) */
+#define RECORD_2_SHIFT                     0
+#define RECORD_2_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_2_2(0x10012000 + 0x420) */
+#define RECORD_2_SHIFT                     0
+#define RECORD_2_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_3_0(0x10012000 + 0x424) */
+#define RECORD_3_SHIFT                     0
+#define RECORD_3_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_3_1(0x10012000 + 0x428) */
+#define RECORD_3_SHIFT                     0
+#define RECORD_3_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_3_2(0x10012000 + 0x42C) */
+#define RECORD_3_SHIFT                     0
+#define RECORD_3_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_4_0(0x10012000 + 0x430) */
+#define RECORD_4_SHIFT                     0
+#define RECORD_4_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_4_1(0x10012000 + 0x434) */
+#define RECORD_4_SHIFT                     0
+#define RECORD_4_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_4_2(0x10012000 + 0x438) */
+#define RECORD_4_SHIFT                     0
+#define RECORD_4_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_5_0(0x10012000 + 0x43C) */
+#define RECORD_5_SHIFT                     0
+#define RECORD_5_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_5_1(0x10012000 + 0x440) */
+#define RECORD_5_SHIFT                     0
+#define RECORD_5_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_5_2(0x10012000 + 0x444) */
+#define RECORD_5_SHIFT                     0
+#define RECORD_5_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_6_0(0x10012000 + 0x448) */
+#define RECORD_6_SHIFT                     0
+#define RECORD_6_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_6_1(0x10012000 + 0x44C) */
+#define RECORD_6_SHIFT                     0
+#define RECORD_6_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_6_2(0x10012000 + 0x450) */
+#define RECORD_6_SHIFT                     0
+#define RECORD_6_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_7_0(0x10012000 + 0x454) */
+#define RECORD_7_SHIFT                     0
+#define RECORD_7_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_7_1(0x10012000 + 0x458) */
+#define RECORD_7_SHIFT                     0
+#define RECORD_7_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_7_2(0x10012000 + 0x45C) */
+#define RECORD_7_SHIFT                     0
+#define RECORD_7_MASK                      0xffffffff
+
+/* DVFSRC_RECORD_0_L_0(0x10012000 + 0x460) */
+#define RECORD_0_L_SHIFT                   0
+#define RECORD_0_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_0_L_1(0x10012000 + 0x464) */
+#define RECORD_0_L_SHIFT                   0
+#define RECORD_0_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_0_L_2(0x10012000 + 0x468) */
+#define RECORD_0_L_SHIFT                   0
+#define RECORD_0_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_1_L_0(0x10012000 + 0x46C) */
+#define RECORD_1_L_SHIFT                   0
+#define RECORD_1_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_1_L_1(0x10012000 + 0x470) */
+#define RECORD_1_L_SHIFT                   0
+#define RECORD_1_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_1_L_2(0x10012000 + 0x474) */
+#define RECORD_1_L_SHIFT                   0
+#define RECORD_1_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_2_L_0(0x10012000 + 0x478) */
+#define RECORD_2_L_SHIFT                   0
+#define RECORD_2_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_2_L_1(0x10012000 + 0x47C) */
+#define RECORD_2_L_SHIFT                   0
+#define RECORD_2_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_2_L_2(0x10012000 + 0x480) */
+#define RECORD_2_L_SHIFT                   0
+#define RECORD_2_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_3_L_0(0x10012000 + 0x484) */
+#define RECORD_3_L_SHIFT                   0
+#define RECORD_3_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_3_L_1(0x10012000 + 0x488) */
+#define RECORD_3_L_SHIFT                   0
+#define RECORD_3_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_3_L_2(0x10012000 + 0x48C) */
+#define RECORD_3_L_SHIFT                   0
+#define RECORD_3_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_4_L_0(0x10012000 + 0x490) */
+#define RECORD_4_L_SHIFT                   0
+#define RECORD_4_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_4_L_1(0x10012000 + 0x494) */
+#define RECORD_4_L_SHIFT                   0
+#define RECORD_4_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_4_L_2(0x10012000 + 0x498) */
+#define RECORD_4_L_SHIFT                   0
+#define RECORD_4_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_5_L_0(0x10012000 + 0x49C) */
+#define RECORD_5_L_SHIFT                   0
+#define RECORD_5_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_5_L_1(0x10012000 + 0x4A0) */
+#define RECORD_5_L_SHIFT                   0
+#define RECORD_5_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_5_L_2(0x10012000 + 0x4A4) */
+#define RECORD_5_L_SHIFT                   0
+#define RECORD_5_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_6_L_0(0x10012000 + 0x4A8) */
+#define RECORD_6_L_SHIFT                   0
+#define RECORD_6_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_6_L_1(0x10012000 + 0x4AC) */
+#define RECORD_6_L_SHIFT                   0
+#define RECORD_6_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_6_L_2(0x10012000 + 0x4B0) */
+#define RECORD_6_L_SHIFT                   0
+#define RECORD_6_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_7_L_0(0x10012000 + 0x4B4) */
+#define RECORD_7_L_SHIFT                   0
+#define RECORD_7_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_7_L_1(0x10012000 + 0x4B8) */
+#define RECORD_7_L_SHIFT                   0
+#define RECORD_7_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_7_L_2(0x10012000 + 0x4BC) */
+#define RECORD_7_L_SHIFT                   0
+#define RECORD_7_L_MASK                    0xffffffff
+
+/* DVFSRC_RECORD_MD_0(0x10012000 + 0x4C0) */
+#define RECORD_MD_0_SHIFT                  0
+#define RECORD_MD_0_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_1(0x10012000 + 0x4C4) */
+#define RECORD_MD_1_SHIFT                  0
+#define RECORD_MD_1_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_2(0x10012000 + 0x4C8) */
+#define RECORD_MD_2_SHIFT                  0
+#define RECORD_MD_2_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_3(0x10012000 + 0x4CC) */
+#define RECORD_MD_3_SHIFT                  0
+#define RECORD_MD_3_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_4(0x10012000 + 0x4D0) */
+#define RECORD_MD_4_SHIFT                  0
+#define RECORD_MD_4_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_5(0x10012000 + 0x4D4) */
+#define RECORD_MD_5_SHIFT                  0
+#define RECORD_MD_5_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_6(0x10012000 + 0x4D8) */
+#define RECORD_MD_6_SHIFT                  0
+#define RECORD_MD_6_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_MD_7(0x10012000 + 0x4DC) */
+#define RECORD_MD_7_SHIFT                  0
+#define RECORD_MD_7_MASK                   0xffffffff
+
+/* DVFSRC_RECORD_COUNT(0x10012000 + 0x4F0) */
+#define COUNTER_SHIFT                      0
+#define COUNTER_MASK                       0x7fffff
+
+/* DVFSRC_RSRV_0(0x10012000 + 0x600) */
+#define RSRV_0_SHIFT                       6
+#define RSRV_0_MASK                        0xffffffc0
+#define RSRV_STATUS_SHIFT                  0
+#define RSRV_STATUS_MASK                   0x3f
+
+/* DVFSRC_RSRV_1(0x10012000 + 0x604) */
+#define RSRV_1_SHIFT                       4
+#define RSRV_1_MASK                        0xfffffff0
+#define ECO_FUL_SHIFT                      3
+#define ECO_FUL_MASK                       0x8
+#define DVFS_FUL_SHIFT                     2
+#define DVFS_FUL_MASK                      0x4
+#define MASK2_EN_SHIFT                     1
+#define MASK2_EN_MASK                      0x2
+#define MASK_EN_SHIFT                      0
+#define MASK_EN_MASK                       0x1
+
+/* DVFSRC_RSRV_2(0x10012000 + 0x608) */
+#define RSRV_2_SHIFT                       0
+#define RSRV_2_MASK                        0xffffffff
+
+/* DVFSRC_RSRV_3(0x10012000 + 0x60C) */
+#define RSRV_3_SHIFT                       0
+#define RSRV_3_MASK                        0xffffffff
+
+/* DVFSRC_RSRV_4(0x10012000 + 0x610) */
+#define RSRV_4_SHIFT                       0
+#define RSRV_4_MASK                        0xffffffff
+
+/* DVFSRC_RSRV_5(0x10012000 + 0x614) */
+#define RSRV_5_SHIFT                       0
+#define RSRV_5_MASK                        0xffffffff
+
+#endif /* __MTK_DVFSRC_REG_H */
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_v2.c b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_v2.c
new file mode 100644
index 0000000..f4859b7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/mediatek/mt2731/mtk-dvfsrc_v2.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+
+#include <governor.h>
+
+#ifdef CONFIG_MTK_AEE_FEATURE
+#include <mt-plat/aee.h>
+#endif
+#include <mtk_spm.h>
+#include "mtk-dvfsrc.h"
+#include "mtk-dvfsrc_reg.h"
+
+static struct dvfsrc *dvfsrc;	/* single driver-wide instance, assigned in dvfsrc_probe() */
+
+static int dvfsrc_target_array[DVFSRC_NUM_CLASSES];	/* last level committed per request class */
+
+#define DVFSRC_REG(offset) (dvfsrc->regs + offset)	/* address of a DVFSRC register relative to the ioremapped base */
+
+u32 dvfsrc_read(u32 offset)	/* MMIO read of the DVFSRC register at @offset */
+{
+	return readl(DVFSRC_REG(offset));
+}
+
+void dvfsrc_write(u32 offset, u32 val)	/* MMIO write of @val to the DVFSRC register at @offset */
+{
+	writel(val, DVFSRC_REG(offset));
+}
+
+static void dvfsrc_restore(void)	/* re-commit the default OPP level for every request class */
+{
+	commit_data(DVFSRC_DDR_OPP,
+		DVFSRC_DDR_OPP_DEFAULT_VALUE);
+	commit_data(DVFSRC_VCORE_OPP,
+		DVFSRC_VCORE_OPP_DEFAULT_VALUE);
+	commit_data(DVFSRC_VCORE_DVFS_FORCE_OPP,
+		DVFSRC_VCORE_DVFS_FORCE_OPP_DEFAULT_VALUE);
+}
+
+static void dvfsrc_target_restore(void)	/* reset cached targets; note FORCE_OPP is deliberately not reset here */
+{
+	dvfsrc_target_array[DVFSRC_DDR_OPP]
+		= DVFSRC_DDR_OPP_DEFAULT_VALUE;
+	dvfsrc_target_array[DVFSRC_VCORE_OPP]
+		= DVFSRC_VCORE_OPP_DEFAULT_VALUE;
+}
+
+void dvfsrc_enable(int dvfsrc_en)	/* enable (1) / disable (0) DVFSRC; any other value is ignored */
+{
+	if (dvfsrc_en > 1 || dvfsrc_en < 0)
+		return;
+
+	dvfsrc->dvfsrc_enabled = dvfsrc_en;
+	dvfsrc->opp_forced = 0;	/* drop any previously forced OPP */
+	sprintf(dvfsrc->force_start, "0");	/* presumably force-range string buffers — verify against interface code */
+	sprintf(dvfsrc->force_end, "0");
+
+	dvfsrc_restore();
+	dvfsrc_target_restore();
+	if (dvfsrc_en)
+		dvfsrc_en |= (dvfsrc->dvfsrc_flag << 1);	/* NOTE(review): dead store to a local parameter; result never used — confirm intent */
+}
+
+void dvfsrc_flag_set(int flag)	/* set the flag consumed by dvfsrc_enable() */
+{
+	dvfsrc->dvfsrc_flag = flag;
+}
+
+int dvfsrc_flag_get(void)	/* return the currently stored flag */
+{
+	return	dvfsrc->dvfsrc_flag;
+}
+
+static int dvfsrc_common_init(void)	/* platform-independent init: build the OPP level map and table */
+{
+	dvfsrc_opp_level_mapping();
+	dvfsrc_opp_table_init();
+
+	return 0;	/* always succeeds */
+}
+
+int is_dvfsrc_enabled(void)	/* 1 if the driver is probed and enabled, else 0 */
+{
+	if (dvfsrc)	/* guards callers that may run before probe */
+		return dvfsrc->dvfsrc_enabled == 1;
+
+	return 0;
+}
+
+static void get_dvfsrc_info(char *p)	/* append cached VCORE/FORCE targets to @p; no bounds check — caller supplies a large buffer */
+{
+	p += sprintf(p, "%-24s: 0x%x\n",
+			"DVFSRC_VCORE_OPP",
+			dvfsrc_target_array[DVFSRC_VCORE_OPP]);
+	p += sprintf(p, "%-24s: 0x%x\n",
+			"DVFSRC_FORCE_OPP",
+			dvfsrc_target_array[DVFSRC_VCORE_DVFS_FORCE_OPP]);
+}
+char *dvfsrc_dump_reg(char *ptr)	/* dump OPP/reg/record/SPM/info sections into @ptr if non-NULL, else to the kernel log */
+{
+	char buf[1024];	/* NOTE(review): each helper must emit < 1024 bytes — confirm */
+
+	memset(buf, '\0', sizeof(buf));
+	get_opp_info(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+
+	memset(buf, '\0', sizeof(buf));
+	get_dvfsrc_reg(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+
+	memset(buf, '\0', sizeof(buf));
+	get_dvfsrc_record(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+	memset(buf, '\0', sizeof(buf));
+	get_spm_reg(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+	memset(buf, '\0', sizeof(buf));
+	get_dvfsrc_info(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+	return ptr;	/* advanced past everything written (or unchanged NULL) */
+}
+
+static void get_dvfsrc_latch_reg(char *p)	/* append the DVFSRC_RECORD_*_L latch registers (0x460..0x4BC) to @p */
+{
+	u32 i;
+
+	for (i = 0x460; i <= 0x4BC; i += 4)
+		p += sprintf(p, "DVFSRC[0x%x]:0x%x\n",
+			i, dvfsrc_read(i));
+}
+
+char *dvfsrc_dump_lacth_reg(char *ptr)	/* NOTE(review): "lacth" typo retained — external callers use this exact name */
+{
+	char buf[1024];	/* 24 registers * <24 chars each fits comfortably */
+
+	memset(buf, '\0', sizeof(buf));
+	get_dvfsrc_latch_reg(buf);
+	if (ptr)
+		ptr += sprintf(ptr, "%s\n", buf);
+	else
+		pr_info("%s\n", buf);
+	return ptr;
+}
+
+static struct devfreq_dev_profile devfreq_profile = {
+	.polling_ms	= 0,	/* no periodic polling; no target/get_dev_status callbacks either */
+};
+
+static int governor_event_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)	/* intentionally a no-op for every governor event */
+{
+	switch (event) {
+	case DEVFREQ_GOV_SUSPEND:
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct devfreq_governor dvfsrc_governor = {
+	.name = "dvfsrc",	/* matched by name in devm_devfreq_add_device() below */
+	.event_handler = governor_event_handler,
+};
+
+/* DVFSRC Set Request: commit a level to HW and cache it for later queries */
+int dvfsrc_get_reguest(unsigned int id)	/* NOTE(review): "reguest" typo retained — exported symbol name is ABI */
+{
+	int ret = -1;	/* returned for out-of-range @id */
+
+	if (id >= DVFSRC_NUM_CLASSES) {
+		pr_info("%s:The request is not supported %d\n", __func__, id);
+		return ret;
+	}
+
+	return dvfsrc_target_array[id];
+}
+EXPORT_SYMBOL(dvfsrc_get_reguest);
+
+void dvfsrc_set_ddr_opp(int level)	/* commit a DDR OPP request and remember it */
+{
+	commit_data(DVFSRC_DDR_OPP, level);
+	dvfsrc_target_array[DVFSRC_DDR_OPP] = level;
+}
+EXPORT_SYMBOL(dvfsrc_set_ddr_opp);
+
+void dvfsrc_set_vcore_opp(int level)	/* commit a VCORE OPP request and remember it */
+{
+	commit_data(DVFSRC_VCORE_OPP, level);
+	dvfsrc_target_array[DVFSRC_VCORE_OPP] = level;
+}
+EXPORT_SYMBOL(dvfsrc_set_vcore_opp);
+
+void dvfsrc_set_vcore_dvfs_force_opp(int level)	/* force a fixed VCORE DVFS OPP and remember it */
+{
+	commit_data(DVFSRC_VCORE_DVFS_FORCE_OPP, level);
+	dvfsrc_target_array[DVFSRC_VCORE_DVFS_FORCE_OPP] = level;
+}
+EXPORT_SYMBOL(dvfsrc_set_vcore_dvfs_force_opp);
+
+void dvfsrc_set_vcore_dvfs_force_workaround_opp(void)	/* workaround variant: level fixed at 0, not cached */
+{
+	commit_data(DVFSRC_VCORE_DVFS_FORCE_OPP_WORKAROUND, 0);
+}
+EXPORT_SYMBOL(dvfsrc_set_vcore_dvfs_force_workaround_opp);
+
+static int dvfsrc_probe(struct platform_device *pdev)	/* map registers, register devfreq device, init OPP tables */
+{
+	int ret = 0;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+
+	dvfsrc = devm_kzalloc(&pdev->dev, sizeof(*dvfsrc), GFP_KERNEL);
+	if (!dvfsrc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* a NULL res is rejected below */
+
+	dvfsrc->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dvfsrc->regs))
+		return PTR_ERR(dvfsrc->regs);
+
+	platform_set_drvdata(pdev, dvfsrc);
+	dvfsrc->dev = &pdev->dev;
+
+	dvfsrc->devfreq = devm_devfreq_add_device(&pdev->dev,
+						 &devfreq_profile,
+						 "dvfsrc",
+						 NULL);	/* NOTE(review): result not checked with IS_ERR — confirm */
+
+	ret = dvfsrc_add_interface(&pdev->dev);	/* sysfs interface */
+	if (ret)
+		return ret;
+
+	dvfsrc_common_init();
+
+	if (of_property_read_u32(np, "dvfsrc_flag",
+		(u32 *) &dvfsrc->dvfsrc_flag))
+		dvfsrc->dvfsrc_flag = 0;	/* default when the DT property is absent */
+
+	dvfsrc_platform_init(dvfsrc);	/* SoC-specific hook */
+
+	pr_info("%s: init done\n", __func__);
+
+	return 0;
+}
+
+static int dvfsrc_remove(struct platform_device *pdev)	/* tear down the sysfs interface; devm frees the rest */
+{
+	dvfsrc_remove_interface(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id dvfsrc_of_match[] = {
+	{ .compatible = "mediatek,dvfsrc" },	/* legacy binding */
+	{ .compatible = "mediatek,dvfsrc-v2" },	/* binding served by this file */
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, dvfsrc_of_match);
+
+static __maybe_unused int dvfsrc_suspend(struct device *dev)	/* run the optional platform hook, then suspend devfreq */
+{
+	int ret = 0;
+
+	if (dvfsrc->suspend) {	/* platform-specific hook is optional */
+		ret = dvfsrc->suspend(dvfsrc);
+		if (ret)
+			return ret;
+	}
+
+	ret = devfreq_suspend_device(dvfsrc->devfreq);
+	if (ret < 0) {
+		dev_dbg(dev, "failed to suspend the devfreq devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static __maybe_unused int dvfsrc_resume(struct device *dev)	/* mirror of dvfsrc_suspend() */
+{
+	int ret = 0;
+
+	if (dvfsrc->resume) {	/* platform-specific hook is optional */
+		ret = dvfsrc->resume(dvfsrc);
+		if (ret)
+			return ret;
+	}
+
+	ret = devfreq_resume_device(dvfsrc->devfreq);
+	if (ret < 0) {
+		dev_dbg(dev, "failed to resume the devfreq devices\n");
+		return ret;
+	}
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(dvfsrc_pm, dvfsrc_suspend,
+			 dvfsrc_resume);
+
+static struct platform_driver dvfsrc_driver = {
+	.probe	= dvfsrc_probe,
+	.remove	= dvfsrc_remove,
+	.driver = {
+		.name = "dvfsrc",
+		.pm	= &dvfsrc_pm,
+		.of_match_table = dvfsrc_of_match,
+	},
+};
+
+static int __init dvfsrc_init(void)	/* register the governor first; unwind it if the driver fails */
+{
+	int ret = 0;
+
+	ret = devfreq_add_governor(&dvfsrc_governor);
+	if (ret) {
+		pr_info("%s: failed to add governor: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&dvfsrc_driver);
+	if (ret)
+		devfreq_remove_governor(&dvfsrc_governor);	/* keep governor list consistent on failure */
+
+	return ret;
+}
+late_initcall_sync(dvfsrc_init)
+
+static void __exit dvfsrc_exit(void)	/* unregister in reverse order of dvfsrc_init() */
+{
+	int ret = 0;
+
+	platform_driver_unregister(&dvfsrc_driver);
+
+	ret = devfreq_remove_governor(&dvfsrc_governor);
+	if (ret)
+		pr_info("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(dvfsrc_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("dvfsrc driver");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/rk3399_dmc.c b/src/kernel/linux/v4.14/drivers/devfreq/rk3399_dmc.c
new file mode 100644
index 0000000..1b89ebb
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/rk3399_dmc.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+
+#include <soc/rockchip/rockchip_sip.h>
+
+struct dram_timing {	/* DDR timing parameters handed to ATF one u32 at a time (see probe) */
+	unsigned int ddr3_speed_bin;
+	unsigned int pd_idle;
+	unsigned int sr_idle;
+	unsigned int sr_mc_gate_idle;
+	unsigned int srpd_lite_idle;
+	unsigned int standby_idle;
+	unsigned int auto_pd_dis_freq;
+	unsigned int dram_dll_dis_freq;
+	unsigned int phy_dll_dis_freq;
+	unsigned int ddr3_odt_dis_freq;
+	unsigned int ddr3_drv;
+	unsigned int ddr3_odt;
+	unsigned int phy_ddr3_ca_drv;
+	unsigned int phy_ddr3_dq_drv;
+	unsigned int phy_ddr3_odt;
+	unsigned int lpddr3_odt_dis_freq;
+	unsigned int lpddr3_drv;
+	unsigned int lpddr3_odt;
+	unsigned int phy_lpddr3_ca_drv;
+	unsigned int phy_lpddr3_dq_drv;
+	unsigned int phy_lpddr3_odt;
+	unsigned int lpddr4_odt_dis_freq;
+	unsigned int lpddr4_drv;
+	unsigned int lpddr4_dq_odt;
+	unsigned int lpddr4_ca_odt;
+	unsigned int phy_lpddr4_ca_drv;
+	unsigned int phy_lpddr4_ck_cs_drv;
+	unsigned int phy_lpddr4_dq_drv;
+	unsigned int phy_lpddr4_odt;
+};
+
+struct rk3399_dmcfreq {	/* per-device state; allocated in probe, stored as drvdata */
+	struct device *dev;
+	struct devfreq *devfreq;
+	struct devfreq_simple_ondemand_data ondemand_data;
+	struct clk *dmc_clk;
+	struct devfreq_event_dev *edev;
+	struct mutex lock;	/* serializes frequency/voltage transitions in the target() callback */
+	struct dram_timing timing;
+
+	/*
+	 * DDR Converser of Frequency (DCF) is used to implement DDR frequency
+	 * conversion without the participation of the CPU; we implement and
+	 * control it in ARM Trusted Firmware.
+	 */
+	wait_queue_head_t	wait_dcf_queue;
+	int irq;
+	int wait_dcf_flag;	/* set before clk_set_rate(), cleared by the DCF IRQ handler */
+	struct regulator *vdd_center;
+	unsigned long rate, target_rate;
+	unsigned long volt, target_volt;
+};
+
+static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
+				 u32 flags)	/* devfreq target(): scale DDR clock + center-rail voltage */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long old_clk_rate = dmcfreq->rate;
+	unsigned long target_volt, target_rate;
+	int err;
+
+	opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+
+	target_rate = dev_pm_opp_get_freq(opp);
+	target_volt = dev_pm_opp_get_voltage(opp);
+	dev_pm_opp_put(opp);
+
+	if (dmcfreq->rate == target_rate)	/* nothing to do */
+		return 0;
+
+	mutex_lock(&dmcfreq->lock);
+
+	/*
+	 * If frequency scaling from low to high, adjust voltage first.
+	 * If frequency scaling from high to low, adjust frequency first.
+	 */
+	if (old_clk_rate < target_rate) {
+		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
+					    target_volt);
+		if (err) {
+			dev_err(dev, "Cannot to set voltage %lu uV\n",
+				target_volt);
+			goto out;
+		}
+	}
+	dmcfreq->wait_dcf_flag = 1;	/* cleared by rk3399_dmc_irq() when ATF finishes */
+
+	err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
+	if (err) {
+		dev_err(dev, "Cannot to set frequency %lu (%d)\n",
+			target_rate, err);
+		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
+				      dmcfreq->volt);	/* roll the voltage back */
+		goto out;
+	}
+
+	/*
+	 * Wait until the dcf irq happens: it means the frequency scaling
+	 * finished in ARM Trusted Firmware. Use 100ms as the timeout.
+	 */
+	if (!wait_event_timeout(dmcfreq->wait_dcf_queue,
+				!dmcfreq->wait_dcf_flag, HZ / 10))
+		dev_warn(dev, "Timeout waiting for dcf interrupt\n");
+
+	/*
+	 * Check the dpll rate.
+	 * There are only two results we can get:
+	 * 1. DDR frequency scaling failed: we still read the old rate.
+	 * 2. DDR frequency scaling succeeded: we read the rate we set.
+	 */
+	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
+
+	/* If get the incorrect rate, set voltage to old value. */
+	if (dmcfreq->rate != target_rate) {	/* NOTE(review): message literal spans lines via '\', embedding tab whitespace */
+		dev_err(dev, "Get wrong ddr frequency, Request frequency %lu,\
+			Current frequency %lu\n", target_rate, dmcfreq->rate);
+		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
+				      dmcfreq->volt);
+		goto out;
+	} else if (old_clk_rate > target_rate)	/* scaling down: lower the voltage after the clock */
+		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
+					    target_volt);
+	if (err)
+		dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);	/* NOTE(review): on failure we still record target rate/volt below — confirm */
+
+	dmcfreq->rate = target_rate;	/* equal to the measured rate here (checked above) */
+	dmcfreq->volt = target_volt;
+
+out:
+	mutex_unlock(&dmcfreq->lock);
+	return err;
+}
+
+static int rk3399_dmcfreq_get_dev_status(struct device *dev,
+					 struct devfreq_dev_status *stat)	/* feed DFI event counters to the governor */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	struct devfreq_event_data edata;
+	int ret = 0;
+
+	ret = devfreq_event_get_event(dmcfreq->edev, &edata);
+	if (ret < 0)
+		return ret;
+
+	stat->current_frequency = dmcfreq->rate;
+	stat->busy_time = edata.load_count;	/* busy/total ratio drives simple_ondemand */
+	stat->total_time = edata.total_count;
+
+	return ret;
+}
+
+static int rk3399_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq)	/* report the cached DMC rate */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+
+	*freq = dmcfreq->rate;
+
+	return 0;
+}
+
+static struct devfreq_dev_profile rk3399_devfreq_dmc_profile = {
+	.polling_ms	= 200,	/* poll the load counters 5 times a second; initial_freq set in probe */
+	.target		= rk3399_dmcfreq_target,
+	.get_dev_status	= rk3399_dmcfreq_get_dev_status,
+	.get_cur_freq	= rk3399_dmcfreq_get_cur_freq,
+};
+
+static __maybe_unused int rk3399_dmcfreq_suspend(struct device *dev)	/* stop the event counters, then devfreq */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = devfreq_event_disable_edev(dmcfreq->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to disable the devfreq-event devices\n");
+		return ret;
+	}
+
+	ret = devfreq_suspend_device(dmcfreq->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to suspend the devfreq devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)	/* mirror of suspend: counters first, then devfreq */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = devfreq_event_enable_edev(dmcfreq->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the devfreq-event devices\n");
+		return ret;
+	}
+
+	ret = devfreq_resume_device(dmcfreq->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to resume the devfreq devices\n");
+		return ret;
+	}
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
+			 rk3399_dmcfreq_resume);
+
+static irqreturn_t rk3399_dmc_irq(int irq, void *dev_id)	/* DCF completion: release the waiter in target(), ack via SIP call */
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_id;
+	struct arm_smccc_res res;
+
+	dmcfreq->wait_dcf_flag = 0;
+	wake_up(&dmcfreq->wait_dcf_queue);
+
+	/* Clear the DCF interrupt */
+	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+		      ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ,
+		      0, 0, 0, 0, &res);
+
+	return IRQ_HANDLED;
+}
+
+static int of_get_ddr_timings(struct dram_timing *timing,
+			      struct device_node *np)	/* returns 0 only if every property was read; errors are OR-combined */
+{
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "rockchip,ddr3_speed_bin",
+				   &timing->ddr3_speed_bin);
+	ret |= of_property_read_u32(np, "rockchip,pd_idle",
+				    &timing->pd_idle);
+	ret |= of_property_read_u32(np, "rockchip,sr_idle",
+				    &timing->sr_idle);
+	ret |= of_property_read_u32(np, "rockchip,sr_mc_gate_idle",
+				    &timing->sr_mc_gate_idle);
+	ret |= of_property_read_u32(np, "rockchip,srpd_lite_idle",
+				    &timing->srpd_lite_idle);
+	ret |= of_property_read_u32(np, "rockchip,standby_idle",
+				    &timing->standby_idle);
+	ret |= of_property_read_u32(np, "rockchip,auto_pd_dis_freq",
+				    &timing->auto_pd_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,dram_dll_dis_freq",
+				    &timing->dram_dll_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,phy_dll_dis_freq",
+				    &timing->phy_dll_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_odt_dis_freq",
+				    &timing->ddr3_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_drv",
+				    &timing->ddr3_drv);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_odt",
+				    &timing->ddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_ca_drv",
+				    &timing->phy_ddr3_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_dq_drv",
+				    &timing->phy_ddr3_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_odt",
+				    &timing->phy_ddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_odt_dis_freq",
+				    &timing->lpddr3_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_drv",
+				    &timing->lpddr3_drv);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_odt",
+				    &timing->lpddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_ca_drv",
+				    &timing->phy_lpddr3_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_dq_drv",
+				    &timing->phy_lpddr3_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_odt",
+				    &timing->phy_lpddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_odt_dis_freq",
+				    &timing->lpddr4_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_drv",
+				    &timing->lpddr4_drv);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_dq_odt",
+				    &timing->lpddr4_dq_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_ca_odt",
+				    &timing->lpddr4_ca_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ca_drv",
+				    &timing->phy_lpddr4_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ck_cs_drv",
+				    &timing->phy_lpddr4_ck_cs_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_dq_drv",
+				    &timing->phy_lpddr4_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_odt",
+				    &timing->phy_lpddr4_odt);
+
+	return ret;
+}
+
+static int rk3399_dmcfreq_probe(struct platform_device *pdev)	/* acquire resources, init DRAM via ATF, register devfreq */
+{
+	struct arm_smccc_res res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+	struct rk3399_dmcfreq *data;
+	int ret, irq, index, size;
+	uint32_t *timing;
+	struct dev_pm_opp *opp;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev,
+			"Cannot get the dmc interrupt resource: %d\n", irq);
+		return irq;
+	}
+	data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	mutex_init(&data->lock);
+
+	data->vdd_center = devm_regulator_get(dev, "center");
+	if (IS_ERR(data->vdd_center)) {
+		dev_err(dev, "Cannot get the regulator \"center\"\n");
+		return PTR_ERR(data->vdd_center);
+	}
+
+	data->dmc_clk = devm_clk_get(dev, "dmc_clk");
+	if (IS_ERR(data->dmc_clk)) {
+		dev_err(dev, "Cannot get the clk dmc_clk\n");
+		return PTR_ERR(data->dmc_clk);
+	};	/* NOTE(review): stray ';' — harmless empty statement */
+
+	data->irq = irq;
+	ret = devm_request_irq(dev, irq, rk3399_dmc_irq, 0,
+			       dev_name(dev), data);
+	if (ret) {
+		dev_err(dev, "Failed to request dmc irq: %d\n", ret);
+		return ret;
+	}
+
+	init_waitqueue_head(&data->wait_dcf_queue);
+	data->wait_dcf_flag = 0;
+
+	data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
+	if (IS_ERR(data->edev))
+		return -EPROBE_DEFER;	/* NOTE(review): masks non-defer errors from the event driver — confirm */
+
+	ret = devfreq_event_enable_edev(data->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable devfreq-event devices\n");
+		return ret;
+	}
+
+	/*
+	 * Get the dram timing and pass it to the arm trusted firmware;
+	 * the dram driver in the arm trusted firmware will use these
+	 * timings to perform the dram initialization.
+	 */
+	if (!of_get_ddr_timings(&data->timing, np)) {
+		timing = &data->timing.ddr3_speed_bin;	/* iterate the all-u32 struct field by field */
+		size = sizeof(struct dram_timing) / 4;
+		for (index = 0; index < size; index++) {
+			arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index,
+				      ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
+				      0, 0, 0, 0, &res);
+			if (res.a0) {
+				dev_err(dev, "Failed to set dram param: %ld\n",
+					res.a0);
+				return -EINVAL;	/* NOTE(review): edev left enabled on this and later error paths — confirm */
+			}
+		}
+	}
+
+	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+		      ROCKCHIP_SIP_CONFIG_DRAM_INIT,
+		      0, 0, 0, 0, &res);	/* result intentionally ignored */
+
+	/*
+	 * We add a devfreq driver to our parent since it has a device tree node
+	 * with operating points.
+	 */
+	if (dev_pm_opp_of_add_table(dev)) {
+		dev_err(dev, "Invalid operating-points in device tree.\n");
+		return -EINVAL;
+	}
+
+	of_property_read_u32(np, "upthreshold",
+			     &data->ondemand_data.upthreshold);	/* optional; simple_ondemand defaults apply otherwise */
+	of_property_read_u32(np, "downdifferential",
+			     &data->ondemand_data.downdifferential);
+
+	data->rate = clk_get_rate(data->dmc_clk);
+
+	opp = devfreq_recommended_opp(dev, &data->rate, 0);	/* snap the boot rate onto an OPP */
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+
+	data->rate = dev_pm_opp_get_freq(opp);
+	data->volt = dev_pm_opp_get_voltage(opp);
+	dev_pm_opp_put(opp);
+
+	rk3399_devfreq_dmc_profile.initial_freq = data->rate;
+
+	data->devfreq = devm_devfreq_add_device(dev,
+					   &rk3399_devfreq_dmc_profile,
+					   "simple_ondemand",
+					   &data->ondemand_data);
+	if (IS_ERR(data->devfreq))
+		return PTR_ERR(data->devfreq);
+	devm_devfreq_register_opp_notifier(dev, data->devfreq);	/* track OPP add/remove events */
+
+	data->dev = dev;
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+}
+
+static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
+	{ .compatible = "rockchip,rk3399-dmc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
+
+static struct platform_driver rk3399_dmcfreq_driver = {	/* no .remove: all resources are devm-managed */
+	.probe	= rk3399_dmcfreq_probe,
+	.driver = {
+		.name	= "rk3399-dmc-freq",
+		.pm	= &rk3399_dmcfreq_pm,
+		.of_match_table = rk3399dmc_devfreq_of_match,
+	},
+};
+module_platform_driver(rk3399_dmcfreq_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
+MODULE_DESCRIPTION("RK3399 dmcfreq driver with devfreq framework");
diff --git a/src/kernel/linux/v4.14/drivers/devfreq/tegra-devfreq.c b/src/kernel/linux/v4.14/drivers/devfreq/tegra-devfreq.c
new file mode 100644
index 0000000..f6a2dd6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/devfreq/tegra-devfreq.c
@@ -0,0 +1,787 @@
+/*
+ * A devfreq driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "governor.h"
+
+/* Global ACTMON register offsets (relative to the controller base). */
+#define ACTMON_GLB_STATUS					0x0
+#define ACTMON_GLB_PERIOD_CTRL					0x4
+
+/* Per-device register offsets, relative to each device's sub-block. */
+#define ACTMON_DEV_CTRL						0x0
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
+#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
+#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
+#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
+#define ACTMON_DEV_CTRL_ENB					BIT(31)
+
+#define ACTMON_DEV_UPPER_WMARK					0x4
+#define ACTMON_DEV_LOWER_WMARK					0x8
+#define ACTMON_DEV_INIT_AVG					0xc
+#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
+#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
+#define ACTMON_DEV_COUNT_WEIGHT					0x18
+#define ACTMON_DEV_AVG_COUNT					0x20
+#define ACTMON_DEV_INTR_STATUS					0x24
+
+#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
+
+/* INTR_STATUS bits flagging consecutive watermark breaches. */
+#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
+#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
+
+/* Consecutive-breach window lengths (samples) and boost step (kHz). */
+#define ACTMON_ABOVE_WMARK_WINDOW				1
+#define ACTMON_BELOW_WMARK_WINDOW				3
+#define ACTMON_BOOST_FREQ_STEP					16000
+
+/*
+ * Activity counter is incremented every 256 memory transactions, and each
+ * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
+ * 4 * 256 = 1024.
+ */
+#define ACTMON_COUNT_WEIGHT					0x400
+
+/*
+ * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
+ * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
+ */
+#define ACTMON_AVERAGE_WINDOW_LOG2			6
+#define ACTMON_SAMPLING_PERIOD				12 /* ms */
+#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
+
+#define KHZ							1000
+
+/* Largest kHz value that survives the * KHZ conversion without overflow. */
+#define KHZ_MAX						(ULONG_MAX / KHZ)
+
+/* Assume that the bus is saturated if the utilization is 25% */
+#define BUS_SATURATION_RATIO					25
+
+/**
+ * struct tegra_devfreq_device_config - configuration specific to an ACTMON
+ * device
+ *
+ * Coefficients and thresholds are percentages unless otherwise noted
+ */
+struct tegra_devfreq_device_config {
+	/* Register sub-block offset from the ACTMON base */
+	u32		offset;
+	/* Bit in ACTMON_GLB_STATUS identifying this device's interrupt */
+	u32		irq_mask;
+
+	/* Factors applied to boost_freq every consecutive watermark breach */
+	unsigned int	boost_up_coeff;
+	unsigned int	boost_down_coeff;
+
+	/* Define the watermark bounds when applied to the current avg */
+	unsigned int	boost_up_threshold;
+	unsigned int	boost_down_threshold;
+
+	/*
+	 * Threshold of activity (cycles) below which the CPU frequency isn't
+	 * to be taken into account. This is to avoid increasing the EMC
+	 * frequency when the CPU is very busy but not accessing the bus often.
+	 */
+	u32		avg_dependency_threshold;
+};
+
+/* Indices into actmon_device_configs / tegra_devfreq.devices. */
+enum tegra_actmon_device {
+	MCALL = 0,
+	MCCPU,
+};
+
+/* Static per-device tuning; order must match enum tegra_actmon_device. */
+static struct tegra_devfreq_device_configs actmon_device_configs[] = {
+	{
+		/* MCALL: All memory accesses (including from the CPUs) */
+		.offset = 0x1c0,
+		.irq_mask = 1 << 26,
+		.boost_up_coeff = 200,
+		.boost_down_coeff = 50,
+		.boost_up_threshold = 60,
+		.boost_down_threshold = 40,
+	},
+	{
+		/* MCCPU: memory accesses from the CPUs */
+		.offset = 0x200,
+		.irq_mask = 1 << 25,
+		.boost_up_coeff = 800,
+		.boost_down_coeff = 90,
+		.boost_up_threshold = 27,
+		.boost_down_threshold = 10,
+		.avg_dependency_threshold = 50000,
+	},
+};
+
+/**
+ * struct tegra_devfreq_device - state specific to an ACTMON device
+ *
+ * Frequencies are in kHz.
+ */
+struct tegra_devfreq_device {
+	const struct tegra_devfreq_device_config *config;
+	/* Mapped base of this device's register sub-block */
+	void __iomem *regs;
+	/* Protects avg_count/boost_freq against the hard IRQ handler */
+	spinlock_t lock;
+
+	/* Average event count sampled in the last interrupt */
+	u32 avg_count;
+
+	/*
+	 * Extra frequency to increase the target by due to consecutive
+	 * watermark breaches.
+	 */
+	unsigned long boost_freq;
+
+	/* Optimal frequency calculated from the stats for this device */
+	unsigned long target_freq;
+};
+
+/* Driver-wide state; frequencies cached here are in kHz. */
+struct tegra_devfreq {
+	struct devfreq		*devfreq;
+
+	struct reset_control	*reset;
+	struct clk		*clock;		/* ACTMON interface clock */
+	void __iomem		*regs;		/* ACTMON register base */
+
+	struct clk		*emc_clock;	/* monitored/scaled clock */
+	unsigned long		max_freq;
+	unsigned long		cur_freq;
+	/* Tracks EMC rate changes to keep the watermarks in sync */
+	struct notifier_block	rate_change_nb;
+
+	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
+};
+
+/* One row of the CPU-frequency -> EMC-floor lookup table (kHz). */
+struct tegra_actmon_emc_ratio {
+	unsigned long cpu_freq;
+	unsigned long emc_freq;
+};
+
+/* Sorted descending by cpu_freq; first row whose cpu_freq <= cpu wins. */
+static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+	{ 1400000,    KHZ_MAX },
+	{ 1200000,    750000 },
+	{ 1100000,    600000 },
+	{ 1000000,    500000 },
+	{  800000,    375000 },
+	{  500000,    200000 },
+	{  250000,    100000 },
+};
+
+/* Read a 32-bit ACTMON global register at @offset. */
+static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
+{
+	return readl(tegra->regs + offset);
+}
+
+/* Write @val to the ACTMON global register at @offset. */
+static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
+{
+	writel(val, tegra->regs + offset);
+}
+
+/* Read a 32-bit register of one monitored device at @offset. */
+static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
+{
+	return readl(dev->regs + offset);
+}
+
+/* Write @val to one monitored device's register at @offset. */
+static void device_writel(struct tegra_devfreq_device *dev, u32 val,
+			  u32 offset)
+{
+	writel(val, dev->regs + offset);
+}
+
+/* Return @pct percent of @val (integer arithmetic, truncating). */
+static unsigned long do_percent(unsigned long val, unsigned int pct)
+{
+	return val * pct / 100;
+}
+
+/*
+ * Re-centre the average-count watermarks around the last sampled
+ * average (@dev->avg_count), +/- one band derived from the maximum
+ * EMC frequency and the sampling period.
+ */
+static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
+					   struct tegra_devfreq_device *dev)
+{
+	u32 avg = dev->avg_count;
+	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
+	u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+
+	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
+
+	/* Clamp so the lower watermark cannot underflow below zero. */
+	avg = max(dev->avg_count, band);
+	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
+}
+
+/*
+ * Program the raw upper/lower watermarks as percentages (the device's
+ * boost thresholds) of the cycle count expected at the current EMC
+ * frequency over one sampling period.
+ */
+static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
+				       struct tegra_devfreq_device *dev)
+{
+	u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+
+	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
+		      ACTMON_DEV_UPPER_WMARK);
+
+	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
+		      ACTMON_DEV_LOWER_WMARK);
+}
+
+/* Flush posted register writes: wmb() then a dummy read-back. */
+static void actmon_write_barrier(struct tegra_devfreq *tegra)
+{
+	/* ensure the update has reached the ACTMON */
+	wmb();
+	actmon_readl(tegra, ACTMON_GLB_STATUS);
+}
+
+/*
+ * Per-device half of the hard IRQ handler: sample the hardware average
+ * count, re-arm the average watermarks around it, and grow/shrink the
+ * software boost frequency on consecutive upper/lower watermark
+ * breaches.  All device state is updated under dev->lock.
+ */
+static void actmon_isr_device(struct tegra_devfreq *tegra,
+			      struct tegra_devfreq_device *dev)
+{
+	unsigned long flags;
+	u32 intr_status, dev_ctrl;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
+	tegra_devfreq_update_avg_wmark(tegra, dev);
+
+	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
+	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
+
+	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
+		/*
+		 * new_boost = min(old_boost * up_coef + step, max_freq)
+		 */
+		dev->boost_freq = do_percent(dev->boost_freq,
+					     dev->config->boost_up_coeff);
+		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
+
+		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+		/* Once boost saturates, stop asking for "above" interrupts. */
+		if (dev->boost_freq >= tegra->max_freq)
+			dev->boost_freq = tegra->max_freq;
+		else
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
+		/*
+		 * new_boost = old_boost * down_coef
+		 * or 0 if (old_boost * down_coef < step / 2)
+		 */
+		dev->boost_freq = do_percent(dev->boost_freq,
+					     dev->config->boost_down_coeff);
+
+		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		/* Once boost decays to zero, stop "below" interrupts too. */
+		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
+			dev->boost_freq = 0;
+		else
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+	}
+
+	/* MCCPU only: gate "below" interrupts on the CPU-activity threshold. */
+	if (dev->config->avg_dependency_threshold) {
+		if (dev->avg_count >= dev->config->avg_dependency_threshold)
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		else if (dev->boost_freq == 0)
+			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+	}
+
+	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
+
+	/* Ack all interrupt sources for this device. */
+	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
+
+	actmon_write_barrier(tegra);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Hard IRQ handler: dispatch to every device flagged in GLB_STATUS and
+ * wake the threaded handler (actmon_thread_isr) if anything was handled.
+ */
+static irqreturn_t actmon_isr(int irq, void *data)
+{
+	struct tegra_devfreq *tegra = data;
+	bool handled = false;
+	unsigned int i;
+	u32 val;
+
+	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		if (val & tegra->devices[i].config->irq_mask) {
+			actmon_isr_device(tegra, tegra->devices + i);
+			handled = true;
+		}
+	}
+
+	return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * Map the current CPU frequency (kHz) to a floor EMC frequency (kHz)
+ * via the static actmon_emc_ratios table, clamped to tegra->max_freq.
+ * Returns 0 for CPU frequencies below the lowest table entry.
+ */
+static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
+					    unsigned long cpu_freq)
+{
+	unsigned int i;
+	struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+
+	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
+		if (cpu_freq >= ratio->cpu_freq) {
+			if (ratio->emc_freq >= tegra->max_freq)
+				return tegra->max_freq;
+			else
+				return ratio->emc_freq;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Recompute dev->target_freq (kHz) from the last sampled average,
+ * scaled so boost_up_threshold% utilization maps to 100%, plus the
+ * boost.  For devices with an avg_dependency_threshold (MCCPU) the
+ * CPU-derived EMC floor is applied when activity is high enough.
+ */
+static void actmon_update_target(struct tegra_devfreq *tegra,
+				 struct tegra_devfreq_device *dev)
+{
+	unsigned long cpu_freq = 0;
+	unsigned long static_cpu_emc_freq = 0;
+	unsigned int avg_sustain_coef;
+	unsigned long flags;
+
+	if (dev->config->avg_dependency_threshold) {
+		/* cpufreq_get(0): current frequency of CPU0, in kHz */
+		cpu_freq = cpufreq_get(0);
+		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
+	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+	dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
+	dev->target_freq += dev->boost_freq;
+
+	/* threshold is 0 for MCALL, so this is always true there */
+	if (dev->avg_count >= dev->config->avg_dependency_threshold)
+		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Threaded IRQ handler: re-evaluate the devfreq target under the
+ * devfreq lock (update_devfreq() requires it to be held).
+ */
+static irqreturn_t actmon_thread_isr(int irq, void *data)
+{
+	struct tegra_devfreq *tegra = data;
+
+	mutex_lock(&tegra->devfreq->lock);
+	update_devfreq(tegra->devfreq);
+	mutex_unlock(&tegra->devfreq->lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * EMC clock notifier: after a rate change, cache the new frequency
+ * (kHz) and rescale every device's raw watermarks to match.
+ */
+static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
+				       unsigned long action, void *ptr)
+{
+	struct clk_notifier_data *data = ptr;
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	unsigned int i;
+	unsigned long flags;
+
+	if (action != POST_RATE_CHANGE)
+		return NOTIFY_OK;
+
+	tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+
+	tegra->cur_freq = data->new_rate / KHZ;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		spin_lock_irqsave(&dev->lock, flags);
+		tegra_devfreq_update_wmark(tegra, dev);
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	actmon_write_barrier(tegra);
+
+	return NOTIFY_OK;
+}
+
+/* Enable all four watermark interrupt sources on every device. */
+static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
+{
+	struct tegra_devfreq_device *dev;
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		val = device_readl(dev, ACTMON_DEV_CTRL);
+		val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		device_writel(dev, val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+}
+
+/* Mask all four watermark interrupt sources on every device. */
+static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
+{
+	struct tegra_devfreq_device *dev;
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		val = device_readl(dev, ACTMON_DEV_CTRL);
+		val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		device_writel(dev, val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+}
+
+/*
+ * One-time device setup at probe: seed the running average from the
+ * current EMC rate, program both watermark pairs and the count weight,
+ * then enable periodic sampling.
+ */
+static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
+					  struct tegra_devfreq_device *dev)
+{
+	u32 val = 0;
+
+	dev->target_freq = tegra->cur_freq;
+
+	dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
+
+	tegra_devfreq_update_avg_wmark(tegra, dev);
+	tegra_devfreq_update_wmark(tegra, dev);
+
+	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
+	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
+
+	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
+	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
+		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
+	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
+		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
+	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
+		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
+	val |= ACTMON_DEV_CTRL_ENB;
+
+	device_writel(dev, val, ACTMON_DEV_CTRL);
+
+	actmon_write_barrier(tegra);
+}
+
+/*
+ * devfreq .target callback: snap *freq (Hz) to the nearest OPP, then
+ * apply it to the EMC clock.  Setting the min rate followed by a
+ * zero-rate request effectively pins the EMC floor at @rate while
+ * letting other clock users raise it further — NOTE(review): this
+ * relies on clk_set_rate(clk, 0) resolving to the lowest allowed
+ * rate; confirm against the Tegra EMC clock implementation.
+ */
+static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
+				u32 flags)
+{
+	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long rate;
+
+	opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
+		return PTR_ERR(opp);
+	}
+	rate = dev_pm_opp_get_freq(opp);
+	dev_pm_opp_put(opp);
+
+	clk_set_min_rate(tegra->emc_clock, rate);
+	clk_set_rate(tegra->emc_clock, 0);
+
+	return 0;
+}
+
+/*
+ * devfreq .get_dev_status callback: report the current EMC frequency
+ * (Hz) and a busy/total pair derived from the MCALL average counter.
+ */
+static int tegra_devfreq_get_dev_status(struct device *dev,
+					struct devfreq_dev_status *stat)
+{
+	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+	struct tegra_devfreq_device *actmon_dev;
+
+	stat->current_frequency = tegra->cur_freq * KHZ;
+
+	/* To be used by the tegra governor */
+	stat->private_data = tegra;
+
+	/* The below are to be used by the other governors */
+
+	actmon_dev = &tegra->devices[MCALL];
+
+	/* Number of cycles spent on memory access */
+	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
+
+	/* The bus can be considered to be saturated way before 100% */
+	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+
+	/* Number of cycles in a sampling period */
+	stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+
+	stat->busy_time = min(stat->busy_time, stat->total_time);
+
+	return 0;
+}
+
+/* polling_ms = 0: updates come from the ACTMON IRQ, not a poll timer. */
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+	.polling_ms	= 0,
+	.target		= tegra_devfreq_target,
+	.get_dev_status	= tegra_devfreq_get_dev_status,
+};
+
+/*
+ * Governor .get_target_freq: refresh the device status, recompute each
+ * monitored device's target and return the maximum, converted from
+ * kHz to Hz.
+ */
+static int tegra_governor_get_target(struct devfreq *devfreq,
+				     unsigned long *freq)
+{
+	struct devfreq_dev_status *stat;
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	unsigned long target_freq = 0;
+	unsigned int i;
+	int err;
+
+	err = devfreq_update_stats(devfreq);
+	if (err)
+		return err;
+
+	stat = &devfreq->last_status;
+
+	/* Stashed by tegra_devfreq_get_dev_status() */
+	tegra = stat->private_data;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		actmon_update_target(tegra, dev);
+
+		target_freq = max(target_freq, dev->target_freq);
+	}
+
+	*freq = target_freq * KHZ;
+
+	return 0;
+}
+
+/*
+ * Governor event handler: pair the devfreq monitor with the ACTMON
+ * interrupts, disabling interrupts before the monitor is stopped or
+ * suspended, and re-enabling them after it (re)starts.
+ */
+static int tegra_governor_event_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	struct tegra_devfreq *tegra;
+	int ret = 0;
+
+	tegra = dev_get_drvdata(devfreq->dev.parent);
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq_monitor_start(devfreq);
+		tegra_actmon_enable_interrupts(tegra);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		tegra_actmon_disable_interrupts(tegra);
+		devfreq_monitor_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		tegra_actmon_disable_interrupts(tegra);
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		tegra_actmon_enable_interrupts(tegra);
+		break;
+	}
+
+	return ret;
+}
+
+/* Custom governor registered by tegra_devfreq_init(). */
+static struct devfreq_governor tegra_devfreq_governor = {
+	.name = "tegra_actmon",
+	.get_target_freq = tegra_governor_get_target,
+	.event_handler = tegra_governor_event_handler,
+};
+
+/*
+ * Probe: map the ACTMON registers, acquire reset/clocks, program each
+ * monitored device, publish the EMC OPP table and register the devfreq
+ * device with the "tegra_actmon" governor.
+ *
+ * Error handling: once the rate-change notifier is registered and the
+ * ACTMON clock is enabled, every later failure must unwind both (the
+ * goto ladder below).  The devm_devfreq_add_device() result is also
+ * checked — leaving tegra->devfreq as an ERR_PTR would crash the
+ * governor and tegra_devfreq_remove() later.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int tegra_devfreq_probe(struct platform_device *pdev)
+{
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	struct resource *res;
+	unsigned int i;
+	unsigned long rate;
+	int irq;
+	int err;
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->regs))
+		return PTR_ERR(tegra->regs);
+
+	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
+	if (IS_ERR(tegra->reset)) {
+		dev_err(&pdev->dev, "Failed to get reset\n");
+		return PTR_ERR(tegra->reset);
+	}
+
+	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
+	if (IS_ERR(tegra->clock)) {
+		dev_err(&pdev->dev, "Failed to get actmon clock\n");
+		return PTR_ERR(tegra->clock);
+	}
+
+	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
+	if (IS_ERR(tegra->emc_clock)) {
+		dev_err(&pdev->dev, "Failed to get emc clock\n");
+		return PTR_ERR(tegra->emc_clock);
+	}
+
+	/* Start from the highest EMC rate the clock will round to. */
+	clk_set_rate(tegra->emc_clock, ULONG_MAX);
+
+	tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
+	err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to register rate change notifier\n");
+		return err;
+	}
+
+	reset_control_assert(tegra->reset);
+
+	err = clk_prepare_enable(tegra->clock);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to prepare and enable ACTMON clock\n");
+		goto unregister_notifier;
+	}
+
+	reset_control_deassert(tegra->reset);
+
+	tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
+	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
+	actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+		      ACTMON_GLB_PERIOD_CTRL);
+
+	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+		dev = tegra->devices + i;
+		dev->config = actmon_device_configs + i;
+		dev->regs = tegra->regs + dev->config->offset;
+		spin_lock_init(&dev->lock);
+
+		tegra_actmon_configure_device(tegra, dev);
+	}
+
+	/*
+	 * Enumerate the EMC's discrete rates: clk_round_rate() snaps the
+	 * probe value up to the next supported rate, which then becomes
+	 * an OPP; rate++ moves past it for the next iteration.
+	 */
+	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
+		rate = clk_round_rate(tegra->emc_clock, rate);
+		dev_pm_opp_add(&pdev->dev, rate, 0);
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		err = irq;
+		goto disable_clk;
+	}
+
+	platform_set_drvdata(pdev, tegra);
+
+	err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
+					actmon_thread_isr, IRQF_SHARED,
+					"tegra-devfreq", tegra);
+	if (err) {
+		dev_err(&pdev->dev, "Interrupt request failed\n");
+		goto disable_clk;
+	}
+
+	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+	tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+						 &tegra_devfreq_profile,
+						 "tegra_actmon",
+						 NULL);
+	if (IS_ERR(tegra->devfreq)) {
+		dev_err(&pdev->dev, "Failed to add devfreq device\n");
+		err = PTR_ERR(tegra->devfreq);
+		goto disable_clk;
+	}
+
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(tegra->clock);
+unregister_notifier:
+	clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+	return err;
+}
+
+/*
+ * Remove: disable every ACTMON device, free the IRQ before tearing
+ * down the state it uses, then drop the notifier and the clock.
+ * (The devfreq device itself is devm-managed.)
+ */
+static int tegra_devfreq_remove(struct platform_device *pdev)
+{
+	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+		val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
+		val &= ~ACTMON_DEV_CTRL_ENB;
+		device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+
+	/* Explicit free so no IRQ can fire during the teardown below. */
+	devm_free_irq(&pdev->dev, irq, tegra);
+
+	clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+	clk_disable_unprepare(tegra->clock);
+
+	return 0;
+}
+
+/* DT match table: binds this driver to "nvidia,tegra124-actmon" nodes. */
+static const struct of_device_id tegra_devfreq_of_match[] = {
+	{ .compatible = "nvidia,tegra124-actmon" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
+
+/* Platform driver glue; registered manually from tegra_devfreq_init(). */
+static struct platform_driver tegra_devfreq_driver = {
+	.probe	= tegra_devfreq_probe,
+	.remove	= tegra_devfreq_remove,
+	.driver = {
+		.name = "tegra-devfreq",
+		.of_match_table = tegra_devfreq_of_match,
+	},
+};
+
+/*
+ * Module init: the custom governor must exist before any probe can
+ * reference it, so register it first and unwind it if the platform
+ * driver registration fails.
+ */
+static int __init tegra_devfreq_init(void)
+{
+	int ret = 0;
+
+	ret = devfreq_add_governor(&tegra_devfreq_governor);
+	if (ret) {
+		pr_err("%s: failed to add governor: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&tegra_devfreq_driver);
+	if (ret)
+		devfreq_remove_governor(&tegra_devfreq_governor);
+
+	return ret;
+}
+module_init(tegra_devfreq_init)
+
+/* Module exit: reverse of init — driver first, then the governor. */
+static void __exit tegra_devfreq_exit(void)
+{
+	int ret = 0;
+
+	platform_driver_unregister(&tegra_devfreq_driver);
+
+	ret = devfreq_remove_governor(&tegra_devfreq_governor);
+	if (ret)
+		pr_err("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(tegra_devfreq_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tegra devfreq driver");
+MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");