ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.c b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 0000000..fedade8
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
+		return NULL;
+
+	return msm_dsi->encoder;
+}
+
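+/*
+ * Look up the DSI PHY referenced by the first "phys" phandle in our DT
+ * node. If the PHY device exists but its driver has not bound yet,
+ * return -EPROBE_DEFER so the driver core retries this probe later.
+ */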
+static int dsi_get_phy(struct msm_dsi *msm_dsi)
+{
+	struct platform_device *pdev = msm_dsi->pdev;
+	struct platform_device *phy_pdev;
+	struct device_node *phy_node;
+
+	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
+	if (!phy_node) {
+		DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
+		return -ENXIO;
+	}
+
+	phy_pdev = of_find_device_by_node(phy_node);
+	if (phy_pdev) {
+		msm_dsi->phy = platform_get_drvdata(phy_pdev);
+		msm_dsi->phy_dev = &phy_pdev->dev;
+	}
+
+	of_node_put(phy_node);
+
+	if (!phy_pdev) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+		return -EPROBE_DEFER;
+	}
+	if (!msm_dsi->phy) {
+		put_device(&phy_pdev->dev);
+		DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	return 0;
+}
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi)
+		return;
+
+	msm_dsi_manager_unregister(msm_dsi);
+
+	if (msm_dsi->phy_dev) {
+		put_device(msm_dsi->phy_dev);
+		msm_dsi->phy = NULL;
+		msm_dsi->phy_dev = NULL;
+	}
+
+	if (msm_dsi->host) {
+		msm_dsi_host_destroy(msm_dsi->host);
+		msm_dsi->host = NULL;
+	}
+
+	platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+	struct msm_dsi *msm_dsi;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENXIO);
+
+	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+	if (!msm_dsi)
+		return ERR_PTR(-ENOMEM);
+	DBG("dsi probed=%p", msm_dsi);
+
+	msm_dsi->id = -1;
+	msm_dsi->pdev = pdev;
+	platform_set_drvdata(pdev, msm_dsi);
+
+	/* Init dsi host */
+	ret = msm_dsi_host_init(msm_dsi);
+	if (ret)
+		goto destroy_dsi;
+
+	/* Get dsi PHY */
+	ret = dsi_get_phy(msm_dsi);
+	if (ret)
+		goto destroy_dsi;
+
+	/* Register with the dsi manager */
+	ret = msm_dsi_manager_register(msm_dsi);
+	if (ret)
+		goto destroy_dsi;
+
+	return msm_dsi;
+
+destroy_dsi:
+	dsi_destroy(msm_dsi);
+	return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi;
+
+	DBG("");
+	msm_dsi = dsi_init(pdev);
+	if (IS_ERR(msm_dsi)) {
+		/* Don't fail the bind if the dsi port is not connected */
+		if (PTR_ERR(msm_dsi) == -ENODEV)
+			return 0;
+		else
+			return PTR_ERR(msm_dsi);
+	}
+
+	priv->dsi[msm_dsi->id] = msm_dsi;
+
+	return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+	int id = msm_dsi->id;
+
+	if (priv->dsi[id]) {
+		dsi_destroy(msm_dsi);
+		priv->dsi[id] = NULL;
+	}
+}
+
+static const struct component_ops dsi_ops = {
+	.bind   = dsi_bind,
+	.unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+	DBG("");
+	component_del(&pdev->dev, &dsi_ops);
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,mdss-dsi-ctrl" },
+	{}
+};
+
+static const struct dev_pm_ops dsi_pm_ops = {
+	SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+};
+
+static struct platform_driver dsi_driver = {
+	.probe = dsi_dev_probe,
+	.remove = dsi_dev_remove,
+	.driver = {
+		.name = "msm_dsi",
+		.of_match_table = dt_match,
+		.pm = &dsi_pm_ops,
+	},
+};
+
+void __init msm_dsi_register(void)
+{
+	DBG("");
+	msm_dsi_phy_driver_register();
+	platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+	DBG("");
+	msm_dsi_phy_driver_unregister();
+	platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+			 struct drm_encoder *encoder)
+{
+	struct msm_drm_private *priv;
+	struct drm_bridge *ext_bridge;
+	int ret;
+
+	if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
+		return -EINVAL;
+
+	priv = dev->dev_private;
+
+	if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+		DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+		return -ENOSPC;
+	}
+
+	msm_dsi->dev = dev;
+
+	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+	if (ret) {
+		DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
+		goto fail;
+	}
+
+	if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	msm_dsi->encoder = encoder;
+
+	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+	if (IS_ERR(msm_dsi->bridge)) {
+		ret = PTR_ERR(msm_dsi->bridge);
+		DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
+		msm_dsi->bridge = NULL;
+		goto fail;
+	}
+
+	/*
+	 * Check whether the dsi encoder output is connected to a panel or an
+	 * external bridge. We create a connector only if we're connected to a
+	 * drm_panel device. When we're connected to an external bridge, we
+	 * assume that the drm_bridge driver will create the connector itself.
+	 */
+	ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+	if (ext_bridge)
+		msm_dsi->connector =
+			msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+	else
+		msm_dsi->connector =
+			msm_dsi_manager_connector_init(msm_dsi->id);
+
+	if (IS_ERR(msm_dsi->connector)) {
+		ret = PTR_ERR(msm_dsi->connector);
+		DRM_DEV_ERROR(dev->dev,
+			"failed to create dsi connector: %d\n", ret);
+		msm_dsi->connector = NULL;
+		goto fail;
+	}
+
+	msm_dsi_manager_setup_encoder(msm_dsi->id);
+
+	priv->bridges[priv->num_bridges++]       = msm_dsi->bridge;
+	priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+	return 0;
+fail:
+	/* bridge/connector are normally destroyed by drm: */
+	if (msm_dsi->bridge) {
+		msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+		msm_dsi->bridge = NULL;
+	}
+
+	/* don't destroy connector if we didn't make it */
+	if (msm_dsi->connector && !msm_dsi->external_bridge)
+		msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+
+	msm_dsi->connector = NULL;
+
+	return ret;
+}
+
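+/*
+ * Usage sketch (illustrative, not part of this patch): a KMS backend such
+ * as mdp5 wires each probed DSI host to an encoder while building its
+ * pipeline, roughly:
+ *
+ *	struct msm_dsi *msm_dsi = priv->dsi[i];
+ *
+ *	if (msm_dsi) {
+ *		ret = msm_dsi_modeset_init(msm_dsi, ddev, encoder);
+ *		if (ret)
+ *			goto fail;
+ *	}
+ */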
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.h b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 0000000..0da8a4e
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+
+#define DSI_0	0
+#define DSI_1	1
+#define DSI_MAX	2
+
+struct msm_dsi_phy_shared_timings;
+struct msm_dsi_phy_clk_request;
+
+enum msm_dsi_phy_type {
+	MSM_DSI_PHY_28NM_HPM,
+	MSM_DSI_PHY_28NM_LP,
+	MSM_DSI_PHY_20NM,
+	MSM_DSI_PHY_28NM_8960,
+	MSM_DSI_PHY_14NM,
+	MSM_DSI_PHY_10NM,
+	MSM_DSI_PHY_MAX
+};
+
+enum msm_dsi_phy_usecase {
+	MSM_DSI_PHY_STANDALONE,
+	MSM_DSI_PHY_MASTER,
+	MSM_DSI_PHY_SLAVE,
+};
+
+#define DSI_DEV_REGULATOR_MAX	8
+#define DSI_BUS_CLK_MAX		4
+
+/* Regulators for DSI devices */
+struct dsi_reg_entry {
+	char name[32];
+	int enable_load;
+	int disable_load;
+};
+
+struct dsi_reg_config {
+	int num;
+	struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
+};
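+
+/*
+ * A SoC's config table fills this in with its regulator set; a minimal
+ * sketch (names and loads illustrative, loosely modeled on dsi_cfg.c):
+ *
+ *	static const struct dsi_reg_config example_reg_cfg = {
+ *		.num = 2,
+ *		.regs = {
+ *			{"vdda", 100000, 100},
+ *			{"vddio", 100000, 100},
+ *		},
+ *	};
+ */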
+
+struct msm_dsi {
+	struct drm_device *dev;
+	struct platform_device *pdev;
+
+	/* connector managed by us when we're connected to a drm_panel */
+	struct drm_connector *connector;
+	/* internal dsi bridge attached to MDP interface */
+	struct drm_bridge *bridge;
+
+	struct mipi_dsi_host *host;
+	struct msm_dsi_phy *phy;
+
+	/*
+	 * panel/external_bridge connected to the dsi bridge output; only one
+	 * of the two can be valid at a time
+	 */
+	struct drm_panel *panel;
+	struct drm_bridge *external_bridge;
+
+	struct device *phy_dev;
+	bool phy_enabled;
+
+	/* the encoder we are hooked to (outside the dsi block) */
+	struct drm_encoder *encoder;
+
+	int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
+void msm_dsi_manager_setup_encoder(int id);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
+
+/* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+	return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi pll */
+struct msm_dsi_pll;
+#ifdef CONFIG_DRM_MSM_DSI_PLL
+struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int dsi_id);
+void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
+int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+			    enum msm_dsi_phy_usecase uc);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			 enum msm_dsi_phy_type type, int id) {
+	return ERR_PTR(-ENODEV);
+}
+static inline void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+	return -ENODEV;
+}
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	return 0;
+}
+static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+					  enum msm_dsi_phy_usecase uc)
+{
+	return -ENODEV;
+}
+#endif
+
+/* dsi host */
+struct msm_dsi_host;
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+					u32 dma_base, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings,
+			bool is_dual_dsi);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+				  const struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+			struct msm_dsi_pll *src_pll);
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+	struct msm_dsi_phy_clk_request *clk_req,
+	bool is_dual_dsi);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+struct msm_dsi_phy_shared_timings {
+	u32 clk_post;
+	u32 clk_pre;
+	bool clk_pre_inc_by_2;
+};
+
+struct msm_dsi_phy_clk_request {
+	unsigned long bitclk_rate;
+	unsigned long escclk_rate;
+};
+
+void msm_dsi_phy_driver_register(void);
+void msm_dsi_phy_driver_unregister(void);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			struct msm_dsi_phy_clk_request *clk_req);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+			struct msm_dsi_phy_shared_timings *shared_timing);
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+			     enum msm_dsi_phy_usecase uc);
+
+#endif /* __DSI_CONNECTOR_H__ */
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.xml.h b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 0000000..21f489a
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,1730 @@
+#ifndef DSI_XML
+#define DSI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum dsi_traffic_mode {
+	NON_BURST_SYNCH_PULSE = 0,
+	NON_BURST_SYNCH_EVENT = 1,
+	BURST_MODE = 2,
+};
+
+enum dsi_vid_dst_format {
+	VID_DST_FORMAT_RGB565 = 0,
+	VID_DST_FORMAT_RGB666 = 1,
+	VID_DST_FORMAT_RGB666_LOOSE = 2,
+	VID_DST_FORMAT_RGB888 = 3,
+};
+
+enum dsi_rgb_swap {
+	SWAP_RGB = 0,
+	SWAP_RBG = 1,
+	SWAP_BGR = 2,
+	SWAP_BRG = 3,
+	SWAP_GRB = 4,
+	SWAP_GBR = 5,
+};
+
+enum dsi_cmd_trigger {
+	TRIGGER_NONE = 0,
+	TRIGGER_SEOF = 1,
+	TRIGGER_TE = 2,
+	TRIGGER_SW = 4,
+	TRIGGER_SW_SEOF = 5,
+	TRIGGER_SW_TE = 6,
+};
+
+enum dsi_cmd_dst_format {
+	CMD_DST_FORMAT_RGB111 = 0,
+	CMD_DST_FORMAT_RGB332 = 3,
+	CMD_DST_FORMAT_RGB444 = 4,
+	CMD_DST_FORMAT_RGB565 = 6,
+	CMD_DST_FORMAT_RGB666 = 7,
+	CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+	LANE_SWAP_0123 = 0,
+	LANE_SWAP_3012 = 1,
+	LANE_SWAP_2301 = 2,
+	LANE_SWAP_1230 = 3,
+	LANE_SWAP_0321 = 4,
+	LANE_SWAP_1032 = 5,
+	LANE_SWAP_2103 = 6,
+	LANE_SWAP_3210 = 7,
+};
+
+#define DSI_IRQ_CMD_DMA_DONE					0x00000001
+#define DSI_IRQ_MASK_CMD_DMA_DONE				0x00000002
+#define DSI_IRQ_CMD_MDP_DONE					0x00000100
+#define DSI_IRQ_MASK_CMD_MDP_DONE				0x00000200
+#define DSI_IRQ_VIDEO_DONE					0x00010000
+#define DSI_IRQ_MASK_VIDEO_DONE					0x00020000
+#define DSI_IRQ_BTA_DONE					0x00100000
+#define DSI_IRQ_MASK_BTA_DONE					0x00200000
+#define DSI_IRQ_ERROR						0x01000000
+#define DSI_IRQ_MASK_ERROR					0x02000000
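+
+/*
+ * Each DSI_IRQ_* status bit pairs with a DSI_IRQ_MASK_* enable bit one
+ * position above it. A sketch of enabling the command-DMA-done interrupt
+ * (assuming a dsi_write() MMIO helper like the one in dsi_host.c):
+ *
+ *	dsi_write(msm_host, REG_DSI_INTR_CTRL, DSI_IRQ_MASK_CMD_DMA_DONE);
+ */
+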
+#define REG_DSI_6G_HW_VERSION					0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK				0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT				28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK				0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT				16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK				0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT				0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
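+
+/*
+ * These helpers pack field values; decoding a read-back version word is
+ * the reverse (sketch, assuming a plain readl() of the mapped register):
+ *
+ *	u32 ver = readl(base + REG_DSI_6G_HW_VERSION);
+ *	u32 major = (ver & DSI_6G_HW_VERSION_MAJOR__MASK) >>
+ *		    DSI_6G_HW_VERSION_MAJOR__SHIFT;
+ */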
+
+#define REG_DSI_CTRL						0x00000000
+#define DSI_CTRL_ENABLE						0x00000001
+#define DSI_CTRL_VID_MODE_EN					0x00000002
+#define DSI_CTRL_CMD_MODE_EN					0x00000004
+#define DSI_CTRL_LANE0						0x00000010
+#define DSI_CTRL_LANE1						0x00000020
+#define DSI_CTRL_LANE2						0x00000040
+#define DSI_CTRL_LANE3						0x00000080
+#define DSI_CTRL_CLK_EN						0x00000100
+#define DSI_CTRL_ECC_CHECK					0x00100000
+#define DSI_CTRL_CRC_CHECK					0x01000000
+
+#define REG_DSI_STATUS0						0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY			0x00000001
+#define DSI_STATUS0_CMD_MODE_DMA_BUSY				0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY				0x00000004
+#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY			0x00000008
+#define DSI_STATUS0_DSI_BUSY					0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION			0x80000000
+
+#define REG_DSI_FIFO_STATUS					0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW			0x00000080
+
+#define REG_DSI_VID_CFG0					0x0000000c
+#define DSI_VID_CFG0_VIRT_CHANNEL__MASK				0x00000003
+#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT			0
+static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
+{
+	return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
+}
+#define DSI_VID_CFG0_DST_FORMAT__MASK				0x00000030
+#define DSI_VID_CFG0_DST_FORMAT__SHIFT				4
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
+{
+	return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_VID_CFG0_TRAFFIC_MODE__MASK				0x00000300
+#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT			8
+static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
+{
+	return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
+}
+#define DSI_VID_CFG0_BLLP_POWER_STOP				0x00001000
+#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP			0x00008000
+#define DSI_VID_CFG0_HSA_POWER_STOP				0x00010000
+#define DSI_VID_CFG0_HBP_POWER_STOP				0x00100000
+#define DSI_VID_CFG0_HFP_POWER_STOP				0x01000000
+#define DSI_VID_CFG0_PULSE_MODE_HSA_HE				0x10000000
+
+#define REG_DSI_VID_CFG1					0x0000001c
+#define DSI_VID_CFG1_R_SEL					0x00000001
+#define DSI_VID_CFG1_G_SEL					0x00000010
+#define DSI_VID_CFG1_B_SEL					0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK				0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT				12
+static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
+{
+	return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
+}
+
+#define REG_DSI_ACTIVE_H					0x00000020
+#define DSI_ACTIVE_H_START__MASK				0x00000fff
+#define DSI_ACTIVE_H_START__SHIFT				0
+static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
+}
+#define DSI_ACTIVE_H_END__MASK					0x0fff0000
+#define DSI_ACTIVE_H_END__SHIFT					16
+static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_V					0x00000024
+#define DSI_ACTIVE_V_START__MASK				0x00000fff
+#define DSI_ACTIVE_V_START__SHIFT				0
+static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
+}
+#define DSI_ACTIVE_V_END__MASK					0x0fff0000
+#define DSI_ACTIVE_V_END__SHIFT					16
+static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
+}
+
+#define REG_DSI_TOTAL						0x00000028
+#define DSI_TOTAL_H_TOTAL__MASK					0x00000fff
+#define DSI_TOTAL_H_TOTAL__SHIFT				0
+static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_TOTAL_V_TOTAL__MASK					0x0fff0000
+#define DSI_TOTAL_V_TOTAL__SHIFT				16
+static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACTIVE_HSYNC					0x0000002c
+#define DSI_ACTIVE_HSYNC_START__MASK				0x00000fff
+#define DSI_ACTIVE_HSYNC_START__SHIFT				0
+static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
+}
+#define DSI_ACTIVE_HSYNC_END__MASK				0x0fff0000
+#define DSI_ACTIVE_HSYNC_END__SHIFT				16
+static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_HPOS				0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK			0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT			0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK				0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT			16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS				0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK			0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT			0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK				0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT			16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
+}
+
+#define REG_DSI_CMD_DMA_CTRL					0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN				0x80000000
+#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER			0x10000000
+#define DSI_CMD_DMA_CTRL_LOW_POWER				0x04000000
+
+#define REG_DSI_CMD_CFG0					0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK				0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT				0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+	return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL					0x00000010
+#define DSI_CMD_CFG0_G_SEL					0x00000100
+#define DSI_CMD_CFG0_B_SEL					0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK			0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT			20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK				0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT				16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+	return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
+
+#define REG_DSI_CMD_CFG1					0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK				0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT			0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK			0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT			8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND				0x00010000
+
+#define REG_DSI_DMA_BASE					0x00000044
+
+#define REG_DSI_DMA_LEN						0x00000048
+
+#define REG_DSI_CMD_MDP_STREAM_CTRL				0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK			0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT		0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK		0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT		8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK		0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT		16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL				0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK			0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT			0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK			0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT			16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACK_ERR_STATUS					0x00000064
+
+static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
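+
+/*
+ * Read-back payloads are drained as consecutive 32-bit words; a sketch
+ * (assuming a dsi_read() MMIO helper like the one in dsi_host.c):
+ *
+ *	for (i = 0; i < 4; i++)
+ *		data[i] = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+ */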
+
+#define REG_DSI_TRIG_CTRL					0x00000080
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK				0x00000007
+#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT			0
+static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
+{
+	return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK				0x00000070
+#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT			4
+static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
+{
+	return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_STREAM__MASK				0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT				8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+	return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME			0x00001000
+#define DSI_TRIG_CTRL_TE					0x80000000
+
+#define REG_DSI_TRIG_DMA					0x0000008c
+
+#define REG_DSI_DLN0_PHY_ERR					0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC				0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC			0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL			0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0		0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1		0x00010000
+
+#define REG_DSI_TIMEOUT_STATUS					0x000000bc
+
+#define REG_DSI_CLKOUT_TIMING_CTRL				0x000000c0
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK			0x0000003f
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT			0
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
+{
+	return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
+}
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK			0x00003f00
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT		8
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
+{
+	return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
+}
+
+#define REG_DSI_EOT_PACKET_CTRL					0x000000c8
+#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND			0x00000001
+#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE			0x00000010
+
+#define REG_DSI_LANE_CTRL					0x000000a8
+#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST			0x10000000
+
+#define REG_DSI_LANE_SWAP_CTRL					0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK			0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT			0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+	return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
+
+#define REG_DSI_ERR_INT_MASK0					0x00000108
+
+#define REG_DSI_INTR_CTRL					0x0000010c
+
+#define REG_DSI_RESET						0x00000114
+
+#define REG_DSI_CLK_CTRL					0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON				0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON				0x00000002
+#define DSI_CLK_CTRL_PCLK_ON					0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON					0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON					0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON					0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK			0x00000200
+
+#define REG_DSI_CLK_STATUS					0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED				0x00010000
+
+#define REG_DSI_PHY_RESET					0x00000128
+#define DSI_PHY_RESET_RESET					0x00000001
+
+#define REG_DSI_T_CLK_PRE_EXTEND				0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK			0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL					0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK				0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT				16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+	return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR					0x00000001
+
+#define REG_DSI_VERSION						0x000001f0
+#define DSI_VERSION_MAJOR__MASK					0xff000000
+#define DSI_VERSION_MAJOR__SHIFT				24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
+
+#define REG_DSI_PHY_PLL_CTRL_0					0x00000200
+#define DSI_PHY_PLL_CTRL_0_ENABLE				0x00000001
+
+#define REG_DSI_PHY_PLL_CTRL_1					0x00000204
+
+#define REG_DSI_PHY_PLL_CTRL_2					0x00000208
+
+#define REG_DSI_PHY_PLL_CTRL_3					0x0000020c
+
+#define REG_DSI_PHY_PLL_CTRL_4					0x00000210
+
+#define REG_DSI_PHY_PLL_CTRL_5					0x00000214
+
+#define REG_DSI_PHY_PLL_CTRL_6					0x00000218
+
+#define REG_DSI_PHY_PLL_CTRL_7					0x0000021c
+
+#define REG_DSI_PHY_PLL_CTRL_8					0x00000220
+
+#define REG_DSI_PHY_PLL_CTRL_9					0x00000224
+
+#define REG_DSI_PHY_PLL_CTRL_10					0x00000228
+
+#define REG_DSI_PHY_PLL_CTRL_11					0x0000022c
+
+#define REG_DSI_PHY_PLL_CTRL_12					0x00000230
+
+#define REG_DSI_PHY_PLL_CTRL_13					0x00000234
+
+#define REG_DSI_PHY_PLL_CTRL_14					0x00000238
+
+#define REG_DSI_PHY_PLL_CTRL_15					0x0000023c
+
+#define REG_DSI_PHY_PLL_CTRL_16					0x00000240
+
+#define REG_DSI_PHY_PLL_CTRL_17					0x00000244
+
+#define REG_DSI_PHY_PLL_CTRL_18					0x00000248
+
+#define REG_DSI_PHY_PLL_CTRL_19					0x0000024c
+
+#define REG_DSI_PHY_PLL_CTRL_20					0x00000250
+
+#define REG_DSI_PHY_PLL_STATUS					0x00000280
+#define DSI_PHY_PLL_STATUS_PLL_BUSY				0x00000001
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_1				0x00000258
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_2				0x0000025c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_0				0x00000260
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_1				0x00000264
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_2				0x00000268
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_3				0x0000026c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_4				0x00000270
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_5				0x00000274
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_6				0x00000278
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_7				0x0000027c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_8				0x00000280
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_9				0x00000284
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_10				0x00000288
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_11				0x0000028c
+
+#define REG_DSI_8x60_PHY_CTRL_0					0x00000290
+
+#define REG_DSI_8x60_PHY_CTRL_1					0x00000294
+
+#define REG_DSI_8x60_PHY_CTRL_2					0x00000298
+
+#define REG_DSI_8x60_PHY_CTRL_3					0x0000029c
+
+#define REG_DSI_8x60_PHY_STRENGTH_0				0x000002a0
+
+#define REG_DSI_8x60_PHY_STRENGTH_1				0x000002a4
+
+#define REG_DSI_8x60_PHY_STRENGTH_2				0x000002a8
+
+#define REG_DSI_8x60_PHY_STRENGTH_3				0x000002ac
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0			0x000002cc
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1			0x000002d0
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2			0x000002d4
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3			0x000002d8
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4			0x000002dc
+
+#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER				0x000000f0
+
+#define REG_DSI_8x60_PHY_CAL_CTRL				0x000000f4
+
+#define REG_DSI_8x60_PHY_CAL_STATUS				0x000000fc
+#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY			0x10000000
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0			0x00000100
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1			0x00000104
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2			0x00000108
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH		0x0000010c
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0			0x00000114
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1			0x00000118
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0			0x00000140
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1			0x00000144
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT	0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2			0x00000148
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK	0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT	0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3			0x0000014c
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4			0x00000150
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5			0x00000154
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6			0x00000158
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK	0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT	0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7			0x0000015c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8			0x00000160
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9			0x00000164
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK		0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10			0x00000168
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11			0x0000016c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK	0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT	0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_CTRL_0				0x00000170
+
+#define REG_DSI_28nm_8960_PHY_CTRL_1				0x00000174
+
+#define REG_DSI_28nm_8960_PHY_CTRL_2				0x00000178
+
+#define REG_DSI_28nm_8960_PHY_CTRL_3				0x0000017c
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_0			0x00000180
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_1			0x00000184
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_2			0x00000188
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0			0x0000018c
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1			0x00000190
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2			0x00000194
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3			0x00000198
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4			0x0000019c
+
+#define REG_DSI_28nm_8960_PHY_LDO_CTRL				0x000001b0
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0		0x00000000
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1		0x00000004
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2		0x00000008
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3		0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4		0x00000010
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5		0x00000014
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG	0x00000018
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER		0x00000028
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0			0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1			0x00000030
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2			0x00000034
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0			0x00000038
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1			0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2			0x00000040
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3			0x00000044
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4			0x00000048
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS			0x00000050
+#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY		0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0			0x00000000
+#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE			0x00000001
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1			0x00000004
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2			0x00000008
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3			0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4			0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5			0x00000014
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6			0x00000018
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7			0x0000001c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8			0x00000020
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9			0x00000024
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10			0x00000028
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11			0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12			0x00000030
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13			0x00000034
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14			0x00000038
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15			0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16			0x00000040
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17			0x00000044
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18			0x00000048
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19			0x0000004c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20			0x00000050
+
+#define REG_DSI_28nm_8960_PHY_PLL_RDY				0x00000080
+#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY			0x00000001
+
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
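+
+/*
+ * The per-lane helpers take the data-lane index, so programming all four
+ * lanes is a loop (sketch, assuming a dsi_phy_write() helper like the one
+ * in dsi_phy_28nm.c):
+ *
+ *	for (i = 0; i < 4; i++)
+ *		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ */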
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL				0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG				0x00000000
+#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR			0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG			0x00000004
+
+#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG			0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG				0x0000000c
+
+#define REG_DSI_28nm_PHY_PLL_VREG_CFG				0x00000010
+#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B		0x00000002
+
+#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG				0x00000014
+
+#define REG_DSI_28nm_PHY_PLL_DMUX_CFG				0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_AMUX_CFG				0x0000001c
+
+#define REG_DSI_28nm_PHY_PLL_GLB_CFG				0x00000020
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B			0x00000001
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B		0x00000002
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B		0x00000004
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE			0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG			0x00000024
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG			0x00000028
+
+#define REG_DSI_28nm_PHY_PLL_LPFR_CFG				0x0000002c
+
+#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG				0x00000030
+
+#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG				0x00000034
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG0				0x00000038
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK			0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP				0x00000040
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG1				0x0000003c
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK		0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK		0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT		6
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG2				0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK		0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG3				0x00000044
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK		0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG4				0x00000048
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG0				0x0000004c
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG1				0x00000050
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG2				0x00000054
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG3				0x00000058
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0				0x0000005c
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1				0x00000060
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2				0x00000064
+
+#define REG_DSI_28nm_PHY_PLL_TEST_CFG				0x00000068
+#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET			0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG0				0x0000006c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG1				0x00000070
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG2				0x00000074
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG3				0x00000078
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG4				0x0000007c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG5				0x00000080
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG6				0x00000084
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG7				0x00000088
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG8				0x0000008c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG9				0x00000090
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG10				0x00000094
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG11				0x00000098
+
+#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG				0x0000009c
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL			0x000000a0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_42				0x000000a4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_43				0x000000a8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_44				0x000000ac
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_45				0x000000b0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_46				0x000000b4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_47				0x000000b8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_48				0x000000bc
+
+#define REG_DSI_28nm_PHY_PLL_STATUS				0x000000c0
+#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY				0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0				0x000000c4
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1				0x000000c8
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2				0x000000cc
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3				0x000000d0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_54				0x000000d4
+
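+/*
+ * Per-lane 20nm PHY registers: each lane occupies a 0x40-byte block, so
+ * these helpers take a lane index i0 and return that lane's offset.
+ */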
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL				0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0			0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1			0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2			0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3			0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0				0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1				0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL			0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0				0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1				0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER				0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0				0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1				0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2				0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0				0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1				0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2				0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3				0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4				0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL				0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL				0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK		0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK			0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT			6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN			0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM				0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM				0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM				0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN			0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET			0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL			0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2			0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3			0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4			0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5			0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1			0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2			0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1			0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2			0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1				0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE				0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1			0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2			0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1				0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2				0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1			0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2			0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3			0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN			0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE			0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START				0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER			0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1			0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2			0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1				0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2				0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1			0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2			0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1			0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2			0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3			0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN				0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL				0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS		0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1				0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR				0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET			0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET			0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET			0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1				0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV			0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP			0x00000108
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0			0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1			0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2			0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3			0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0				0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1				0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL				0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL				0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL				0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0				0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1				0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2				0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0				0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1				0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL				0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0				0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1				0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2				0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3				0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4				0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0			0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1			0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2			0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3			0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4			0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5			0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6			0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7			0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8			0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9			0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10			0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11			0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS				0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0			0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1			0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE		0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO		0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE		0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER			0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER			0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES			0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE				0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS		0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE	0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE		0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT				0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT				0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV				0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE			0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE		0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO		0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1		0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1		0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1		0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1		0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1			0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1		0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1			0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1			0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1		0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1		0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL			0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE			0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1			0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1		0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1		0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1		0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE			0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY			0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS			0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE			0x000001a0
+
+#endif /* DSI_XML */
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 0000000..726c881
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_cfg.h"
+
+static const char * const dsi_v2_bus_clk_names[] = {
+	"core_mmss", "iface", "bus",
+};
+
+static const struct msm_dsi_config apq8064_dsi_cfg = {
+	.io_offset = 0,
+	.reg_cfg = {
+		.num = 3,
+		.regs = {
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"avdd", 10000, 100},	/* 3.0 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_v2_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+	.io_start = { 0x4700000, 0x5800000 },
+	.num_dsi = 2,
+};
+
+static const char * const dsi_6g_bus_clk_names[] = {
+	"mdp_core", "iface", "bus", "core_mmss",
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1},
+			{"vdd", 150000, 100},	/* 3.0 V */
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_6g_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd922800, 0xfd922b00 },
+	.num_dsi = 2,
+};
+
+static const char * const dsi_8916_bus_clk_names[] = {
+	"mdp_core", "iface", "bus",
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 3,
+		.regs = {
+			{"gdsc", -1, -1},
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_8916_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+	.io_start = { 0x1a98000 },
+	.num_dsi = 1,
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 7,
+		.regs = {
+			{"gdsc", -1, -1},
+			{"vdda", 100000, 100},	/* 1.25 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+			{"vcca", 10000, 100},	/* 1.0 V */
+			{"vdd", 100000, 100},	/* 1.8 V */
+			{"lab_reg", -1, -1},
+			{"ibb_reg", -1, -1},
+		},
+	},
+	.bus_clk_names = dsi_6g_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd998000, 0xfd9a0000 },
+	.num_dsi = 2,
+};
+
+/*
+ * TODO: core_mmss_clk fails to enable for some reason, but things work fine
+ * without it too. Figure out why it doesn't enable and uncomment below
+ */
+static const char * const dsi_8996_bus_clk_names[] = {
+	"mdp_core", "iface", "bus", /* "core_mmss", */
+};
+
+static const struct msm_dsi_config msm8996_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 3,
+		.regs = {
+			{"vdda", 18160, 1 },	/* 1.25 V */
+			{"vcca", 17000, 32 },	/* 0.925 V */
+			{"vddio", 100000, 100 },/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_8996_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
+	.io_start = { 0x994000, 0x996000 },
+	.num_dsi = 2,
+};
+
+static const char * const dsi_msm8998_bus_clk_names[] = {
+	"iface", "bus", "core",
+};
+
+static const struct msm_dsi_config msm8998_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vdd", 367000, 16 },	/* 0.9 V */
+			{"vdda", 62800, 2 },	/* 1.2 V */
+		},
+	},
+	.bus_clk_names = dsi_msm8998_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
+	.io_start = { 0xc994000, 0xc996000 },
+	.num_dsi = 2,
+};
+
+static const char * const dsi_sdm845_bus_clk_names[] = {
+	"iface", "bus",
+};
+
+static const struct msm_dsi_config sdm845_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vdda", 21800, 4 },	/* 1.2 V */
+		},
+	},
+	.bus_clk_names = dsi_sdm845_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+	.io_start = { 0xae94000, 0xae96000 },
+	.num_dsi = 2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+	.link_clk_enable = dsi_link_clk_enable_v2,
+	.link_clk_disable = dsi_link_clk_disable_v2,
+	.clk_init_ver = dsi_clk_init_v2,
+	.tx_buf_alloc = dsi_tx_buf_alloc_v2,
+	.tx_buf_get = dsi_tx_buf_get_v2,
+	.tx_buf_put = NULL,
+	.dma_base_get = dsi_dma_base_get_v2,
+	.calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+	.link_clk_enable = dsi_link_clk_enable_6g,
+	.link_clk_disable = dsi_link_clk_disable_6g,
+	.clk_init_ver = NULL,
+	.tx_buf_alloc = dsi_tx_buf_alloc_6g,
+	.tx_buf_get = dsi_tx_buf_get_6g,
+	.tx_buf_put = dsi_tx_buf_put_6g,
+	.dma_base_get = dsi_dma_base_get_6g,
+	.calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+	.link_clk_enable = dsi_link_clk_enable_6g,
+	.link_clk_disable = dsi_link_clk_disable_6g,
+	.clk_init_ver = dsi_clk_init_6g_v2,
+	.tx_buf_alloc = dsi_tx_buf_alloc_6g,
+	.tx_buf_get = dsi_tx_buf_get_6g,
+	.tx_buf_put = dsi_tx_buf_put_6g,
+	.dma_base_get = dsi_dma_base_get_6g,
+	.calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+	{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+		&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+		&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+		&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+		&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+		&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+		&msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+		&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+		&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
+		&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+		&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+	int i;
+
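+	/* exact match on both major and minor version; scan from the
+	 * newest table entry down
+	 */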
+	for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+		if ((dsi_cfg_handlers[i].major == major) &&
+			(dsi_cfg_handlers[i].minor == minor)) {
+			cfg_hnd = &dsi_cfg_handlers[i];
+			break;
+		}
+	}
+
+	return cfg_hnd;
+}
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 0000000..e2b7a7d
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2	0x02
+#define MSM_DSI_VER_MAJOR_6G	0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+#define MSM_DSI_6G_VER_MINOR_V1_4_1	0x10040001
+#define MSM_DSI_6G_VER_MINOR_V2_2_0	0x20000000
+#define MSM_DSI_6G_VER_MINOR_V2_2_1	0x20020001
+
+#define MSM_DSI_V2_VER_MINOR_8064	0x0
+
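+/*
+ * DSI6G(v3) and later have a 6G_HW_VERSION register at offset 0, which
+ * shifts all other registers down by 4 bytes (see dsi_get_version()).
+ */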
+#define DSI_6G_REG_SHIFT	4
+
+struct msm_dsi_config {
+	u32 io_offset;
+	struct dsi_reg_config reg_cfg;
+	const char * const *bus_clk_names;
+	const int num_bus_clks;
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi;
+};
+
+struct msm_dsi_host_cfg_ops {
+	int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+	void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+	int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+	int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+	void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+	void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+	int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+	int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
+struct msm_dsi_cfg_handler {
+	u32 major;
+	u32 minor;
+	const struct msm_dsi_config *cfg;
+	const struct msm_dsi_host_cfg_ops *ops;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_host.c b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 0000000..41b6804
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,2476 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+
+#include <video/mipi_display.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+#include "sfpb.xml.h"
+#include "dsi_cfg.h"
+#include "msm_kms.h"
+
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+	u32 ver;
+
+	if (!major || !minor)
+		return -EINVAL;
+
+	/*
+	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
+	 * makes all other registers 4-byte shifted down.
+	 *
+	 * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
+	 * older, we read the DSI_VERSION register without any shift (offset
+	 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
+	 * the case of DSI6G, this has to be zero (the offset points to a
+	 * scratch register which we never touch)
+	 */
+
+	ver = msm_readl(base + REG_DSI_VERSION);
+	if (ver) {
+		/* older dsi host, there is no register shift */
+		ver = FIELD(ver, DSI_VERSION_MAJOR);
+		if (ver <= MSM_DSI_VER_MAJOR_V2) {
+			/* old versions */
+			*major = ver;
+			*minor = 0;
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		/*
+		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
+		 * registers are shifted down, read DSI_VERSION again with
+		 * the shifted offset
+		 */
+		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+		ver = FIELD(ver, DSI_VERSION_MAJOR);
+		if (ver == MSM_DSI_VER_MAJOR_6G) {
+			/* 6G version */
+			*major = ver;
+			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	}
+}
+
+#define DSI_ERR_STATE_ACK			0x0000
+#define DSI_ERR_STATE_TIMEOUT			0x0001
+#define DSI_ERR_STATE_DLN0_PHY			0x0002
+#define DSI_ERR_STATE_FIFO			0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED		0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS	\
+		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+	struct mipi_dsi_host base;
+
+	struct platform_device *pdev;
+	struct drm_device *dev;
+
+	int id;
+
+	void __iomem *ctrl_base;
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+	struct clk *bus_clks[DSI_BUS_CLK_MAX];
+
+	struct clk *byte_clk;
+	struct clk *esc_clk;
+	struct clk *pixel_clk;
+	struct clk *byte_clk_src;
+	struct clk *pixel_clk_src;
+	struct clk *byte_intf_clk;
+
+	u32 byte_clk_rate;
+	u32 pixel_clk_rate;
+	u32 esc_clk_rate;
+
+	/* DSI v2 specific clocks */
+	struct clk *src_clk;
+	struct clk *esc_clk_src;
+	struct clk *dsi_clk_src;
+
+	u32 src_clk_rate;
+
+	struct gpio_desc *disp_en_gpio;
+	struct gpio_desc *te_gpio;
+
+	const struct msm_dsi_cfg_handler *cfg_hnd;
+
+	struct completion dma_comp;
+	struct completion video_comp;
+	struct mutex dev_mutex;
+	struct mutex cmd_mutex;
+	spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+	u32 err_work_state;
+	struct work_struct err_work;
+	struct work_struct hpd_work;
+	struct workqueue_struct *workqueue;
+
+	/* DSI 6G TX buffer */
+	struct drm_gem_object *tx_gem_obj;
+
+	/* DSI v2 TX buffer */
+	void *tx_buf;
+	dma_addr_t tx_buf_paddr;
+
+	int tx_size;
+
+	u8 *rx_buf;
+
+	struct regmap *sfpb;
+
+	struct drm_display_mode *mode;
+
+	/* connected device info */
+	struct device_node *device_node;
+	unsigned int channel;
+	unsigned int lanes;
+	enum mipi_dsi_pixel_format format;
+	unsigned long mode_flags;
+
+	/* lane data parsed via DT */
+	int dlane_swap;
+	int num_data_lanes;
+
+	u32 dma_cmd_ctrl_restore;
+
+	bool registered;
+	bool power_on;
+	bool enabled;
+	int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
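+	/* bits per pixel on the wire; note that loosely packed RGB666
+	 * takes 3 bytes per pixel, just like RGB888
+	 */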
+	switch (fmt) {
+	case MIPI_DSI_FMT_RGB565:		return 16;
+	case MIPI_DSI_FMT_RGB666_PACKED:	return 18;
+	case MIPI_DSI_FMT_RGB666:
+	case MIPI_DSI_FMT_RGB888:
+	default:				return 24;
+	}
+}
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+	return msm_readl(msm_host->ctrl_base + reg);
+}
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+	msm_writel(data, msm_host->ctrl_base + reg);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
+
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+						struct msm_dsi_host *msm_host)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+	struct device *dev = &msm_host->pdev->dev;
+	struct regulator *gdsc_reg;
+	struct clk *ahb_clk;
+	int ret;
+	u32 major = 0, minor = 0;
+
+	gdsc_reg = regulator_get(dev, "gdsc");
+	if (IS_ERR(gdsc_reg)) {
+		pr_err("%s: cannot get gdsc\n", __func__);
+		goto exit;
+	}
+
+	ahb_clk = msm_clk_get(msm_host->pdev, "iface");
+	if (IS_ERR(ahb_clk)) {
+		pr_err("%s: cannot get interface clock\n", __func__);
+		goto put_gdsc;
+	}
+
+	pm_runtime_get_sync(dev);
+
+	ret = regulator_enable(gdsc_reg);
+	if (ret) {
+		pr_err("%s: unable to enable gdsc\n", __func__);
+		pm_runtime_put_sync(dev);
+		goto put_gdsc;
+	}
+
+	ret = clk_prepare_enable(ahb_clk);
+	if (ret) {
+		pr_err("%s: unable to enable ahb_clk\n", __func__);
+		goto disable_gdsc;
+	}
+
+	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+	if (ret) {
+		pr_err("%s: Invalid version\n", __func__);
+		goto disable_clks;
+	}
+
+	cfg_hnd = msm_dsi_cfg_get(major, minor);
+
+	DBG("%s: Version %x:%x", __func__, major, minor);
+
+disable_clks:
+	clk_disable_unprepare(ahb_clk);
+disable_gdsc:
+	regulator_disable(gdsc_reg);
+	pm_runtime_put_sync(dev);
+put_gdsc:
+	regulator_put(gdsc_reg);
+exit:
+	return cfg_hnd;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct msm_dsi_host, base);
+}
+
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
+	int i;
+
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_load(s[i].consumer,
+					   regs[i].disable_load);
+
+	regulator_bulk_disable(num, s);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
+	int ret, i;
+
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_load(s[i].consumer,
+						 regs[i].enable_load);
+			if (ret < 0) {
+				pr_err("regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		pr_err("regulator enable failed, %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
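+	/* unwind: restore disable_load on the regulators configured so far */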
+	for (i--; i >= 0; i--)
+		regulator_set_load(s[i].consumer, regs[i].disable_load);
+	return ret;
+}
+
+static int dsi_regulator_init(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
+	int i, ret;
+
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
+
+	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
+	if (ret < 0) {
+		pr_err("%s: failed to init regulator, ret=%d\n",
+						__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	int ret = 0;
+
+	msm_host->src_clk = msm_clk_get(pdev, "src");
+
+	if (IS_ERR(msm_host->src_clk)) {
+		ret = PTR_ERR(msm_host->src_clk);
+		pr_err("%s: can't find src clock. ret=%d\n",
+			__func__, ret);
+		msm_host->src_clk = NULL;
+		return ret;
+	}
+
+	msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+	if (!msm_host->esc_clk_src) {
+		ret = -ENODEV;
+		pr_err("%s: can't get esc clock parent. ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+	if (!msm_host->dsi_clk_src) {
+		ret = -ENODEV;
+		pr_err("%s: can't get src clock parent. ret=%d\n",
+			__func__, ret);
+	}
+
+	return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	int ret = 0;
+
+	msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+	if (IS_ERR(msm_host->byte_intf_clk)) {
+		ret = PTR_ERR(msm_host->byte_intf_clk);
+		pr_err("%s: can't find byte_intf clock. ret=%d\n",
+			__func__, ret);
+	}
+
+	return ret;
+}
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
+	int i, ret = 0;
+
+	/* get bus clocks */
+	for (i = 0; i < cfg->num_bus_clks; i++) {
+		msm_host->bus_clks[i] = msm_clk_get(pdev,
+						cfg->bus_clk_names[i]);
+		if (IS_ERR(msm_host->bus_clks[i])) {
+			ret = PTR_ERR(msm_host->bus_clks[i]);
+			pr_err("%s: Unable to get %s clock, ret = %d\n",
+				__func__, cfg->bus_clk_names[i], ret);
+			goto exit;
+		}
+	}
+
+	/* get link and source clocks */
+	msm_host->byte_clk = msm_clk_get(pdev, "byte");
+	if (IS_ERR(msm_host->byte_clk)) {
+		ret = PTR_ERR(msm_host->byte_clk);
+		pr_err("%s: can't find dsi_byte clock. ret=%d\n",
+			__func__, ret);
+		msm_host->byte_clk = NULL;
+		goto exit;
+	}
+
+	msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
+	if (IS_ERR(msm_host->pixel_clk)) {
+		ret = PTR_ERR(msm_host->pixel_clk);
+		pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
+			__func__, ret);
+		msm_host->pixel_clk = NULL;
+		goto exit;
+	}
+
+	msm_host->esc_clk = msm_clk_get(pdev, "core");
+	if (IS_ERR(msm_host->esc_clk)) {
+		ret = PTR_ERR(msm_host->esc_clk);
+		pr_err("%s: can't find dsi_esc clock. ret=%d\n",
+			__func__, ret);
+		msm_host->esc_clk = NULL;
+		goto exit;
+	}
+
+	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
+	if (IS_ERR(msm_host->byte_clk_src)) {
+		ret = PTR_ERR(msm_host->byte_clk_src);
+		pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
+		goto exit;
+	}
+
+	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
+	if (IS_ERR(msm_host->pixel_clk_src)) {
+		ret = PTR_ERR(msm_host->pixel_clk_src);
+		pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
+		goto exit;
+	}
+
+	if (cfg_hnd->ops->clk_init_ver)
+		ret = cfg_hnd->ops->clk_init_ver(msm_host);
+exit:
+	return ret;
+}
+
+static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
+{
+	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+	int i, ret;
+
+	DBG("id=%d", msm_host->id);
+
+	for (i = 0; i < cfg->num_bus_clks; i++) {
+		ret = clk_prepare_enable(msm_host->bus_clks[i]);
+		if (ret) {
+			pr_err("%s: failed to enable bus clock %d ret %d\n",
+				__func__, i, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	while (--i >= 0)
+		clk_disable_unprepare(msm_host->bus_clks[i]);
+
+	return ret;
+}
+
+static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
+{
+	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+	int i;
+
+	DBG("");
+
+	for (i = cfg->num_bus_clks - 1; i >= 0; i--)
+		clk_disable_unprepare(msm_host->bus_clks[i]);
+}
+
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
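+	/* cfg_hnd is only set once the host has been initialized */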
+	if (!msm_host->cfg_hnd)
+		return 0;
+
+	dsi_bus_clk_disable(msm_host);
+
+	return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (!msm_host->cfg_hnd)
+		return 0;
+
+	return dsi_bus_clk_enable(msm_host);
+}
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
+	DBG("Set clk rates: pclk=%d, byteclk=%d",
+		msm_host->mode->clock, msm_host->byte_clk_rate);
+
+	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	if (msm_host->byte_intf_clk) {
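+		/* byte_intf_clk is expected to run at half the byte clock rate */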
+		ret = clk_set_rate(msm_host->byte_intf_clk,
+				   msm_host->byte_clk_rate / 2);
+		if (ret) {
+			pr_err("%s: Failed to set rate byte intf clk, %d\n",
+			       __func__, ret);
+			goto error;
+		}
+	}
+
+	ret = clk_prepare_enable(msm_host->esc_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+		goto error;
+	}
+
+	ret = clk_prepare_enable(msm_host->byte_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	if (msm_host->byte_intf_clk) {
+		ret = clk_prepare_enable(msm_host->byte_intf_clk);
+		if (ret) {
+			pr_err("%s: Failed to enable byte intf clk\n",
+			       __func__);
+			goto byte_intf_clk_err;
+		}
+	}
+
+	return 0;
+
+byte_intf_clk_err:
+	clk_disable_unprepare(msm_host->pixel_clk);
+pixel_clk_err:
+	clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+	clk_disable_unprepare(msm_host->esc_clk);
+error:
+	return ret;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
+	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
+		msm_host->mode->clock, msm_host->byte_clk_rate,
+		msm_host->esc_clk_rate, msm_host->src_clk_rate);
+
+	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_prepare_enable(msm_host->byte_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+		goto error;
+	}
+
+	ret = clk_prepare_enable(msm_host->esc_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+		goto esc_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->src_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi src clk\n", __func__);
+		goto src_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	return 0;
+
+pixel_clk_err:
+	clk_disable_unprepare(msm_host->src_clk);
+src_clk_err:
+	clk_disable_unprepare(msm_host->esc_clk);
+esc_clk_err:
+	clk_disable_unprepare(msm_host->byte_clk);
+error:
+	return ret;
+}
+
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
+{
+	clk_disable_unprepare(msm_host->esc_clk);
+	clk_disable_unprepare(msm_host->pixel_clk);
+	if (msm_host->byte_intf_clk)
+		clk_disable_unprepare(msm_host->byte_intf_clk);
+	clk_disable_unprepare(msm_host->byte_clk);
+}
+
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+	clk_disable_unprepare(msm_host->pixel_clk);
+	clk_disable_unprepare(msm_host->src_clk);
+	clk_disable_unprepare(msm_host->esc_clk);
+	clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+	struct drm_display_mode *mode = msm_host->mode;
+	u32 pclk_rate;
+
+	pclk_rate = mode->clock * 1000u;
+
+	/*
+	 * For dual DSI mode, the current DRM mode has the complete width of the
+	 * panel. Since the complete panel is driven by two DSI controllers,
+	 * the clock rates have to be split between the two dsi controllers.
+	 * Adjust the byte and pixel clock rates for each dsi host accordingly.
+	 */
+	if (is_dual_dsi)
+		pclk_rate /= 2;
+
+	return pclk_rate;
+}
+
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+	u8 lanes = msm_host->lanes;
+	u32 bpp = dsi_get_bpp(msm_host->format);
+	u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+	u64 pclk_bpp = (u64)pclk_rate * bpp;
+
+	if (lanes == 0) {
+		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+		lanes = 1;
+	}
+
+	do_div(pclk_bpp, (8 * lanes));
+
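+	/* byte_clk = pclk * bpp / (8 * lanes), e.g. (illustrative numbers,
+	 * not from this patch) a 148500 kHz pixel clock at 24 bpp on 4
+	 * lanes gives 148.5 MHz * 24 / 32 = 111.375 MHz byte clock
+	 */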
+	msm_host->pixel_clk_rate = pclk_rate;
+	msm_host->byte_clk_rate = pclk_bpp;
+
+	DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+				msm_host->byte_clk_rate);
+}
+
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+	if (!msm_host->mode) {
+		pr_err("%s: mode not set\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_calc_pclk(msm_host, is_dual_dsi);
+	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+	return 0;
+}
+
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+	u32 bpp = dsi_get_bpp(msm_host->format);
+	u64 pclk_bpp;
+	unsigned int esc_mhz, esc_div;
+	unsigned long byte_mhz;
+
+	dsi_calc_pclk(msm_host, is_dual_dsi);
+
+	pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+	do_div(pclk_bpp, 8);
+	msm_host->src_clk_rate = pclk_bpp;
+
+	/*
+	 * The esc clock is the byte clock followed by a 4 bit divider, so
+	 * we need to find an escape clock frequency that is both within the
+	 * MIPI DSI spec range and within the maximum divider limit. We
+	 * iterate over escape clock frequencies from 20 MHz down to 5 MHz
+	 * and pick the first one our divider can support.
+	 */
+
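+	/* e.g. (illustrative numbers, not from this patch): for a 112 MHz
+	 * byte clock, esc_mhz = 20 gives esc_div = DIV_ROUND_UP(112, 20) = 6,
+	 * which fits the 4 bit divider, so esc_clk_rate = 112 MHz / 6 ~= 18.7 MHz
+	 */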
+	byte_mhz = msm_host->byte_clk_rate / 1000000;
+
+	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+
+		/*
+		 * TODO: Ideally, we shouldn't know what sort of divider
+		 * is available in mmss_cc, we're just assuming that
+		 * it'll always be a 4 bit divider. Need to come up with
+		 * a better way here.
+		 */
+		if (esc_div >= 1 && esc_div <= 16)
+			break;
+	}
+
+	if (esc_mhz < 5)
+		return -EINVAL;
+
+	msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+	DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+		msm_host->src_clk_rate);
+
+	return 0;
+}
+
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+	u32 intr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_host->intr_lock, flags);
+	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+	if (enable)
+		intr |= mask;
+	else
+		intr &= ~mask;
+
+	DBG("intr=%x enable=%d", intr, enable);
+
+	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+		return BURST_MODE;
+	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		return NON_BURST_SYNCH_PULSE;
+
+	return NON_BURST_SYNCH_EVENT;
+}
+
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+				const enum mipi_dsi_pixel_format mipi_fmt)
+{
+	switch (mipi_fmt) {
+	case MIPI_DSI_FMT_RGB888:	return VID_DST_FORMAT_RGB888;
+	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666_LOOSE;
+	case MIPI_DSI_FMT_RGB666_PACKED:	return VID_DST_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB565:	return VID_DST_FORMAT_RGB565;
+	default:			return VID_DST_FORMAT_RGB888;
+	}
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+				const enum mipi_dsi_pixel_format mipi_fmt)
+{
+	switch (mipi_fmt) {
+	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
+	default:			return CMD_DST_FORMAT_RGB888;
+	}
+}
+
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings)
+{
+	u32 flags = msm_host->mode_flags;
+	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	u32 data = 0;
+
+	if (!enable) {
+		dsi_write(msm_host, REG_DSI_CTRL, 0);
+		return;
+	}
+
+	if (flags & MIPI_DSI_MODE_VIDEO) {
+		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+			data |= DSI_VID_CFG0_HFP_POWER_STOP;
+		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+			data |= DSI_VID_CFG0_HBP_POWER_STOP;
+		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+			data |= DSI_VID_CFG0_HSA_POWER_STOP;
+		/* Always set low power stop mode for BLLP
+		 * to let command engine send packets
+		 */
+		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+			DSI_VID_CFG0_BLLP_POWER_STOP;
+		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+		dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+		/* Do not swap RGB colors */
+		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+		dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+	} else {
+		/* Do not swap RGB colors */
+		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+			DSI_CMD_CFG1_WR_MEM_CONTINUE(
+					MIPI_DCS_WRITE_MEMORY_CONTINUE);
+		/* Always insert DCS command */
+		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+	}
+
+	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+			DSI_CMD_DMA_CTRL_LOW_POWER);
+
+	data = 0;
+	/* Always assume dedicated TE pin */
+	data |= DSI_TRIG_CTRL_TE;
+	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
+		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
+	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+	    (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
+	    phy_shared_timings->clk_pre_inc_by_2)
+		dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
+			  DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
+
+	data = 0;
+	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+	/* allow only ack-err-status to generate interrupt */
+	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+	data = DSI_CTRL_CLK_EN;
+
+	DBG("lane number=%d", msm_host->lanes);
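+	/* (LANE0 << n) - LANE0 is a contiguous bitmask enabling lanes 0..n-1 */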
+	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+
+	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
+
+	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		dsi_write(msm_host, REG_DSI_LANE_CTRL,
+			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+
+	data |= DSI_CTRL_ENABLE;
+
+	dsi_write(msm_host, REG_DSI_CTRL, data);
+}
+
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+	struct drm_display_mode *mode = msm_host->mode;
+	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+	u32 h_total = mode->htotal;
+	u32 v_total = mode->vtotal;
+	u32 hs_end = mode->hsync_end - mode->hsync_start;
+	u32 vs_end = mode->vsync_end - mode->vsync_start;
+	u32 ha_start = h_total - mode->hsync_start;
+	u32 ha_end = ha_start + mode->hdisplay;
+	u32 va_start = v_total - mode->vsync_start;
+	u32 va_end = va_start + mode->vdisplay;
+	u32 hdisplay = mode->hdisplay;
+	u32 wc;
+
+	DBG("");
+
+	/*
+	 * For dual DSI mode, the current DRM mode has
+	 * the complete width of the panel. Since, the complete
+	 * panel is driven by two DSI controllers, the horizontal
+	 * timings have to be split between the two dsi controllers.
+	 * Adjust the DSI host timing values accordingly.
+	 */
+	if (is_dual_dsi) {
+		h_total /= 2;
+		hs_end /= 2;
+		ha_start /= 2;
+		ha_end /= 2;
+		hdisplay /= 2;
+	}
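+	/*
+	 * Illustrative example with a hypothetical mode (numbers are not
+	 * from this driver): hdisplay = 720, hsync_start = 740,
+	 * hsync_end = 780, htotal = 800. Taking sync start as 0:
+	 * hs_end = 40, ha_start = 800 - 740 = 60, ha_end = 60 + 720 = 780.
+	 * The registers below are therefore programmed relative to the
+	 * start of HSYNC rather than the start of active video.
+	 */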
+
+	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		dsi_write(msm_host, REG_DSI_ACTIVE_H,
+			DSI_ACTIVE_H_START(ha_start) |
+			DSI_ACTIVE_H_END(ha_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_V,
+			DSI_ACTIVE_V_START(va_start) |
+			DSI_ACTIVE_V_END(va_end));
+		dsi_write(msm_host, REG_DSI_TOTAL,
+			DSI_TOTAL_H_TOTAL(h_total - 1) |
+			DSI_TOTAL_V_TOTAL(v_total - 1));
+
+		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+			DSI_ACTIVE_HSYNC_START(hs_start) |
+			DSI_ACTIVE_HSYNC_END(hs_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+	} else {		/* command mode */
+		/* image data and 1 byte write_memory_start cmd */
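+		/* e.g. for a hypothetical 720-wide RGB888 panel:
+		 * wc = 720 * 24 / 8 + 1 = 2161 bytes per line
+		 */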
+		wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
+			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
+			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+					msm_host->channel) |
+			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+					MIPI_DSI_DCS_LONG_WRITE));
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
+			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
+			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+	}
+}
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+	wmb(); /* clocks need to be enabled before reset */
+
+	dsi_write(msm_host, REG_DSI_RESET, 1);
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
+	dsi_write(msm_host, REG_DSI_RESET, 0);
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+					bool video_mode, bool enable)
+{
+	u32 dsi_ctrl;
+
+	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+	if (!enable) {
+		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+				DSI_CTRL_CMD_MODE_EN);
+		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+					DSI_IRQ_MASK_VIDEO_DONE, 0);
+	} else {
+		if (video_mode) {
+			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+		} else {		/* command mode */
+			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+		}
+		dsi_ctrl |= DSI_CTRL_ENABLE;
+	}
+
+	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+	u32 data;
+
+	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+	if (mode == 0)
+		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+	else
+		data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+	unsigned long ret;
+	struct device *dev = &msm_host->pdev->dev;
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+	reinit_completion(&msm_host->video_comp);
+
+	ret = wait_for_completion_timeout(&msm_host->video_comp,
+			msecs_to_jiffies(70));
+
+	if (ret == 0)
+		DRM_DEV_ERROR(dev, "wait for video done timed out\n");
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+	u32 data;
+
+	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+		return;
+
+	data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+	/* If the video mode engine is not busy, it's because either the
+	 * timing engine was not turned on or the DSI controller has
+	 * already finished transmitting the video data, so there is no
+	 * need to wait in those cases.
+	 */
+	if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+		return;
+
+	if (msm_host->power_on && msm_host->enabled) {
+		dsi_wait4video_done(msm_host);
+		/* delay 2~4 ms to skip BLLP */
+		usleep_range(2000, 4000);
+	}
+}
+
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+{
+	struct drm_device *dev = msm_host->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	uint64_t iova;
+	u8 *data;
+
+	data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+					priv->kms->aspace,
+					&msm_host->tx_gem_obj, &iova);
+
+	if (IS_ERR(data)) {
+		msm_host->tx_gem_obj = NULL;
+		return PTR_ERR(data);
+	}
+
+	msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
+	msm_host->tx_size = msm_host->tx_gem_obj->size;
+
+	return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+	struct drm_device *dev = msm_host->dev;
+
+	msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+					&msm_host->tx_buf_paddr, GFP_KERNEL);
+	if (!msm_host->tx_buf)
+		return -ENOMEM;
+
+	msm_host->tx_size = size;
+
+	return 0;
+}
+
+static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+{
+	struct drm_device *dev = msm_host->dev;
+	struct msm_drm_private *priv;
+
+	/*
+	 * This is possible if we're tearing down before we've had a chance to
+	 * fully initialize. A very real possibility if our probe is deferred,
+	 * in which case we'll hit msm_dsi_host_destroy() without having run
+	 * through the dsi_tx_buf_alloc().
+	 */
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	if (msm_host->tx_gem_obj) {
+		msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+		drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
+		msm_host->tx_gem_obj = NULL;
+	}
+
+	if (msm_host->tx_buf)
+		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
+			msm_host->tx_buf_paddr);
+}
+
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+	return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+	return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+	msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
+			   const struct mipi_dsi_msg *msg)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	struct mipi_dsi_packet packet;
+	int len;
+	int ret;
+	u8 *data;
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret) {
+		pr_err("%s: create packet failed, %d\n", __func__, ret);
+		return ret;
+	}
+	len = (packet.size + 3) & (~0x3); /* pad to a 4-byte boundary */
+
+	if (len > msm_host->tx_size) {
+		pr_err("%s: packet size is too big\n", __func__);
+		return -EINVAL;
+	}
+
+	data = cfg_hnd->ops->tx_buf_get(msm_host);
+	if (IS_ERR(data)) {
+		ret = PTR_ERR(data);
+		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* MSM specific command format in memory */
+	data[0] = packet.header[1];
+	data[1] = packet.header[2];
+	data[2] = packet.header[0];
+	data[3] = BIT(7); /* Last packet */
+	if (mipi_dsi_packet_format_is_long(msg->type))
+		data[3] |= BIT(6);
+	if (msg->rx_buf && msg->rx_len)
+		data[3] |= BIT(5);
+
+	/* Long packet */
+	if (packet.payload && packet.payload_length)
+		memcpy(data + 4, packet.payload, packet.payload_length);
+
+	/* Append 0xff to the end */
+	if (packet.size < len)
+		memset(data + packet.size, 0xff, len - packet.size);
+
+	if (cfg_hnd->ops->tx_buf_put)
+		cfg_hnd->ops->tx_buf_put(msm_host);
+
+	return len;
+}
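+
+/*
+ * Illustrative sketch of the layout dsi_cmd_dma_add() builds for a
+ * hypothetical payload-less DCS short write (MIPI_DCS_SET_DISPLAY_ON,
+ * 0x29, on virtual channel 0); the byte values are examples only:
+ *
+ *   data[0] = 0x29   param 0 (the DCS command)   <- packet.header[1]
+ *   data[1] = 0x00   param 1 (unused)            <- packet.header[2]
+ *   data[2] = 0x05   data type: DCS short write  <- packet.header[0]
+ *   data[3] = BIT(7) last packet in the chain
+ *
+ * Long packets additionally carry the payload at data[4..] plus 0xff
+ * padding up to the 4-byte-aligned length.
+ */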
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	u8 *data = msg->rx_buf;
+	if (data && (msg->rx_len >= 1)) {
+		*data = buf[1]; /* strip out dcs type */
+		return 1;
+	} else {
+		pr_err("%s: read data does not match with rx_buf len %zu\n",
+			__func__, msg->rx_len);
+		return -EINVAL;
+	}
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameters
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	u8 *data = msg->rx_buf;
+	if (data && (msg->rx_len >= 2)) {
+		data[0] = buf[1]; /* strip out dcs type */
+		data[1] = buf[2];
+		return 2;
+	} else {
+		pr_err("%s: read data does not match with rx_buf len %zu\n",
+			__func__, msg->rx_len);
+		return -EINVAL;
+	}
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	/* strip out 4 byte dcs header */
+	if (msg->rx_buf && msg->rx_len)
+		memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+	return msg->rx_len;
+}
+
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+	struct drm_device *dev = msm_host->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (!dma_base)
+		return -EINVAL;
+
+	return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
+				priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+	if (!dma_base)
+		return -EINVAL;
+
+	*dma_base = msm_host->tx_buf_paddr;
+	return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int ret;
+	uint64_t dma_base;
+	bool triggered;
+
+	ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+	if (ret) {
+		pr_err("%s: failed to get iova: %d\n", __func__, ret);
+		return ret;
+	}
+
+	reinit_completion(&msm_host->dma_comp);
+
+	dsi_wait4video_eng_busy(msm_host);
+
+	triggered = msm_dsi_manager_cmd_xfer_trigger(
+						msm_host->id, dma_base, len);
+	if (triggered) {
+		ret = wait_for_completion_timeout(&msm_host->dma_comp,
+					msecs_to_jiffies(200));
+		DBG("ret=%d", ret);
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		else
+			ret = len;
+	} else {
+		ret = len;
+	}
+
+	return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+			u8 *buf, int rx_byte, int pkt_size)
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, cnt;
+	u32 read_cnt;
+	u8 reg[16];
+	int repeated_bytes = 0;
+	int buf_offset = buf - msm_host->rx_buf;
+
+	lp = (u32 *)buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	if (rx_byte == 4)
+		read_cnt = 4;
+	else
+		read_cnt = pkt_size + 6;
+
+	/*
+	 * In case of multiple reads from the panel, after the first read there
+	 * is a possibility that some payload bytes repeat in the RDBK_DATA
+	 * registers. Since we read all the parameters from the panel right
+	 * from the first byte on every pass, we need to skip the repeating
+	 * bytes and then append the new parameters to the rx buffer.
+	 */
+	if (read_cnt > 16) {
+		int bytes_shifted;
+		/* Any data more than 16 bytes will be shifted out.
+		 * The temp read buffer should already contain these bytes.
+		 * The remaining bytes in read buffer are the repeated bytes.
+		 */
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+		*temp++ = ntohl(data); /* to host byte order */
+		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		buf[j++] = reg[i];
+
+	return j;
+}
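+
+/*
+ * Worked example of the repeated-byte handling above (hedged,
+ * illustrative numbers for a 20-byte long read, rlen = 20):
+ * pass 1: pkt_size = 10, read_cnt = 16; the 16-byte window holds
+ * 4 header + 10 payload + 2 crc, and the caller keeps 14 bytes
+ * (header + payload) in rx_buf after stripping the crc.
+ * pass 2: pkt_size = 20, read_cnt = 26 > 16, so bytes_shifted = 10
+ * and repeated_bytes = 14 - 10 = 4; the first 4 bytes of the window
+ * are skipped and the 12 new bytes (payload 10..19 plus 2 crc) are
+ * appended at rx_buf + 14.
+ */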
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+				const struct mipi_dsi_msg *msg)
+{
+	int len, ret;
+	int bllp_len = msm_host->mode->hdisplay *
+			dsi_get_bpp(msm_host->format) / 8;
+
+	len = dsi_cmd_dma_add(msm_host, msg);
+	if (len < 0) {
+		pr_err("%s: failed to add cmd type = 0x%x\n",
+			__func__,  msg->type);
+		return len;
+	}
+
+	/* For video mode, do not send commands longer than one pixel
+	 * line, since they can only be transmitted during BLLP.
+	 */
+	/* TODO: if the command is sent in LP mode, the bit rate is only
+	 * half of esc clk rate. In this case, if the video is already
+	 * actively streaming, we need to check more carefully if the
+	 * command can be fit into one BLLP.
+	 */
+	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+			__func__, len);
+		return -EINVAL;
+	}
+
+	ret = dsi_cmd_dma_tx(msm_host, len);
+	if (ret < 0) {
+		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+		return ret;
+	} else if (ret < len) {
+		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+			__func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+		return -EIO;
+	}
+
+	return len;
+}
+
+static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+{
+	u32 data0, data1;
+
+	data0 = dsi_read(msm_host, REG_DSI_CTRL);
+	data1 = data0;
+	data1 &= ~DSI_CTRL_ENABLE;
+	dsi_write(msm_host, REG_DSI_CTRL, data1);
+	/*
+	 * The DSI controller needs to be disabled before
+	 * the clocks are turned on.
+	 */
+	wmb();
+
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+	wmb();	/* make sure clocks enabled */
+
+	/* dsi controller can only be reset while clocks are running */
+	dsi_write(msm_host, REG_DSI_RESET, 1);
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
+	dsi_write(msm_host, REG_DSI_RESET, 0);
+	wmb();	/* controller out of reset */
+	dsi_write(msm_host, REG_DSI_CTRL, data0);
+	wmb();	/* make sure dsi controller enabled again */
+}
+
+static void dsi_hpd_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, hpd_work);
+
+	drm_helper_hpd_irq_event(msm_host->dev);
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, err_work);
+	u32 status = msm_host->err_work_state;
+
+	pr_err_ratelimited("%s: status=%x\n", __func__, status);
+	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+		dsi_sw_reset_restore(msm_host);
+
+	/* It is safe to clear here because error irq is disabled. */
+	msm_host->err_work_state = 0;
+
+	/* enable dsi error interrupt */
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+	if (status) {
+		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+		/* Writing an extra 0 is needed to clear the error bits */
+		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+	}
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+	if (status) {
+		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+	}
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
+		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+	}
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+	/* fifo underflow, overflow */
+	if (status) {
+		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+			msm_host->err_work_state |=
+					DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+	}
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+		dsi_write(msm_host, REG_DSI_STATUS0, status);
+		msm_host->err_work_state |=
+			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+	}
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+	}
+}
+
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+	/* disable dsi error interrupt */
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+	dsi_clk_status(msm_host);
+	dsi_fifo_status(msm_host);
+	dsi_ack_err_status(msm_host);
+	dsi_timeout_status(msm_host);
+	dsi_status(msm_host);
+	dsi_dln0_phy_err(msm_host);
+
+	queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+	struct msm_dsi_host *msm_host = ptr;
+	u32 isr;
+	unsigned long flags;
+
+	if (!msm_host->ctrl_base)
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&msm_host->intr_lock, flags);
+	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+	DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+	if (isr & DSI_IRQ_ERROR)
+		dsi_error(msm_host);
+
+	if (isr & DSI_IRQ_VIDEO_DONE)
+		complete(&msm_host->video_comp);
+
+	if (isr & DSI_IRQ_CMD_DMA_DONE)
+		complete(&msm_host->dma_comp);
+
+	return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+			struct device *panel_device)
+{
+	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
+							 "disp-enable",
+							 GPIOD_OUT_LOW);
+	if (IS_ERR(msm_host->disp_en_gpio)) {
+		DBG("cannot get disp-enable-gpios %ld",
+				PTR_ERR(msm_host->disp_en_gpio));
+		return PTR_ERR(msm_host->disp_en_gpio);
+	}
+
+	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+								GPIOD_IN);
+	if (IS_ERR(msm_host->te_gpio)) {
+		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+		return PTR_ERR(msm_host->te_gpio);
+	}
+
+	return 0;
+}
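+
+/*
+ * Hypothetical panel node sketch for the optional GPIOs looked up above
+ * (devm_gpiod_get_optional() appends the "-gpios" suffix); the GPIO
+ * controller and pin numbers are made up for illustration:
+ *
+ *   panel@0 {
+ *           ...
+ *           disp-enable-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>;
+ *           disp-te-gpios = <&tlmm 24 GPIO_ACTIVE_HIGH>;
+ *   };
+ */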
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+					struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	if (dsi->lanes > msm_host->num_data_lanes)
+		return -EINVAL;
+
+	msm_host->channel = dsi->channel;
+	msm_host->lanes = dsi->lanes;
+	msm_host->format = dsi->format;
+	msm_host->mode_flags = dsi->mode_flags;
+
+	/* Some gpios defined in panel DT need to be controlled by host */
+	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+	if (ret)
+		return ret;
+
+	DBG("id=%d", msm_host->id);
+	if (msm_host->dev)
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
+
+	return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+					struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	msm_host->device_node = NULL;
+
+	DBG("id=%d", msm_host->id);
+	if (msm_host->dev)
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
+
+	return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	if (!msg || !msm_host->power_on)
+		return -EINVAL;
+
+	mutex_lock(&msm_host->cmd_mutex);
+	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+	mutex_unlock(&msm_host->cmd_mutex);
+
+	return ret;
+}
+
+static struct mipi_dsi_host_ops dsi_host_ops = {
+	.attach = dsi_host_attach,
+	.detach = dsi_host_detach,
+	.transfer = dsi_host_transfer,
+};
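+
+/*
+ * For reference, a panel driver reaches dsi_host_transfer() above through
+ * the core helpers, e.g. (hedged sketch, not code from this driver):
+ *
+ *   u8 on = MIPI_DCS_SET_DISPLAY_ON;
+ *   mipi_dsi_dcs_write_buffer(dsi, &on, 1);
+ *
+ * which builds a mipi_dsi_msg and invokes host->ops->transfer().
+ */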
+
+/*
+ * List of supported physical to logical lane mappings.
+ * For example, the 2nd entry represents the following mapping:
+ *
+ * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ */
+static const int supported_data_lane_swaps[][4] = {
+	{ 0, 1, 2, 3 },
+	{ 3, 0, 1, 2 },
+	{ 2, 3, 0, 1 },
+	{ 1, 2, 3, 0 },
+	{ 0, 3, 2, 1 },
+	{ 1, 0, 3, 2 },
+	{ 2, 1, 0, 3 },
+	{ 3, 2, 1, 0 },
+};
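+
+/*
+ * Example (hypothetical DT fragment): an endpoint with
+ *
+ *   data-lanes = <1 2 3 0>;
+ *
+ * maps logical lane 0 to physical lane 1, 1 to 2, 2 to 3 and 3 to 0.
+ * That is the inverse of table entry 1 ("3012") above, so
+ * dsi_host_parse_lane_data() below selects dlane_swap = 1.
+ */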
+
+static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+				    struct device_node *ep)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct property *prop;
+	u32 lane_map[4];
+	int ret, i, len, num_lanes;
+
+	prop = of_find_property(ep, "data-lanes", &len);
+	if (!prop) {
+		DRM_DEV_DEBUG(dev,
+			"failed to find data lane mapping, using default\n");
+		/* Set the number of data lanes to 4 by default. */
+		msm_host->num_data_lanes = 4;
+		return 0;
+	}
+
+	num_lanes = len / sizeof(u32);
+
+	if (num_lanes < 1 || num_lanes > 4) {
+		DRM_DEV_ERROR(dev, "bad number of data lanes\n");
+		return -EINVAL;
+	}
+
+	msm_host->num_data_lanes = num_lanes;
+
+	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
+					 num_lanes);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to read lane data\n");
+		return ret;
+	}
+
+	/*
+	 * compare DT specified physical-logical lane mappings with the ones
+	 * supported by hardware
+	 */
+	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
+		const int *swap = supported_data_lane_swaps[i];
+		int j;
+
+		/*
+		 * the data-lanes array we get from DT has a logical->physical
+		 * mapping. The "data lane swap" register field represents
+		 * supported configurations in a physical->logical mapping.
+		 * Translate the DT mapping to what we understand and find a
+		 * configuration that works.
+		 */
+		for (j = 0; j < num_lanes; j++) {
+			/* lane_map is u32, so only the upper bound needs
+			 * checking; bail out rather than index swap[] out
+			 * of bounds below.
+			 */
+			if (lane_map[j] > 3) {
+				DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
+					lane_map[j]);
+				return -EINVAL;
+			}
+
+			if (swap[lane_map[j]] != j)
+				break;
+		}
+
+		if (j == num_lanes) {
+			msm_host->dlane_swap = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *endpoint, *device_node;
+	int ret = 0;
+
+	/*
+	 * Get the endpoint of the output port of the DSI host. In our case,
+	 * this is mapped to the port with reg = 1. Don't return an error if
+	 * the remote endpoint isn't defined. It's possible that there is
+	 * nothing connected to the dsi output.
+	 */
+	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+	if (!endpoint) {
+		DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
+		return 0;
+	}
+
+	ret = dsi_host_parse_lane_data(msm_host, endpoint);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Get panel node from the output port's endpoint data */
+	device_node = of_graph_get_remote_node(np, 1, 0);
+	if (!device_node) {
+		DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	msm_host->device_node = device_node;
+
+	if (of_property_read_bool(np, "syscon-sfpb")) {
+		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
+					"syscon-sfpb");
+		if (IS_ERR(msm_host->sfpb)) {
+			DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
+				__func__);
+			ret = PTR_ERR(msm_host->sfpb);
+		}
+	}
+
+	of_node_put(device_node);
+
+err:
+	of_node_put(endpoint);
+
+	return ret;
+}
+
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_host *msm_host = NULL;
+	struct platform_device *pdev = msm_dsi->pdev;
+	int ret;
+
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+	if (!msm_host) {
+		pr_err("%s: FAILED: cannot alloc dsi host\n",
+		       __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	msm_host->pdev = pdev;
+	msm_dsi->host = &msm_host->base;
+
+	ret = dsi_host_parse_dt(msm_host);
+	if (ret) {
+		pr_err("%s: failed to parse dt\n", __func__);
+		goto fail;
+	}
+
+	msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+	if (IS_ERR(msm_host->ctrl_base)) {
+		pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+		ret = PTR_ERR(msm_host->ctrl_base);
+		goto fail;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	msm_host->cfg_hnd = dsi_get_config(msm_host);
+	if (!msm_host->cfg_hnd) {
+		ret = -EINVAL;
+		pr_err("%s: get config failed\n", __func__);
+		goto fail;
+	}
+
+	msm_host->id = dsi_host_get_id(msm_host);
+	if (msm_host->id < 0) {
+		ret = msm_host->id;
+		pr_err("%s: unable to identify DSI host index\n", __func__);
+		goto fail;
+	}
+
+	/* fixup base address by io offset */
+	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
+	ret = dsi_regulator_init(msm_host);
+	if (ret) {
+		pr_err("%s: regulator init failed\n", __func__);
+		goto fail;
+	}
+
+	ret = dsi_clk_init(msm_host);
+	if (ret) {
+		pr_err("%s: unable to initialize dsi clks\n", __func__);
+		goto fail;
+	}
+
+	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+	if (!msm_host->rx_buf) {
+		ret = -ENOMEM;
+		pr_err("%s: alloc rx temp buf failed\n", __func__);
+		goto fail;
+	}
+
+	init_completion(&msm_host->dma_comp);
+	init_completion(&msm_host->video_comp);
+	mutex_init(&msm_host->dev_mutex);
+	mutex_init(&msm_host->cmd_mutex);
+	spin_lock_init(&msm_host->intr_lock);
+
+	/* setup workqueue */
+	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+	if (!msm_host->workqueue)
+		return -ENOMEM;
+
+	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
+
+	msm_dsi->id = msm_host->id;
+
+	DBG("Dsi Host %d initialized", msm_host->id);
+	return 0;
+
+fail:
+	return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_tx_buf_free(msm_host);
+	if (msm_host->workqueue) {
+		flush_workqueue(msm_host->workqueue);
+		destroy_workqueue(msm_host->workqueue);
+		msm_host->workqueue = NULL;
+	}
+
+	mutex_destroy(&msm_host->cmd_mutex);
+	mutex_destroy(&msm_host->dev_mutex);
+
+	pm_runtime_disable(&msm_host->pdev->dev);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	struct platform_device *pdev = msm_host->pdev;
+	int ret;
+
+	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	/* irq_of_parse_and_map() returns 0 on failure, never a negative
+	 * errno, so a < 0 check would never trigger.
+	 */
+	if (!msm_host->irq) {
+		DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(&pdev->dev, msm_host->irq,
+			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"dsi_isr", msm_host);
+	if (ret < 0) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
+				msm_host->irq, ret);
+		return ret;
+	}
+
+	msm_host->dev = dev;
+	ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
+	if (ret) {
+		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	/* Register mipi dsi host */
+	if (!msm_host->registered) {
+		host->dev = &msm_host->pdev->dev;
+		host->ops = &dsi_host_ops;
+		ret = mipi_dsi_host_register(host);
+		if (ret)
+			return ret;
+
+		msm_host->registered = true;
+
+		/* If the panel driver has not been probed after host register,
+		 * we should defer the host's probe.
+		 * This makes sure the panel is connected when fbcon detects
+		 * the connector status and gets the proper display mode to
+		 * create the framebuffer.
+		 * Don't try to defer if there is nothing connected to the DSI
+		 * output.
+		 */
+		if (check_defer && msm_host->device_node) {
+			if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
+				if (!of_drm_find_bridge(msm_host->device_node))
+					return -EPROBE_DEFER;
+		}
+	}
+
+	return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->registered) {
+		mipi_dsi_host_unregister(host);
+		host->dev = NULL;
+		host->ops = NULL;
+		msm_host->registered = false;
+	}
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+	/* TODO: make sure dsi_cmd_mdp is idle.
+	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+	 * How to handle the old versions? Wait for mdp cmd done?
+	 */
+
+	/*
+	 * The MDSS interrupt is generated in the MDP core clock domain,
+	 * so the MDP clock needs to be enabled to receive the DSI interrupt.
+	 */
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	cfg_hnd->ops->link_clk_enable(msm_host);
+
+	/* TODO: vote for bus bandwidth */
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(0, msm_host);
+
+	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+	dsi_write(msm_host, REG_DSI_CTRL,
+		msm_host->dma_cmd_ctrl_restore |
+		DSI_CTRL_CMD_MODE_EN |
+		DSI_CTRL_ENABLE);
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+	return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(1, msm_host);
+
+	/* TODO: unvote for bus bandwidth */
+
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int data_byte, rx_byte, dlen, end;
+	int short_response, diff, pkt_size, ret = 0;
+	char cmd;
+	int rlen = msg->rx_len;
+	u8 *buf;
+
+	if (rlen <= 2) {
+		short_response = 1;
+		pkt_size = rlen;
+		rx_byte = 4;
+	} else {
+		short_response = 0;
+		data_byte = 10;	/* first read */
+		if (rlen < data_byte)
+			pkt_size = rlen;
+		else
+			pkt_size = data_byte;
+		rx_byte = data_byte + 6; /* 4 header + 2 crc */
+	}
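+
+	/*
+	 * Example: rlen = 1 is a short response read back through a 4-byte
+	 * window; rlen = 20 starts with pkt_size = 10 and rx_byte = 16,
+	 * then grows pkt_size by up to 14 per pass (see dsi_cmd_dma_rx()
+	 * above for how repeated bytes are skipped).
+	 */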
+
+	buf = msm_host->rx_buf;
+	end = 0;
+	while (!end) {
+		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+		struct mipi_dsi_msg max_pkt_size_msg = {
+			.channel = msg->channel,
+			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+			.tx_len = 2,
+			.tx_buf = tx,
+		};
+
+		DBG("rlen=%d pkt_size=%d rx_byte=%d",
+			rlen, pkt_size, rx_byte);
+
+		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+		if (ret < 2) {
+			pr_err("%s: Set max pkt size failed, %d\n",
+				__func__, ret);
+			return -EINVAL;
+		}
+
+		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+			/* Clear the RDBK_DATA registers */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+					DSI_RDBK_DATA_CTRL_CLR);
+			wmb(); /* make sure the RDBK registers are cleared */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+			wmb(); /* release cleared status before transfer */
+		}
+
+		ret = dsi_cmds2buf_tx(msm_host, msg);
+		if (ret < 0) {
+			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+			return ret;
+		} else if (ret < msg->tx_len) {
+			pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+			return -ECOMM;
+		}
+
+		/*
+		 * Once the cmd_dma_done interrupt is received, the return
+		 * data from the client is ready and already stored in the
+		 * RDBK_DATA registers. Since the rx FIFO is 16 bytes, the
+		 * DCS header is kept on the first loop; after that, the
+		 * header is lost as the data shifts into the registers.
+		 */
+		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+		if (dlen <= 0)
+			return 0;
+
+		if (short_response)
+			break;
+
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		if (!end) {
+			dlen -= 2; /* 2 crc */
+			dlen -= diff;
+			buf += dlen;	/* next start position */
+			data_byte = 14;	/* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+		}
+	}
+
+	/*
+	 * For a single long read with requested rlen < 10, we need to
+	 * shift the start position of the rx data buffer to skip the
+	 * bytes which were not updated.
+	 */
+	if (pkt_size < 10 && !short_response)
+		buf = msm_host->rx_buf + (10 - rlen);
+	else
+		buf = msm_host->rx_buf;
+
+	cmd = buf[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
+		ret = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		ret = dsi_short_read1_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		ret = dsi_short_read2_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		ret = dsi_long_read_resp(buf, msg);
+		break;
+	default:
+		pr_warn("%s:Invalid response cmd\n", __func__);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
+				  u32 len)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
+	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+	/* Make sure trigger happens */
+	wmb();
+}
+
+int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+	struct msm_dsi_pll *src_pll)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct clk *byte_clk_provider, *pixel_clk_provider;
+	int ret;
+
+	ret = msm_dsi_pll_get_clk_provider(src_pll,
+				&byte_clk_provider, &pixel_clk_provider);
+	if (ret) {
+		pr_info("%s: can't get provider from pll, don't set parent\n",
+			__func__);
+		return 0;
+	}
+
+	ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
+	if (ret) {
+		pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
+	if (ret) {
+		pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	if (msm_host->dsi_clk_src) {
+		ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
+		if (ret) {
+			pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
+				__func__, ret);
+			goto exit;
+		}
+	}
+
+	if (msm_host->esc_clk_src) {
+		ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
+		if (ret) {
+			pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
+				__func__, ret);
+			goto exit;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+	/* Make sure fully reset */
+	wmb();
+	udelay(1000);
+	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+	udelay(100);
+}
+
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_clk_request *clk_req,
+			bool is_dual_dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int ret;
+
+	ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
+
+	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+	clk_req->escclk_rate = msm_host->esc_clk_rate;
+}
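+
+/*
+ * Note: one byte-clock cycle carries 8 bits per lane, hence
+ * bitclk_rate = byte_clk_rate * 8 above; e.g. a hypothetical 100 MHz
+ * byte clock corresponds to an 800 Mbit/s per-lane link rate.
+ */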
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+	/* TODO: clock should be turned off for command mode,
+	 * and only turned on before MDP START.
+	 * This part of code should be enabled once mdp driver support it.
+	 */
+	/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+	 *	dsi_link_clk_disable(msm_host);
+	 *	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+	 * }
+	 */
+	msm_host->enabled = true;
+	return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	msm_host->enabled = false;
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+	/* Since we have disabled INTF, the video engine won't stop, and the
+	 * cmd engine will be blocked.
+	 * Reset to disable the video engine so that we can send off commands.
+	 */
+	dsi_sw_reset(msm_host);
+
+	return 0;
+}
+
+static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
+{
+	enum sfpb_ahb_arb_master_port_en en;
+
+	if (!msm_host->sfpb)
+		return;
+
+	en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
+
+	regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
+			SFPB_GPREG_MASTER_PORT_EN__MASK,
+			SFPB_GPREG_MASTER_PORT_EN(en));
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings,
+			bool is_dual_dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int ret = 0;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (msm_host->power_on) {
+		DBG("dsi host already on");
+		goto unlock_ret;
+	}
+
+	msm_dsi_sfpb_config(msm_host, true);
+
+	ret = dsi_host_regulator_enable(msm_host);
+	if (ret) {
+		pr_err("%s:Failed to enable vregs.ret=%d\n",
+			__func__, ret);
+		goto unlock_ret;
+	}
+
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	ret = cfg_hnd->ops->link_clk_enable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable link clocks. ret=%d\n",
+		       __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+	if (ret) {
+		pr_err("%s: failed to set pinctrl default state, %d\n",
+			__func__, ret);
+		goto fail_disable_clk;
+	}
+
+	dsi_timing_setup(msm_host, is_dual_dsi);
+	dsi_sw_reset(msm_host);
+	dsi_ctrl_config(msm_host, true, phy_shared_timings);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+	msm_host->power_on = true;
+	mutex_unlock(&msm_host->dev_mutex);
+
+	return 0;
+
+fail_disable_clk:
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+fail_disable_reg:
+	dsi_host_regulator_disable(msm_host);
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (!msm_host->power_on) {
+		DBG("dsi host already off");
+		goto unlock_ret;
+	}
+
+	dsi_ctrl_config(msm_host, false, NULL);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+
+	dsi_host_regulator_disable(msm_host);
+
+	msm_dsi_sfpb_config(msm_host, false);
+
+	DBG("-");
+
+	msm_host->power_on = false;
+
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+				  const struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->mode) {
+		drm_mode_destroy(msm_host->dev, msm_host->mode);
+		msm_host->mode = NULL;
+	}
+
+	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+	if (!msm_host->mode) {
+		pr_err("%s: cannot duplicate mode\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
+{
+	return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
+}
+
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
+{
+	return to_msm_dsi_host(host)->mode_flags;
+}
+
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return of_drm_find_bridge(msm_host->device_node);
+}
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_manager.c b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 0000000..f3ff2cd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as the master encoder.
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
+struct msm_dsi_manager {
+	struct msm_dsi *dsi[DSI_MAX];
+
+	bool is_dual_dsi;
+	bool is_sync_needed;
+	int master_dsi_link_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_DUAL_DSI()		(msm_dsim_glb.is_dual_dsi)
+#define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_DSI_LINK(id)	(msm_dsim_glb.master_dsi_link_id == id)
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+	return msm_dsim_glb.dsi[id];
+}
+
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+	/* We assume the two DSI nodes carry the same dual-DSI and sync-mode
+	 * information, and that only one node specifies the master in case of
+	 * dual mode.
+	 */
+	if (!msm_dsim->is_dual_dsi)
+		msm_dsim->is_dual_dsi = of_property_read_bool(
+						np, "qcom,dual-dsi-mode");
+
+	if (msm_dsim->is_dual_dsi) {
+		if (of_property_read_bool(np, "qcom,master-dsi"))
+			msm_dsim->master_dsi_link_id = id;
+		if (!msm_dsim->is_sync_needed)
+			msm_dsim->is_sync_needed = of_property_read_bool(
+					np, "qcom,sync-dual-dsi");
+	}
+
+	return 0;
+}
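+
+/*
+ * Hypothetical DT sketch for the properties parsed above (node labels
+ * are illustrative):
+ *
+ *   &dsi0 { qcom,dual-dsi-mode; qcom,master-dsi; qcom,sync-dual-dsi; };
+ *   &dsi1 { qcom,dual-dsi-mode; qcom,sync-dual-dsi; };
+ *
+ * Only one of the two nodes carries qcom,master-dsi.
+ */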
+
+static int dsi_mgr_setup_components(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+	struct msm_dsi_pll *src_pll;
+	int ret;
+
+	if (!IS_DUAL_DSI()) {
+		ret = msm_dsi_host_register(msm_dsi->host, true);
+		if (ret)
+			return ret;
+
+		msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+		src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+		if (IS_ERR(src_pll))
+			return PTR_ERR(src_pll);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+	} else if (!other_dsi) {
+		ret = 0;
+	} else {
+		struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
+							msm_dsi : other_dsi;
+		struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
+							other_dsi : msm_dsi;
+		/* Register the slave host first, so that the slave DSI device
+		 * has a chance to probe without blocking the master DSI
+		 * device's probe.
+		 * Also, do not check defer for the slave host, because only
+		 * the master DSI device adds the panel to the global panel
+		 * list; the panel's device is the master DSI device.
+		 */
+		ret = msm_dsi_host_register(slave_link_dsi->host, false);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_register(master_link_dsi->host, true);
+		if (ret)
+			return ret;
+
+		/* PLL0 drives both DSI link clocks in dual DSI mode. */
+		msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+					MSM_DSI_PHY_MASTER);
+		msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+					MSM_DSI_PHY_SLAVE);
+		src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+		if (IS_ERR(src_pll))
+			return PTR_ERR(src_pll);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_set_src_pll(other_dsi->host, src_pll);
+	}
+
+	return ret;
+}
+
+static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
+		      struct msm_dsi_phy_shared_timings *shared_timings)
+{
+	struct msm_dsi_phy_clk_request clk_req;
+	int ret;
+	bool is_dual_dsi = IS_DUAL_DSI();
+
+	msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
+
+	ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
+	msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
+
+	return ret;
+}
+
+static int
+dsi_mgr_phy_enable(int id,
+		   struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+	int ret;
+
+	/* In case of dual DSI, some registers in PHY1 have been programmed
+	 * during PLL0 clock's set_rate. The PHY1 reset called by host1 here
+	 * will silently reset those PHY1 registers. Therefore we need to reset
+	 * and enable both PHYs before any PLL clock operation.
+	 */
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_host_reset_phy(mdsi->host);
+			msm_dsi_host_reset_phy(sdsi->host);
+
+			ret = enable_phy(mdsi, src_pll_id,
+					 &shared_timings[DSI_CLOCK_MASTER]);
+			if (ret)
+				return ret;
+			ret = enable_phy(sdsi, src_pll_id,
+					 &shared_timings[DSI_CLOCK_SLAVE]);
+			if (ret) {
+				msm_dsi_phy_disable(mdsi->phy);
+				return ret;
+			}
+		}
+	} else {
+		msm_dsi_host_reset_phy(msm_dsi->host);
+		ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
+		if (ret)
+			return ret;
+	}
+
+	msm_dsi->phy_enabled = true;
+
+	return 0;
+}
+
+static void dsi_mgr_phy_disable(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+
+	/* Disable the DSI PHY.
+	 * In a dual-DSI configuration, the PHY of the first controller
+	 * should be disabled only when the second controller is also
+	 * disabled.
+	 */
+	msm_dsi->phy_enabled = false;
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_phy_disable(sdsi->phy);
+			msm_dsi_phy_disable(mdsi->phy);
+		}
+	} else {
+		msm_dsi_phy_disable(msm_dsi->phy);
+	}
+}
+
+struct dsi_connector {
+	struct drm_connector base;
+	int id;
+};
+
+struct dsi_bridge {
+	struct drm_bridge base;
+	int id;
+};
+
+#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
+{
+	struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+	return dsi_connector->id;
+}
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+	return dsi_bridge->id;
+}
+
+static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+	unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+	return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
+void msm_dsi_manager_setup_encoder(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+
+	if (encoder && kms->funcs->set_encoder_mode)
+		kms->funcs->set_encoder_mode(kms, encoder,
+					     dsi_mgr_is_cmd_mode(msm_dsi));
+}
+
+static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
+{
+	struct msm_drm_private *priv = conn->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct msm_dsi *master_dsi, *slave_dsi;
+	struct drm_panel *panel;
+
+	if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) {
+		master_dsi = other_dsi;
+		slave_dsi = msm_dsi;
+	} else {
+		master_dsi = msm_dsi;
+		slave_dsi = other_dsi;
+	}
+
+	/*
+	 * There is only 1 panel in the global panel list for dual DSI mode.
+	 * Therefore slave dsi should get the drm_panel instance from master
+	 * dsi.
+	 */
+	panel = msm_dsi_host_get_panel(master_dsi->host);
+	if (IS_ERR(panel)) {
+		DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
+			  PTR_ERR(panel));
+		return PTR_ERR(panel);
+	}
+
+	if (!panel || !IS_DUAL_DSI())
+		goto out;
+
+	drm_object_attach_property(&conn->base,
+				   conn->dev->mode_config.tile_property, 0);
+
+	/*
+	 * Set split display info to kms once dual DSI panel is connected to
+	 * both hosts.
+	 */
+	if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
+		kms->funcs->set_split_display(kms, master_dsi->encoder,
+					      slave_dsi->encoder,
+					      dsi_mgr_is_cmd_mode(msm_dsi));
+	}
+
+out:
+	msm_dsi->panel = panel;
+	return 0;
+}
+
+static enum drm_connector_status dsi_mgr_connector_detect(
+		struct drm_connector *connector, bool force)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+	return msm_dsi->panel ? connector_status_connected :
+		connector_status_disconnected;
+}
+
+static void dsi_mgr_connector_destroy(struct drm_connector *connector)
+{
+	struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+
+	DBG("");
+
+	drm_connector_cleanup(connector);
+
+	kfree(dsi_connector);
+}
+
+static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_panel *panel = msm_dsi->panel;
+	int num;
+
+	if (!panel)
+		return 0;
+
+	/*
+	 * In dual DSI mode, we have one connector that can be
+	 * attached to the drm_panel.
+	 */
+	drm_panel_attach(panel, connector);
+	num = drm_panel_get_modes(panel);
+
+	return num;
+}
+
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+	struct msm_drm_private *priv = connector->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	long actual, requested;
+
+	DBG("");
+	requested = 1000 * mode->clock;
+	actual = kms->funcs->round_pixclk(kms, requested, encoder);
+
+	DBG("requested=%ld, actual=%ld", requested, actual);
+	if (actual != requested)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+dsi_mgr_connector_best_encoder(struct drm_connector *connector)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+	DBG("");
+	return msm_dsi_get_encoder(msm_dsi);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct drm_panel *panel = msm_dsi->panel;
+	struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
+	bool is_dual_dsi = IS_DUAL_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+	if (!msm_dsi_device_connected(msm_dsi))
+		return;
+
+	ret = dsi_mgr_phy_enable(id, phy_shared_timings);
+	if (ret)
+		goto phy_en_fail;
+
+	/* Do nothing with the host if it is slave-DSI in case of dual DSI */
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+		return;
+
+	ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
+	if (ret) {
+		pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+		goto host_on_fail;
+	}
+
+	if (is_dual_dsi && msm_dsi1) {
+		ret = msm_dsi_host_power_on(msm_dsi1->host,
+				&phy_shared_timings[DSI_1], is_dual_dsi);
+		if (ret) {
+			pr_err("%s: power on host1 failed, %d\n",
+							__func__, ret);
+			goto host1_on_fail;
+		}
+	}
+
+	/* Always call panel functions once, because even for dual panels,
+	 * there is only one drm_panel instance.
+	 */
+	if (panel) {
+		ret = drm_panel_prepare(panel);
+		if (ret) {
+			pr_err("%s: prepare panel %d failed, %d\n", __func__,
+								id, ret);
+			goto panel_prep_fail;
+		}
+	}
+
+	ret = msm_dsi_host_enable(host);
+	if (ret) {
+		pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+		goto host_en_fail;
+	}
+
+	if (is_dual_dsi && msm_dsi1) {
+		ret = msm_dsi_host_enable(msm_dsi1->host);
+		if (ret) {
+			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+			goto host1_en_fail;
+		}
+	}
+
+	if (panel) {
+		ret = drm_panel_enable(panel);
+		if (ret) {
+			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+									ret);
+			goto panel_en_fail;
+		}
+	}
+
+	return;
+
+panel_en_fail:
+	if (is_dual_dsi && msm_dsi1)
+		msm_dsi_host_disable(msm_dsi1->host);
+host1_en_fail:
+	msm_dsi_host_disable(host);
+host_en_fail:
+	if (panel)
+		drm_panel_unprepare(panel);
+panel_prep_fail:
+	if (is_dual_dsi && msm_dsi1)
+		msm_dsi_host_power_off(msm_dsi1->host);
+host1_on_fail:
+	msm_dsi_host_power_off(host);
+host_on_fail:
+	dsi_mgr_phy_disable(id);
+phy_en_fail:
+	return;
+}
+
+static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
+{
+	DBG("");
+}
+
+static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
+{
+	DBG("");
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct drm_panel *panel = msm_dsi->panel;
+	struct msm_dsi_pll *src_pll;
+	bool is_dual_dsi = IS_DUAL_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+
+	if (!msm_dsi_device_connected(msm_dsi))
+		return;
+
+	/*
+	 * Do nothing with the host if it is slave-DSI in case of dual DSI.
+	 * It is safe to call dsi_mgr_phy_disable() here because a single PHY
+	 * won't be disabled until both PHYs request disable.
+	 */
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+		goto disable_phy;
+
+	if (panel) {
+		ret = drm_panel_disable(panel);
+		if (ret)
+			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+									ret);
+	}
+
+	ret = msm_dsi_host_disable(host);
+	if (ret)
+		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+	if (is_dual_dsi && msm_dsi1) {
+		ret = msm_dsi_host_disable(msm_dsi1->host);
+		if (ret)
+			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+	}
+
+	if (panel) {
+		ret = drm_panel_unprepare(panel);
+		if (ret)
+			pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+								id, ret);
+	}
+
+	/* Save PLL status if it is a clock source */
+	src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+	msm_dsi_pll_save_state(src_pll);
+
+	ret = msm_dsi_host_power_off(host);
+	if (ret)
+		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+
+	if (is_dual_dsi && msm_dsi1) {
+		ret = msm_dsi_host_power_off(msm_dsi1->host);
+		if (ret)
+			pr_err("%s: host1 power off failed, %d\n",
+								__func__, ret);
+	}
+
+disable_phy:
+	dsi_mgr_phy_disable(id);
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+		const struct drm_display_mode *mode,
+		const struct drm_display_mode *adjusted_mode)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_dual_dsi = IS_DUAL_DSI();
+
+	DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+		return;
+
+	msm_dsi_host_set_display_mode(host, adjusted_mode);
+	if (is_dual_dsi && other_dsi)
+		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+}
+
+static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
+	.detect = dsi_mgr_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = dsi_mgr_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
+	.get_modes = dsi_mgr_connector_get_modes,
+	.mode_valid = dsi_mgr_connector_mode_valid,
+	.best_encoder = dsi_mgr_connector_best_encoder,
+};
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+	.pre_enable = dsi_mgr_bridge_pre_enable,
+	.enable = dsi_mgr_bridge_enable,
+	.disable = dsi_mgr_bridge_disable,
+	.post_disable = dsi_mgr_bridge_post_disable,
+	.mode_set = dsi_mgr_bridge_mode_set,
+};
+
+/* initialize connector when we're connected to a drm_panel */
+struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_connector *connector = NULL;
+	struct dsi_connector *dsi_connector;
+	int ret;
+
+	dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
+	if (!dsi_connector)
+		return ERR_PTR(-ENOMEM);
+
+	dsi_connector->id = id;
+
+	connector = &dsi_connector->base;
+
+	ret = drm_connector_init(msm_dsi->dev, connector,
+			&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
+	if (ret) {
+		kfree(dsi_connector);
+		return ERR_PTR(ret);
+	}
+
+	drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
+
+	/* Enable HPD so that the hpd event is handled
+	 * when the panel is attached to the host.
+	 */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	/* The display driver doesn't support interlaced modes yet. */
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_connector_attach_encoder(connector, msm_dsi->encoder);
+
+	ret = msm_dsi_manager_panel_init(connector, id);
+	if (ret) {
+		DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
+		goto fail;
+	}
+
+	return connector;
+
+fail:
+	connector->funcs->destroy(connector);
+	return ERR_PTR(ret);
+}
+
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+	bool is_dual_dsi = IS_DUAL_DSI();
+
+	/*
+	 * For dual DSI, we have only one drm panel, so we register only
+	 * one bridge/connector. Skip bridge/connector initialisation for
+	 * the slave DSI in a dual DSI configuration.
+	 */
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+		DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+		return false;
+	}
+	return true;
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_bridge *bridge = NULL;
+	struct dsi_bridge *dsi_bridge;
+	struct drm_encoder *encoder;
+	int ret;
+
+	dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+				sizeof(*dsi_bridge), GFP_KERNEL);
+	if (!dsi_bridge) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	dsi_bridge->id = id;
+
+	encoder = msm_dsi->encoder;
+
+	bridge = &dsi_bridge->base;
+	bridge->funcs = &dsi_mgr_bridge_funcs;
+
+	ret = drm_bridge_attach(encoder, bridge, NULL);
+	if (ret)
+		goto fail;
+
+	return bridge;
+
+fail:
+	if (bridge)
+		msm_dsi_manager_bridge_destroy(bridge);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_device *dev = msm_dsi->dev;
+	struct drm_encoder *encoder;
+	struct drm_bridge *int_bridge, *ext_bridge;
+	struct drm_connector *connector;
+	struct list_head *connector_list;
+
+	int_bridge = msm_dsi->bridge;
+	ext_bridge = msm_dsi->external_bridge =
+			msm_dsi_host_get_bridge(msm_dsi->host);
+
+	encoder = msm_dsi->encoder;
+
+	/* link the internal dsi bridge to the external bridge */
+	drm_bridge_attach(encoder, ext_bridge, int_bridge);
+
+	/*
+	 * We need the drm_connector created by the external bridge driver
+	 * (or someone else) so we can feed it to our driver's
+	 * priv->connector[] list, mainly for msm_fbdev_init().
+	 */
+	connector_list = &dev->mode_config.connector_list;
+
+	list_for_each_entry(connector, connector_list, head) {
+		if (drm_connector_has_possible_encoder(connector, encoder))
+			return connector;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_read = (msg->rx_buf && msg->rx_len);
+	bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+	int ret;
+
+	if (!msg->tx_buf || !msg->tx_len)
+		return 0;
+
+	/* In the dual-master case, the panel requires the same commands to
+	 * be sent to both DSI links. The host issues the command trigger to
+	 * both links when DSI_1 calls the cmd transfer function, regardless
+	 * of whether that happens before or after the DSI_0 cmd transfer.
+	 */
+	if (need_sync && (id == DSI_0))
+		return is_read ? msg->rx_len : msg->tx_len;
+
+	if (need_sync && msm_dsi0) {
+		ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+		if (ret) {
+			pr_err("%s: failed to prepare non-trigger host, %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+	ret = msm_dsi_host_xfer_prepare(host, msg);
+	if (ret) {
+		pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+		goto restore_host0;
+	}
+
+	ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+			msm_dsi_host_cmd_tx(host, msg);
+
+	msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+	if (need_sync && msm_dsi0)
+		msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+	return ret;
+}
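+
+/*
+ * Illustrative flow when IS_SYNC_NEEDED() is true (dual master, write
+ * command; the call site below is hypothetical): the DSI_0 call returns
+ * immediately and the DSI_1 call does the work on both links:
+ *
+ *	msm_dsi_manager_cmd_xfer(DSI_0, msg);	// no HW access, returns tx_len
+ *	msm_dsi_manager_cmd_xfer(DSI_1, msg);	// prepares both hosts and
+ *						// transfers on both links
+ */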
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+
+	if (IS_SYNC_NEEDED() && (id == DSI_0))
+		return false;
+
+	if (IS_SYNC_NEEDED() && msm_dsi0)
+		msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
+
+	msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
+
+	return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+	int id = msm_dsi->id;
+	int ret;
+
+	if (id >= DSI_MAX) {
+		pr_err("%s: invalid id %d\n", __func__, id);
+		return -EINVAL;
+	}
+
+	if (msm_dsim->dsi[id]) {
+		pr_err("%s: dsi%d already registered\n", __func__, id);
+		return -EBUSY;
+	}
+
+	msm_dsim->dsi[id] = msm_dsi;
+
+	ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
+	if (ret) {
+		pr_err("%s: failed to parse dual DSI info\n", __func__);
+		goto fail;
+	}
+
+	ret = dsi_mgr_setup_components(id);
+	if (ret) {
+		pr_err("%s: failed to register mipi dsi host for DSI %d\n",
+			__func__, id);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	msm_dsim->dsi[id] = NULL;
+	return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+	if (msm_dsi->host)
+		msm_dsi_host_unregister(msm_dsi->host);
+
+	if (msm_dsi->id >= 0)
+		msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/marvell/linux/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 0000000..8742653
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,124 @@
+#ifndef MMSS_CC_XML
+#define MMSS_CC_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mmss_cc_clk {
+	CLK = 0,
+	PCLK = 1,
+};
+
+#define REG_MMSS_CC_AHB						0x00000008
+
+static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
+{
+	switch (idx) {
+		case CLK: return 0x0000004c;
+		case PCLK: return 0x00000130;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+
+static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_CC_CLK_EN					0x00000001
+#define MMSS_CC_CLK_CC_ROOT_EN					0x00000004
+#define MMSS_CC_CLK_CC_MND_EN					0x00000020
+#define MMSS_CC_CLK_CC_MND_MODE__MASK				0x000000c0
+#define MMSS_CC_CLK_CC_MND_MODE__SHIFT				6
+static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
+}
+#define MMSS_CC_CLK_CC_PMXO_SEL__MASK				0x00000300
+#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT				8
+static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_MD_D__MASK					0x000000ff
+#define MMSS_CC_CLK_MD_D__SHIFT					0
+static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
+}
+#define MMSS_CC_CLK_MD_M__MASK					0x0000ff00
+#define MMSS_CC_CLK_MD_M__SHIFT					8
+static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_NS_SRC__MASK				0x0000000f
+#define MMSS_CC_CLK_NS_SRC__SHIFT				0
+static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
+}
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK			0x00fff000
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT			12
+static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
+}
+#define MMSS_CC_CLK_NS_VAL__MASK				0xff000000
+#define MMSS_CC_CLK_NS_VAL__SHIFT				24
+static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
+{
+	return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
+}
+
+#define REG_MMSS_CC_DSI2_PIXEL_CC				0x00000094
+
+#define REG_MMSS_CC_DSI2_PIXEL_NS				0x000000e4
+
+#define REG_MMSS_CC_DSI2_PIXEL_CC2				0x00000264
+
+
+#endif /* MMSS_CC_XML */
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
new file mode 100644
index 0000000..925262e
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+
+#include "dsi_phy.h"
+
+#define S_DIV_ROUND_UP(n, d)	\
+	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
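+/*
+ * S_DIV_ROUND_UP() rounds away from zero for negative numerators, e.g.
+ * S_DIV_ROUND_UP(7, 2) == 4 and S_DIV_ROUND_UP(-7, 2) == -4.
+ */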
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+				s32 min_result, bool even)
+{
+	s32 v;
+
+	v = (tmax - tmin) * percent;
+	v = S_DIV_ROUND_UP(v, 100) + tmin;
+	if (even && (v & 0x1))
+		return max_t(s32, min_result, v - 1);
+	else
+		return max_t(s32, min_result, v);
+}
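+
+/*
+ * Example: linear_inter(100, 60, 10, 0, true) picks the point 10% of the
+ * way from tmin = 60 to tmax = 100: v = 60 + S_DIV_ROUND_UP(40 * 10, 100)
+ * = 64, which is already even, so 64 is returned.
+ */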
+
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
+					s32 ui, s32 coeff, s32 pcnt)
+{
+	s32 tmax, tmin, clk_z;
+	s32 temp;
+
+	/* D-PHY spec: clk_prepare + clk_zero must be at least 300 ns */
+	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	if (tmin > 255) {
+		tmax = 511;
+		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+	} else {
+		tmax = 255;
+		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+	}
+
+	/* pad so that hs_rqst + clk_prepare + clk_zero is a multiple of 8 */
+	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+	timing->clk_zero = clk_z + 8 - temp;
+}
+
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+			     struct msm_dsi_phy_clk_request *clk_req)
+{
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
+	s32 ui, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 10;
+	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
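+
+	/*
+	 * Example (illustrative rates): for bitclk_rate = 800 MHz and
+	 * escclk_rate = 19.2 MHz, ui = 1250 and lpx = 52083, i.e. 1.25 ns
+	 * and ~52.08 ns scaled by coeff = 1000.
+	 */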
+
+	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
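+	/*
+	 * Worked example (illustrative): at a 500 MHz bit clock, ui = 2000,
+	 * giving tmax = 46 and tmin = 17, so clk_prepare = 17 + 3 = 20.
+	 */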
+
+	temp = lpx / ui;
+	if (temp & 0x1)
+		timing->hs_rqst = temp;
+	else
+		timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+	/* Calculate clk_zero after clk_prepare and hs_rqst */
+	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	temp = 85 * coeff + 6 * ui;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 40 * coeff + 4 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+	tmax = 255;
+	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+	temp = 145 * coeff + 10 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 60 * coeff + 4 * ui;
+	tmin = DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	tmax = 255;
+	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+	tmax = 63;
+	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
+						       false);
+	tmax = 63;
+	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+	temp += 8 * ui + lpx;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	if (tmin > tmax) {
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = true;
+	} else {
+		timing->shared_timings.clk_pre =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = false;
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+		timing->hs_rqst);
+
+	return 0;
+}
+
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+				struct msm_dsi_phy_clk_request *clk_req)
+{
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
+	s32 ui, ui_x8, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 50;
+	s32 pcnt1 = 50;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = 30;
+	s32 pcnt4 = 10;
+	s32 pcnt5 = 2;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 hb_en, hb_en_ckln, pd_ckln, pd;
+	s32 val, val_ckln;
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	timing->hs_halfbyte_en = 0;
+	hb_en = 0;
+	timing->hs_halfbyte_en_ckln = 0;
+	hb_en_ckln = 0;
+	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
+	pd_ckln = timing->hs_prep_dly_ckln;
+	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
+	pd = timing->hs_prep_dly;
+
+	val = (hb_en << 2) + (pd << 1);
+	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	ui_x8 = ui << 3;
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (95 * coeff - val_ckln * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
+	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+	tmax = (tmin > 255) ? 511 : 255;
+	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
+	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+	tmax = 255;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+	temp = 60 * coeff + 52 * ui - 43 * ui;
+	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	timing->shared_timings.clk_post =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
+	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
+	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+				(((timing->hs_rqst_ckln << 3) + 8) * ui);
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	if (tmin > tmax) {
+		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = 1;
+	} else {
+		timing->shared_timings.clk_pre =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = 0;
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+	    timing->hs_prep_dly_ckln);
+
+	return 0;
+}
+
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+	struct msm_dsi_phy_clk_request *clk_req)
+{
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
+	s32 ui, ui_x8, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 50;
+	s32 pcnt1 = 50;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = 30;
+	s32 pcnt4 = 10;
+	s32 pcnt5 = 2;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 hb_en, hb_en_ckln;
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	timing->hs_halfbyte_en = 0;
+	hb_en = 0;
+	timing->hs_halfbyte_en_ckln = 0;
+	hb_en_ckln = 0;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	ui_x8 = ui << 3;
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (95 * coeff) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = (tmin > 255) ? 511 : 255;
+	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (85 * coeff + 6 * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp / ui_x8) - 1;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+	temp = 60 * coeff + 52 * ui - 43 * ui;
+	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	timing->shared_timings.clk_post =
+		linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+		(((timing->hs_rqst_ckln << 3) + 8) * ui);
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	if (tmin > tmax) {
+		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = 1;
+	} else {
+		timing->shared_timings.clk_pre =
+			linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = 0;
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+		timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+		timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+		timing->hs_prep_dly_ckln);
+
+	return 0;
+}
+
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask)
+{
+	int phy_id = phy->id;
+	u32 val;
+
+	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
+		return;
+
+	val = dsi_phy_read(phy->base + reg);
+
+	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+		dsi_phy_write(phy->base + reg, val | bit_mask);
+	else
+		dsi_phy_write(phy->base + reg, val & (~bit_mask));
+}
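+
+/*
+ * Example: with the 10nm PHY's src_pll_truthtable of
+ * { {false, false}, {true, false} }, only the {phy 1, pll 0} cell is true,
+ * so only that pairing sets bit_mask; every other combination clears it.
+ */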
+
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int i, ret;
+
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
+
+	ret = devm_regulator_bulk_get(dev, num, s);
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER) {
+			DRM_DEV_ERROR(dev,
+				      "%s: failed to init regulator, ret=%d\n",
+				      __func__, ret);
+		}
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	int num = phy->cfg->reg_cfg.num;
+	int i;
+
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_load(s[i].consumer, regs[i].disable_load);
+
+	regulator_bulk_disable(num, s);
+}
+
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int ret, i;
+
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_load(s[i].consumer,
+							regs[i].enable_load);
+			if (ret < 0) {
+				DRM_DEV_ERROR(dev,
+					"regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		regulator_set_load(s[i].consumer, regs[i].disable_load);
+	return ret;
+}
+
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+	struct device *dev = &phy->pdev->dev;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(phy->ahb_clk);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		pm_runtime_put_sync(dev);
+	}
+
+	return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+	clk_disable_unprepare(phy->ahb_clk);
+	pm_runtime_put_autosuspend(&phy->pdev->dev);
+}
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+	{ .compatible = "qcom,dsi-phy-28nm-hpm",
+	  .data = &dsi_phy_28nm_hpm_cfgs },
+	{ .compatible = "qcom,dsi-phy-28nm-lp",
+	  .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+	{ .compatible = "qcom,dsi-phy-20nm",
+	  .data = &dsi_phy_20nm_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+	{ .compatible = "qcom,dsi-phy-28nm-8960",
+	  .data = &dsi_phy_28nm_8960_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+	{ .compatible = "qcom,dsi-phy-14nm",
+	  .data = &dsi_phy_14nm_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+	{ .compatible = "qcom,dsi-phy-10nm",
+	  .data = &dsi_phy_10nm_cfgs },
+	{ .compatible = "qcom,dsi-phy-10nm-8998",
+	  .data = &dsi_phy_10nm_8998_cfgs },
+#endif
+	{}
+};
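+
+/*
+ * A minimal device tree node that would match the 10nm entry above (node
+ * name, unit address and reg layout are illustrative, not a complete
+ * binding):
+ *
+ *	dsi-phy@ae94400 {
+ *		compatible = "qcom,dsi-phy-10nm";
+ *		reg = <0xae94400 0x200>;
+ *		reg-names = "dsi_phy";
+ *	};
+ */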
+
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi_phy; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
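+
+/*
+ * Example: the 10nm config has io_start = { 0xae94400, 0xae96400 }, so a
+ * PHY whose "dsi_phy" register block starts at 0xae96400 resolves to id 1.
+ */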
+
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+				"DSI_PHY_REG");
+	if (IS_ERR(phy->reg_base)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy;
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	int ret;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	match = of_match_node(dsi_phy_dt_match, dev->of_node);
+	if (!match)
+		return -ENODEV;
+
+	phy->cfg = match->data;
+	phy->pdev = pdev;
+
+	phy->id = dsi_phy_get_id(phy);
+	if (phy->id < 0) {
+		ret = phy->id;
+		DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+				"qcom,dsi-phy-regulator-ldo-mode");
+
+	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR(phy->base)) {
+		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = dsi_phy_regulator_init(phy);
+	if (ret)
+		goto fail;
+
+	phy->ahb_clk = msm_clk_get(pdev, "iface");
+	if (IS_ERR(phy->ahb_clk)) {
+		DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
+		ret = PTR_ERR(phy->ahb_clk);
+		goto fail;
+	}
+
+	if (phy->cfg->ops.init) {
+		ret = phy->cfg->ops.init(phy);
+		if (ret)
+			goto fail;
+	}
+
+	ret = devm_pm_runtime_enable(&pdev->dev);
+	if (ret)
+		return ret;
+
+	/* PLL init will call into clk_register which requires
+	 * register access, so we need to enable power and ahb clock.
+	 */
+	ret = dsi_phy_enable_resource(phy);
+	if (ret)
+		goto fail;
+
+	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
+	if (IS_ERR_OR_NULL(phy->pll)) {
+		DRM_DEV_INFO(dev,
+			"%s: pll init failed: %ld, need separate pll clk driver\n",
+			__func__, PTR_ERR(phy->pll));
+		phy->pll = NULL;
+	}
+
+	dsi_phy_disable_resource(phy);
+
+	platform_set_drvdata(pdev, phy);
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
+
+	if (phy && phy->pll) {
+		msm_dsi_pll_destroy(phy->pll);
+		phy->pll = NULL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe      = dsi_phy_driver_probe,
+	.remove     = dsi_phy_driver_remove,
+	.driver     = {
+		.name   = "msm_dsi_phy",
+		.of_match_table = dsi_phy_dt_match,
+	},
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct device *dev;
+	int ret;
+
+	if (!phy || !phy->cfg->ops.enable)
+		return -EINVAL;
+
+	dev = &phy->pdev->dev;
+
+	ret = dsi_phy_enable_resource(phy);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+			__func__, ret);
+		goto res_en_fail;
+	}
+
+	ret = dsi_phy_regulator_enable(phy);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
+			__func__, ret);
+		goto reg_en_fail;
+	}
+
+	ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		goto phy_en_fail;
+	}
+
+	/*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset values, which confuses the clock driver and results in wrong
+	 * output rates for the link clocks. Restore the PLL state if its PLL
+	 * is being used as a clock source.
+	 */
+	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
+		ret = msm_dsi_pll_restore_state(phy->pll);
+		if (ret) {
+			DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
+				__func__, ret);
+			goto pll_restore_fail;
+		}
+	}
+
+	return 0;
+
+pll_restore_fail:
+	if (phy->cfg->ops.disable)
+		phy->cfg->ops.disable(phy);
+phy_en_fail:
+	dsi_phy_regulator_disable(phy);
+reg_en_fail:
+	dsi_phy_disable_resource(phy);
+res_en_fail:
+	return ret;
+}
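+
+/*
+ * Typical usage from the manager side (sketch; error handling omitted and
+ * the surrounding call site is hypothetical):
+ *
+ *	msm_dsi_phy_set_usecase(phy, MSM_DSI_PHY_STANDALONE);
+ *	ret = msm_dsi_phy_enable(phy, src_pll_id, &clk_req);
+ *	...
+ *	msm_dsi_phy_disable(phy);
+ */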
+
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	if (!phy || !phy->cfg->ops.disable)
+		return;
+
+	phy->cfg->ops.disable(phy);
+
+	dsi_phy_regulator_disable(phy);
+	dsi_phy_disable_resource(phy);
+}
+
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+			struct msm_dsi_phy_shared_timings *shared_timings)
+{
+	memcpy(shared_timings, &phy->timing.shared_timings,
+	       sizeof(*shared_timings));
+}
+
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return NULL;
+
+	return phy->pll;
+}
+
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+			     enum msm_dsi_phy_usecase uc)
+{
+	if (phy)
+		phy->usecase = uc;
+}
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 0000000..c4069ce
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+/* v3.0.0 10nm implementation that requires the old timings settings */
+#define V3_0_0_10NM_OLD_TIMINGS_QUIRK	BIT(0)
+
+struct msm_dsi_phy_ops {
+	int (*init) (struct msm_dsi_phy *phy);
+	int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+			struct msm_dsi_phy_clk_request *clk_req);
+	void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+	enum msm_dsi_phy_type type;
+	struct dsi_reg_config reg_cfg;
+	struct msm_dsi_phy_ops ops;
+
+	/*
+	 * Each cell {phy_id, pll_id} of the truth table indicates
+	 * if the source PLL selection bit should be set for each PHY.
+	 * Fill default H/W values in illegal cells, eg. cell {0, 1}.
+	 */
+	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi_phy;
+	const int quirks;
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+
+struct msm_dsi_dphy_timing {
+	u32 clk_pre;
+	u32 clk_post;
+	u32 clk_zero;
+	u32 clk_trail;
+	u32 clk_prepare;
+	u32 hs_exit;
+	u32 hs_zero;
+	u32 hs_prepare;
+	u32 hs_trail;
+	u32 hs_rqst;
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+
+	struct msm_dsi_phy_shared_timings shared_timings;
+
+	/* For PHY v2 only */
+	u32 hs_rqst_ckln;
+	u32 hs_prep_dly;
+	u32 hs_prep_dly_ckln;
+	u8 hs_halfbyte_en;
+	u8 hs_halfbyte_en_ckln;
+};
+
+struct msm_dsi_phy {
+	struct platform_device *pdev;
+	void __iomem *base;
+	void __iomem *reg_base;
+	void __iomem *lane_base;
+	int id;
+
+	struct clk *ahb_clk;
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+	struct msm_dsi_dphy_timing timing;
+	const struct msm_dsi_phy_cfg *cfg;
+
+	enum msm_dsi_phy_usecase usecase;
+	bool regulator_ldo_mode;
+
+	struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+			     struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+				struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+				struct msm_dsi_phy_clk_request *clk_req);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask);
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
+
+#endif /* __DSI_PHY_H__ */
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 0000000..47403d4
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,247 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->base;
+	u32 data = 0;
+
+	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+	mb(); /* make sure read happened */
+
+	return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *lane_base = phy->lane_base;
+	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
+
+	/*
+	 * LPRX and CDRX need to be enabled only for the physical data lane
+	 * corresponding to the logical data lane 0.
+	 */
+	if (enable)
+		dsi_phy_write(lane_base +
+			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+	else
+		dsi_phy_write(lane_base +
+			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+	int i;
+	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+	void __iomem *lane_base = phy->lane_base;
+
+	if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
+		tx_dctrl[3] = 0x02;
+
+	/* Strength ctrl settings */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+			      0x55);
+		/*
+		 * Disable LPRX and CDRX for all lanes; they are re-enabled
+		 * later only on the physical data lane corresponding to the
+		 * logical data lane 0.
+		 */
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+			      0x88);
+	}
+
+	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+	/* other settings */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+			      i == 4 ? 0x80 : 0x0);
+		dsi_phy_write(lane_base +
+			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
+		dsi_phy_write(lane_base +
+			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+			      tx_dctrl[i]);
+	}
+
+	if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) {
+		/* Toggle BIT 0 to release freeze I/O */
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+	}
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			       struct msm_dsi_phy_clk_request *clk_req)
+{
+	int ret;
+	u32 status;
+	u32 const delay_us = 5;
+	u32 const timeout_us = 1000;
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	void __iomem *base = phy->base;
+	u32 data;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dsi_phy_hw_v3_0_is_pll_on(phy))
+		pr_warn("PLL turned on before configuring PHY\n");
+
+	/* wait for REFGEN READY */
+	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+					status, (status & BIT(0)),
+					delay_us, timeout_us);
+	if (ret) {
+		pr_err("Ref gen not ready. Aborting\n");
+		return -EINVAL;
+	}
+
+	/* de-assert digital and pll power down */
+	data = BIT(6) | BIT(5);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+	/* Assert PLL core reset */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+	/* turn off resync FIFO */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+	/* Select MS1 byte-clk */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+	/* Enable LDO */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);
+
+	/* Configure PHY lane swap (TODO: we need to calculate this) */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+	/* DSI PHY timings */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+		      timing->hs_halfbyte_en);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+		      timing->clk_zero);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+		      timing->clk_prepare);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+		      timing->clk_trail);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+		      timing->hs_exit);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+		      timing->hs_zero);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+		      timing->hs_prepare);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+		      timing->hs_trail);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+		      timing->hs_rqst);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+		      timing->ta_go | (timing->ta_sure << 3));
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+		      timing->ta_get);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+		      0x00);
+
+	/* Remove power down from all blocks */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+	/* power up lanes */
+	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+	/* TODO: only power up lanes that are used */
+	data |= 0x1F;
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+	/* Select full-rate mode */
+	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+	if (ret) {
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* DSI lane settings */
+	dsi_phy_hw_v3_0_lane_settings(phy);
+
+	DBG("DSI%d PHY enabled", phy->id);
+
+	return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+}
+
+static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+
+	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+				     "DSI_PHY_LANE");
+	if (IS_ERR(phy->lane_base)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+	.type = MSM_DSI_PHY_10NM,
+	.src_pll_truthtable = { {false, false}, {true, false} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vdds", 36000, 32},
+		},
+	},
+	.ops = {
+		.enable = dsi_10nm_phy_enable,
+		.disable = dsi_10nm_phy_disable,
+		.init = dsi_10nm_phy_init,
+	},
+	.io_start = { 0xae94400, 0xae96400 },
+	.num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
+	.type = MSM_DSI_PHY_10NM,
+	.src_pll_truthtable = { {false, false}, {true, false} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vdds", 36000, 32},
+		},
+	},
+	.ops = {
+		.enable = dsi_10nm_phy_enable,
+		.disable = dsi_10nm_phy_disable,
+		.init = dsi_10nm_phy_init,
+	},
+	.io_start = { 0xc994400, 0xc996400 },
+	.num_dsi_phy = 2,
+	.quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,
+};
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
new file mode 100644
index 0000000..1594f14
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+#define PHY_14NM_CKLN_IDX	4
+
+static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
+				     struct msm_dsi_dphy_timing *timing,
+				     int lane_idx)
+{
+	void __iomem *base = phy->lane_base;
+	bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
+	u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
+	u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
+	u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
+	u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
+	u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
+	u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
+				   timing->hs_halfbyte_en;
+
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
+		      DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
+		      halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
+		      DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
+}
+
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			       struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	u32 data;
+	int i;
+	int ret;
+	void __iomem *base = phy->base;
+	void __iomem *lane_base = phy->lane_base;
+
+	if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	data = 0x1c;
+	if (phy->usecase != MSM_DSI_PHY_STANDALONE)
+		data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+	/* 4 data lanes + 1 clk lane configuration */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
+			      0x1d);
+
+		dsi_phy_write(lane_base +
+			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
+		dsi_phy_write(lane_base +
+			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
+			      (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
+
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
+			      (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
+			      0);
+		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
+			      0x88);
+
+		dsi_14nm_dphy_set_timing(phy, timing, i);
+	}
+
+	/* Make sure the PLL is not started */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+	wmb(); /* make sure everything is written before reset and enable */
+
+	/* reset digital block */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
+	wmb(); /* ensure reset is asserted */
+	udelay(100);
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL,
+				DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+	if (ret) {
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/* Remove power down from PLL and all lanes */
+	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
+
+	return 0;
+}
+
+static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
+	dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
+
+	/* ensure that the phy is completely disabled */
+	wmb();
+}
+
+static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+
+	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+				"DSI_PHY_LANE");
+	if (IS_ERR(phy->lane_base)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
+	.type = MSM_DSI_PHY_14NM,
+	.src_pll_truthtable = { {false, false}, {true, false} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vcca", 17000, 32},
+		},
+	},
+	.ops = {
+		.enable = dsi_14nm_phy_enable,
+		.disable = dsi_14nm_phy_disable,
+		.init = dsi_14nm_phy_init,
+	},
+	.io_start = { 0x994400, 0x996400 },
+	.num_dsi_phy = 2,
+};
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 0000000..eca86bf
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+		DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+		DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+		DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+			DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+		DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+		DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+		DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+		DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+		DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+		DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+		DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	if (phy->regulator_ldo_mode) {
+		dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+		return;
+	}
+
+	/* non LDO mode */
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+				struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+	u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_20nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+				DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+							(i >> 1) * 0x40);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+	}
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+	dsi_20nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+	/* make sure everything is written before enable */
+	wmb();
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+	return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+	dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+	.type = MSM_DSI_PHY_20NM,
+	.src_pll_truthtable = { {false, true}, {false, true} },
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vddio", 100000, 100},	/* 1.8 V */
+			{"vcca", 10000, 100},	/* 1.0 V */
+		},
+	},
+	.ops = {
+		.enable = dsi_20nm_phy_enable,
+		.disable = dsi_20nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
+	},
+	.io_start = { 0xfd998500, 0xfd9a0500 },
+	.num_dsi_phy = 2,
+};
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 0000000..b3f678f
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+				struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+	dsi_28nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+	dsi_28nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+				DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+	dsi_28nm_phy_regulator_ctrl(phy, false);
+
+	/*
+	 * Wait for the register writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+	.type = MSM_DSI_PHY_28NM_HPM,
+	.src_pll_truthtable = { {true, true}, {false, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
+	},
+	.io_start = { 0xfd922b00, 0xfd923100 },
+	.num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+	.type = MSM_DSI_PHY_28NM_LP,
+	.src_pll_truthtable = { {true, true}, {true, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
+	},
+	.io_start = { 0x1a98500 },
+	.num_dsi_phy = 1,
+};
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 0000000..f225833
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
+		DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
+		DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
+		DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
+		DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
+		DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
+		DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
+		DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
+		DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
+		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
+		DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
+		DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->reg_base;
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
+		0x100);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->reg_base;
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
+}
+
+static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->reg_base;
+	u32 status;
+	int i = 5000;
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
+			0x3);
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
+	usleep_range(5000, 6000);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
+
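+	/* poll CAL_STATUS for up to ~5 ms (5000 iterations x 1 us) */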
+	do {
+		status = dsi_phy_read(base +
+				REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
+
+		if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
+			break;
+
+		udelay(1);
+	} while (--i > 0);
+}
+
+static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->base;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
+			0x00);
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
+			0x01);
+		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
+			0x66);
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+				struct msm_dsi_phy_clk_request *clk_req)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_28nm_phy_regulator_init(phy);
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
+
+	/* strength control */
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
+
+	/* phy ctrl */
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
+
+	dsi_28nm_phy_regulator_ctrl(phy);
+
+	dsi_28nm_phy_calibration(phy);
+
+	dsi_28nm_phy_lane_config(phy);
+
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
+	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
+
+	dsi_28nm_dphy_set_timing(phy, timing);
+
+	return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
+
+	/*
+	 * Wait for the register writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
+	.type = MSM_DSI_PHY_28NM_8960,
+	.src_pll_truthtable = { {true, true}, {false, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+		.init = msm_dsi_phy_init_common,
+	},
+	.io_start = { 0x4700300, 0x5800300 },
+	.num_dsi_phy = 2,
+};
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
new file mode 100644
index 0000000..4a4aa3c
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_pll.h"
+
+static int dsi_pll_enable(struct msm_dsi_pll *pll)
+{
+	int i, ret = 0;
+
+	/*
+	 * Certain PLLs do not allow the VCO rate to be updated while they
+	 * are on; keep track of their status so they are only turned on/off
+	 * after a successful set_rate.
+	 */
+	if (unlikely(pll->pll_on))
+		return 0;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < pll->en_seq_cnt; i++) {
+		ret = pll->enable_seqs[i](pll);
+		DBG("DSI PLL %s after sequence #%d",
+			ret ? "unlocked" : "locked", i + 1);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		DRM_ERROR("DSI PLL failed to lock\n");
+		return ret;
+	}
+
+	pll->pll_on = true;
+
+	return 0;
+}
+
+static void dsi_pll_disable(struct msm_dsi_pll *pll)
+{
+	if (unlikely(!pll->pll_on))
+		return;
+
+	pll->disable_seq(pll);
+
+	pll->pll_on = false;
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+		unsigned long rate, unsigned long *parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+	if      (rate < pll->min_rate)
+		return  pll->min_rate;
+	else if (rate > pll->max_rate)
+		return  pll->max_rate;
+	else
+		return rate;
+}
+
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+	return dsi_pll_enable(pll);
+}
+
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+	dsi_pll_disable(pll);
+}
+
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+					struct clk **clks, u32 num_clks)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+
+	if (!num_clks || !clks)
+		return;
+
+	do {
+		clk_unregister(clks[--num_clks]);
+		clks[num_clks] = NULL;
+	} while (num_clks);
+}
+
+/*
+ * DSI PLL API
+ */
+int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+	if (pll->get_provider)
+		return pll->get_provider(pll,
+					byte_clk_provider,
+					pixel_clk_provider);
+
+	return -EINVAL;
+}
+
+void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
+{
+	if (pll->destroy)
+		pll->destroy(pll);
+}
+
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+	if (pll->save_state) {
+		pll->save_state(pll);
+		pll->state_saved = true;
+	}
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	int ret;
+
+	if (pll->restore_state && pll->state_saved) {
+		ret = pll->restore_state(pll);
+		if (ret)
+			return ret;
+
+		pll->state_saved = false;
+	}
+
+	return 0;
+}
+
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+			    enum msm_dsi_phy_usecase uc)
+{
+	if (pll->set_usecase)
+		return pll->set_usecase(pll, uc);
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id)
+{
+	struct device *dev = &pdev->dev;
+	struct msm_dsi_pll *pll;
+
+	switch (type) {
+	case MSM_DSI_PHY_28NM_HPM:
+	case MSM_DSI_PHY_28NM_LP:
+		pll = msm_dsi_pll_28nm_init(pdev, type, id);
+		break;
+	case MSM_DSI_PHY_28NM_8960:
+		pll = msm_dsi_pll_28nm_8960_init(pdev, id);
+		break;
+	case MSM_DSI_PHY_14NM:
+		pll = msm_dsi_pll_14nm_init(pdev, id);
+		break;
+	case MSM_DSI_PHY_10NM:
+		pll = msm_dsi_pll_10nm_init(pdev, id);
+		break;
+	default:
+		pll = ERR_PTR(-ENXIO);
+		break;
+	}
+
+	if (IS_ERR(pll)) {
+		DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
+		return pll;
+	}
+
+	pll->type = type;
+
+	DBG("DSI:%d PLL registered", id);
+
+	return pll;
+}
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
new file mode 100644
index 0000000..c6a3623
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_PLL_H__
+#define __DSI_PLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "dsi.h"
+
+#define NUM_DSI_CLOCKS_MAX	6
+#define MAX_DSI_PLL_EN_SEQS	10
+
+struct msm_dsi_pll {
+	enum msm_dsi_phy_type type;
+
+	struct clk_hw	clk_hw;
+	bool		pll_on;
+	bool		state_saved;
+
+	unsigned long	min_rate;
+	unsigned long	max_rate;
+	u32		en_seq_cnt;
+
+	int (*enable_seqs[MAX_DSI_PLL_EN_SEQS])(struct msm_dsi_pll *pll);
+	void (*disable_seq)(struct msm_dsi_pll *pll);
+	int (*get_provider)(struct msm_dsi_pll *pll,
+			struct clk **byte_clk_provider,
+			struct clk **pixel_clk_provider);
+	void (*destroy)(struct msm_dsi_pll *pll);
+	void (*save_state)(struct msm_dsi_pll *pll);
+	int (*restore_state)(struct msm_dsi_pll *pll);
+	int (*set_usecase)(struct msm_dsi_pll *pll,
+			   enum msm_dsi_phy_usecase uc);
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
+
+static inline void pll_write(void __iomem *reg, u32 data)
+{
+	msm_writel(data, reg);
+}
+
+static inline u32 pll_read(const void __iomem *reg)
+{
+	return msm_readl(reg);
+}
+
+static inline void pll_write_udelay(void __iomem *reg, u32 data, u32 delay_us)
+{
+	pll_write(reg, data);
+	udelay(delay_us);
+}
+
+static inline void pll_write_ndelay(void __iomem *reg, u32 data, u32 delay_ns)
+{
+	pll_write(reg, data);
+	ndelay(delay_ns);
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+
+/* clock callbacks */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+		unsigned long rate, unsigned long *parent_rate);
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw);
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw);
+/* misc */
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+					struct clk **clks, u32 num_clks);
+
+/*
+ * Initialization for Each PLL Type
+ */
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+					enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+	struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
+					       int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
+	struct platform_device *pdev, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
+
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
+#endif /* __DSI_PLL_H__ */
+
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
new file mode 100644
index 0000000..33033b9
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -0,0 +1,880 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool div_override;
+	u32 output_div;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct pll_10nm_cached_state {
+	unsigned long vco_rate;
+	u8 bit_clk_div;
+	u8 pix_clk_div;
+	u8 pll_out_div;
+	u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	u64 vco_ref_clk_rate;
+	u64 vco_current_rate;
+
+	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	int vco_delay;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+
+	/* private clocks: */
+	struct clk_hw *out_div_clk_hw;
+	struct clk_hw *bit_clk_hw;
+	struct clk_hw *byte_clk_hw;
+	struct clk_hw *by_2_bit_clk_hw;
+	struct clk_hw *post_out_div_clk_hw;
+	struct clk_hw *pclk_mux_hw;
+	struct clk_hw *out_dsiclk_hw;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_10nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = pll->vco_ref_clk_rate;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 5000;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+
+	config->enable_ssc = false;
+	config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = pll->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
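+
+	/*
+	 * Worked example with an assumed rate: for a 1.5 GHz VCO with the
+	 * 19.2 MHz reference and 18 fractional bits set up in
+	 * dsi_pll_setup_config(), the prescaler makes divider = 38.4 MHz,
+	 * so 1.5 GHz / 38.4 MHz = 39.0625 yields dec = 39 (0x27) and
+	 * frac = 0.0625 * 2^18 = 16384 (0x4000).
+	 */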
+
+	if (pll_freq <= 1900000000UL)
+		regs->pll_prop_gain_rate = 8;
+	else if (pll_freq <= 3000000000UL)
+		regs->pll_prop_gain_rate = 10;
+	else
+		regs->pll_prop_gain_rate = 12;
+	if (pll_freq < 1100000000UL)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		DBG("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
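+
+	/*
+	 * With the defaults from dsi_pll_setup_config() (19.2 MHz ref,
+	 * ssc_freq = 31500, ssc_adj_per = 2): 19200000 / 31500 rounds to
+	 * 610, so ssc_per = 610 / 2 - 1 = 304, then trimmed to 302 so that
+	 * ssc_per + 1 is a multiple of ssc_adj_per + 1.
+	 */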
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+		 regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+			  regs->ssc_stepsize_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+			  regs->ssc_stepsize_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+			  regs->ssc_div_per_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+			  regs->ssc_div_per_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+			  regs->ssc_adjper_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+			  regs->ssc_adjper_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+			  SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+		  0xba);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+		  0x4c);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+		  reg->decimal_div_start);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+		  reg->frac_div_start_low);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+		  reg->frac_div_start_mid);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+		  reg->frac_div_start_high);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+		  reg->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
+	    parent_rate);
+
+	pll_10nm->vco_current_rate = rate;
+	pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_setup_config(pll_10nm);
+
+	dsi_pll_calc_dec_frac(pll_10nm);
+
+	dsi_pll_calc_ssc(pll_10nm);
+
+	dsi_pll_commit(pll_10nm);
+
+	dsi_pll_config_hzindep_reg(pll_10nm);
+
+	dsi_pll_ssc_commit(pll_10nm);
+
+	/* flush, ensure all register writes are done */
+	wmb();
+
+	return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+	int rc;
+	u32 status = 0;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->mmio +
+				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+		       pll->id, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+		  data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+		  data | BIT(5));
+	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+		  data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+		  data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	int rc;
+
+	dsi_pll_enable_pll_bias(pll_10nm);
+	if (pll_10nm->slave)
+		dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+	if (rc) {
+		pr_err("vco_set_rate failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Start PLL */
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+		  0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_10nm_lock_status(pll_10nm);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+		goto error;
+	}
+
+	pll->pll_on = true;
+
+	dsi_pll_enable_global_clk(pll_10nm);
+	if (pll_10nm->slave)
+		dsi_pll_enable_global_clk(pll_10nm->slave);
+
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+		  0x01);
+	if (pll_10nm->slave)
+		pll_write(pll_10nm->slave->phy_cmn_mmio +
+			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the PLL,
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering it down.
+	 */
+	dsi_pll_disable_global_clk(pll_10nm);
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(pll_10nm);
+	if (pll_10nm->slave) {
+		dsi_pll_disable_global_clk(pll_10nm->slave);
+		dsi_pll_disable_sub(pll_10nm->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	void __iomem *base = pll_10nm->mmio;
+	u64 ref_clk = pll_10nm->vco_ref_clk_rate;
+	u64 vco_rate = 0x0;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u64 pll_freq, tmp64;
+
+	dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+	dec &= 0xff;
+
+	frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+		  0xff) << 8);
+	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) << 16);
+
+	/*
+	 * TODO:
+	 *	1. Assumes prescaler is disabled
+	 *	2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
+	 */
+	multiplier = 1 << 18;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
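+
+	/*
+	 * e.g. dec = 39 and frac = 16384 against the 19.2 MHz reference
+	 * recover 39 * 38.4 MHz + 38.4 MHz * 16384 / 2^18 = 1.5 GHz,
+	 * the inverse of the calculation in dsi_pll_calc_dec_frac().
+	 */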
+
+	vco_rate = pll_freq;
+
+	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+	    pll_10nm->id, (unsigned long)vco_rate, dec, frac);
+
+	return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_10nm_vco_set_rate,
+	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+	.prepare = dsi_pll_10nm_vco_prepare,
+	.unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+	u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+	cached->pll_out_div = pll_read(pll_10nm->mmio +
+				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+	cached->pll_out_div &= 0x3;
+
+	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+	    pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
+	    cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+	u32 val;
+	int ret;
+
+	val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+	val &= ~0x3;
+	val |= cached->pll_out_div;
+	pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+		  cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+	val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	val &= ~0x3;
+	val |= cached->pll_mux;
+	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+	ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_10nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	return 0;
+}
+
+static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	void __iomem *base = pll_10nm->phy_cmn_mmio;
+	u32 data = 0x0;	/* internal PLL */
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		break;
+	case MSM_DSI_PHY_MASTER:
+		pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		data = 0x1; /* external PLL */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set PLL src */
+	pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+	pll_10nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct device *dev = &pll_10nm->pdev->dev;
+
+	DBG("DSI PLL%d", pll_10nm->id);
+	of_clk_del_provider(dev->of_node);
+
+	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+	clk_hw_unregister(&pll_10nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	char parent2[32], parent3[32], parent4[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_10nm_vco,
+	};
+	struct device *dev = &pll_10nm->pdev->dev;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int ret;
+
+	DBG("DSI%d", pll_10nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
+	pll_10nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
+
+	hw = clk_hw_register_divider(dev, clk_name,
+				     parent, CLK_SET_RATE_PARENT,
+				     pll_10nm->mmio +
+				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_base_clk_hw;
+	}
+
+	pll_10nm->out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+	/* BIT CLK: DIV_CTRL_3_0 */
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     CLK_SET_RATE_PARENT,
+				     pll_10nm->phy_cmn_mmio +
+				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+				     0, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_10nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_out_div_clk_hw;
+	}
+
+	pll_10nm->bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_bit_clk_hw;
+	}
+
+	pll_10nm->byte_clk_hw = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 2);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_byte_clk_hw;
+	}
+
+	pll_10nm->by_2_bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 4);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_by_2_bit_clk_hw;
+	}
+
+	pll_10nm->post_out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+
+	hw = clk_hw_register_mux(dev, clk_name,
+				 (const char *[]){
+				 parent, parent2, parent3, parent4
+				 }, 4, 0, pll_10nm->phy_cmn_mmio +
+				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+				 0, 2, 0, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_post_out_div_clk_hw;
+	}
+
+	pll_10nm->pclk_mux_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+	/* PIX CLK DIV : DIV_CTRL_7_4*/
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     0, pll_10nm->phy_cmn_mmio +
+					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+				     4, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_10nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_pclk_mux_hw;
+	}
+
+	pll_10nm->out_dsiclk_hw = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_10nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_10nm->hw_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		goto err_dsiclk_hw;
+	}
+
+	return 0;
+
+err_dsiclk_hw:
+	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+err_bit_clk_hw:
+	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+err_out_div_clk_hw:
+	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+err_base_clk_hw:
+	clk_hw_unregister(&pll_10nm->base.clk_hw);
+
+	return ret;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+	struct dsi_pll_10nm *pll_10nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+	if (!pll_10nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("DSI PLL%d", id);
+
+	pll_10nm->pdev = pdev;
+	pll_10nm->id = id;
+	pll_10nm_list[id] = pll_10nm;
+
+	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_10nm->postdiv_lock);
+
+	pll = &pll_10nm->base;
+	pll->min_rate = 1000000000UL;
+	pll->max_rate = 3500000000UL;
+	pll->get_provider = dsi_pll_10nm_get_provider;
+	pll->destroy = dsi_pll_10nm_destroy;
+	pll->save_state = dsi_pll_10nm_save_state;
+	pll->restore_state = dsi_pll_10nm_restore_state;
+	pll->set_usecase = dsi_pll_10nm_set_usecase;
+
+	pll_10nm->vco_delay = 1;
+
+	ret = pll_10nm_register(pll_10nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	/* TODO: Remove this when we have proper display handover support */
+	msm_dsi_pll_save_state(pll);
+
+	return pll;
+}
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
new file mode 100644
index 0000000..f847376
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0n1_postdiv_clk
+ *                         |
+ *                         |
+ *                 +----+  |  +----+
+ *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ *                 +----+  |  +----+
+ *                         |           dsi0n1_postdivby2_clk
+ *                         |   +----+  |
+ *                         o---| /2 |--o--|\
+ *                         |   +----+     | \   +----+
+ *                         |              |  |--| n2 |-- dsi0pll
+ *                         o--------------| /   +----+
+ *                                        |/
+ */
+
+#define POLL_MAX_READS			15
+#define POLL_TIMEOUT_US			1000
+
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+#define VCO_MIN_RATE			1300000000UL
+#define VCO_MAX_RATE			2600000000UL
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+
+#define DSI_PLL_DEFAULT_VCO_POSTDIV	1
+
+struct dsi_pll_input {
+	u32 fref;	/* reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* Mux configuration (see diagram) */
+	u32 ssc_en;	/* SSC enable/disable */
+	u32 ldo_en;
+
+	/* fixed params */
+	u32 refclk_dbler_en;
+	u32 vco_measure_time;
+	u32 kvco_measure_time;
+	u32 bandgap_timer;
+	u32 pll_wakeup_timer;
+	u32 plllock_cnt;
+	u32 plllock_rng;
+	u32 ssc_center;
+	u32 ssc_adj_period;
+	u32 ssc_spread;
+	u32 ssc_freq;
+	u32 pll_ie_trim;
+	u32 pll_ip_trim;
+	u32 pll_iptat_trim;
+	u32 pll_cpcset_cur;
+	u32 pll_cpmset_cur;
+
+	u32 pll_icpmset;
+	u32 pll_icpcset;
+
+	u32 pll_icpmset_p;
+	u32 pll_icpmset_m;
+
+	u32 pll_icpcset_p;
+	u32 pll_icpcset_m;
+
+	u32 pll_lpf_res1;
+	u32 pll_lpf_cap1;
+	u32 pll_lpf_cap2;
+	u32 pll_c3ctrl;
+	u32 pll_r3ctrl;
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;
+	u32 dec_start;
+	u32 div_frac_start;
+	u32 ssc_period;
+	u32 ssc_step_size;
+	u32 plllock_cmp;
+	u32 pll_vco_div_ref;
+	u32 pll_vco_count;
+	u32 pll_kvco_div_ref;
+	u32 pll_kvco_count;
+	u32 pll_misc1;
+	u32 pll_lpf2_postdiv;
+	u32 pll_resetsm_cntrl;
+	u32 pll_resetsm_cntrl2;
+	u32 pll_resetsm_cntrl5;
+	u32 pll_kvco_code;
+
+	u32 cmn_clk_cfg0;
+	u32 cmn_clk_cfg1;
+	u32 cmn_ldo_cntrl;
+
+	u32 pll_postdiv;
+	u32 fcvo;
+};
+
+struct pll_14nm_cached_state {
+	unsigned long vco_rate;
+	u8 n2postdiv;
+	u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+
+	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	u64 vco_current_rate;
+	u64 vco_ref_clk_rate;
+
+	/* private clocks: */
+	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+	u32 num_hws;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_14nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that it
+ * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
+ */
+struct dsi_pll_14nm_postdiv {
+	struct clk_hw hw;
+
+	/* divider params */
+	u8 shift;
+	u8 width;
+	u8 flags; /* same flags as used by clk_divider struct */
+
+	struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+				    u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	void __iomem *base = pll_14nm->mmio;
+	u32 tries, val;
+
+	tries = nb_tries;
+	while (tries--) {
+		val = pll_read(base +
+			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+		pll_locked = !!(val & BIT(5));
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+
+	if (!pll_locked) {
+		tries = nb_tries;
+		while (tries--) {
+			val = pll_read(base +
+				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+			pll_locked = !!(val & BIT(0));
+
+			if (pll_locked)
+				break;
+
+			udelay(timeout_us);
+		}
+	}
+
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
+{
+	pll->in.fref = pll->vco_ref_clk_rate;
+	pll->in.fdata = 0;
+	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */
+	pll->in.ldo_en = 0;	/* disabled for now */
+
+	/* fixed input */
+	pll->in.refclk_dbler_en = 0;
+	pll->in.vco_measure_time = 5;
+	pll->in.kvco_measure_time = 5;
+	pll->in.bandgap_timer = 4;
+	pll->in.pll_wakeup_timer = 5;
+	pll->in.plllock_cnt = 1;
+	pll->in.plllock_rng = 0;
+
+	/*
+	 * SSC is enabled by default. We might need DT props for configuring
+	 * some SSC params like PPM and center/down spread etc.
+	 */
+	pll->in.ssc_en = 1;
+	pll->in.ssc_center = 0;		/* down spread by default */
+	pll->in.ssc_spread = 5;		/* PPM / 1000 */
+	pll->in.ssc_freq = 31500;	/* default recommended */
+	pll->in.ssc_adj_period = 37;
+
+	pll->in.pll_ie_trim = 4;
+	pll->in.pll_ip_trim = 4;
+	pll->in.pll_cpcset_cur = 1;
+	pll->in.pll_cpmset_cur = 1;
+	pll->in.pll_icpmset = 4;
+	pll->in.pll_icpcset = 4;
+	pll->in.pll_icpmset_p = 0;
+	pll->in.pll_icpmset_m = 0;
+	pll->in.pll_icpcset_p = 0;
+	pll->in.pll_icpcset_m = 0;
+	pll->in.pll_lpf_res1 = 3;
+	pll->in.pll_lpf_cap1 = 11;
+	pll->in.pll_lpf_cap2 = 1;
+	pll->in.pll_iptat_trim = 7;
+	pll->in.pll_c3ctrl = 2;
+	pll->in.pll_r3ctrl = 1;
+}
+
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	u64 step_size;
+
+	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pll->in.ssc_freq / 500;
+	period = (u32)pll->vco_ref_clk_rate / 1000;
+	ssc_period = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pll->out.ssc_period = ssc_period;
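+
+	/*
+	 * e.g. the 19.2 MHz reference and the default ssc_freq of 31500
+	 * from dsi_pll_14nm_input_init() give 31500 / 500 = 63 and
+	 * CEIL(19200, 63) - 1 = 304, so ssc_period = 304.
+	 */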
+
+	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
+	    pll->in.ssc_spread, pll->out.ssc_period);
+
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_u64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_u64(step_size, 1000);
+	step_size *= pll->in.ssc_spread;
+	step_size = div_u64(step_size, 1000);
+	step_size *= (pll->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	DBG("step_size=%lld", step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pll->out.ssc_step_size = step_size;
+}
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 multiplier = BIT(20);
+	u64 dec_start_multiple, dec_start, pll_comp_val;
+	u32 duration, div_frac_start;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+
+	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_u64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
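+
+	/*
+	 * Worked example with assumed rates: vco_clk_rate = 1.8 GHz and
+	 * fref = 19.2 MHz give 1.8 GHz / 19.2 MHz = 93.75, so
+	 * dec_start = 93 and div_frac_start = 0.75 * 2^20 = 786432.
+	 */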
+
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val = duration * dec_start_multiple;
+	pll_comp_val = div_u64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	pout->cmn_ldo_cntrl = 0x3c;
+}
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+		slop = 280;
+
+	return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = div_u64(vco_clk_rate, 1000000);	/* unit is MHz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data;
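+
+	/*
+	 * e.g. the 19.2 MHz reference and vco_measure_time = 5 from
+	 * dsi_pll_14nm_input_init() give pll_vco_div_ref = 96 - 2 = 94;
+	 * an assumed 1.8 GHz VCO then gives pll_vco_count =
+	 * 1800 * 5 / 10 = 900.
+	 */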
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_14nm_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u8 data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+				 struct dsi_pll_input *pin,
+				 struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	u8 data;
+
+	/* configure the non-frequency-dependent PLL registers */
+	data = 0;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+	data = pout->pll_txclk_en;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
+
+	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
+
+	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
+
+	data = pin->pll_icpmset_p << 3 | pin->pll_icpmset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
+
+	data = pin->pll_icpmset << 3 | pin->pll_icpcset;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
+
+	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
+
+	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	/* de-assert PLL start and apply PLL SW reset */
+
+	/* stop pll */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+	wmb();	/* make sure register committed */
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+			       struct dsi_pll_input *pin,
+			       struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	void __iomem *cmn_base = pll->phy_cmn_mmio;
+	u8 data;
+
+	DBG("DSI%d PLL", pll->id);
+
+	data = pout->cmn_ldo_cntrl;
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pin, pout);
+
+	pll_14nm_software_reset(pll);
+
+	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+	/* configure the frequency dependent pll registers */
+	data = pout->dec_start;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16) & 0xf;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+	data = (pout->plllock_cmp >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+	data = (pout->plllock_cmp >> 16) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	if (pin->ssc_en)
+		pll_db_commit_ssc(pll);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * VCO Clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct dsi_pll_input *pin = &pll_14nm->in;
+	struct dsi_pll_output *pout = &pll_14nm->out;
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	pll_14nm->vco_current_rate = rate;
+	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_14nm_input_init(pll_14nm);
+
+	/*
+	 * This configures the post divider internal to the VCO. It's
+	 * fixed to divide by 1 for now.
+	 *
+	 * tx_band = pll_postdiv.
+	 * 0: divided by 1
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
+
+	pll_14nm_dec_frac_calc(pll_14nm);
+
+	if (pin->ssc_en)
+		pll_14nm_ssc_calc(pll_14nm);
+
+	pll_14nm_calc_vco_count(pll_14nm);
+
+	/* commit the slave DSI PLL registers if we're master. Note that we
+	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+	 * of the master and slave are identical
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+		pll_db_commit_14nm(pll_14nm_slave, pin, pout);
+	}
+
+	pll_db_commit_14nm(pll_14nm, pin, pout);
+
+	return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u64 vco_rate, multiplier = BIT(20);
+	u32 div_frac_start;
+	u32 dec_start;
+	u64 ref_clk = parent_rate;
+
+	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+
+	DBG("dec_start = %x", dec_start);
+
+	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+				& 0xf) << 16;
+	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+				& 0xff) << 8;
+	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+				& 0xff;
+
+	DBG("div_frac_start = %x", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	/*
+	 * Recalculating the rate from dec_start and frac_start doesn't yield
+	 * exactly the rate we originally set. Convert the frequency to kHz,
+	 * round it up, then convert it back to Hz.
+	 */
+	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+	DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+	return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_14nm_vco_set_rate,
+	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width)	((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+						      unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	u32 val;
+
+	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+	val &= div_mask(width);
+
+	return divider_recalc_rate(hw, parent_rate, val, NULL,
+				   postdiv->flags, width);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+					    unsigned long rate,
+					    unsigned long *prate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);
+
+	return divider_round_rate(hw, rate, prate, NULL,
+				  postdiv->width,
+				  postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	spinlock_t *lock = &pll_14nm->postdiv_lock;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	unsigned int value;
+	unsigned long flags = 0;
+	u32 val;
+
+	DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+				postdiv->flags);
+
+	spin_lock_irqsave(lock, flags);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+	val &= ~(div_mask(width) << shift);
+
+	val |= value << shift;
+	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+	/* If we're master in dual DSI mode, then the slave PLL's post-dividers
+	 * follow the master's post-dividers
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+	.round_rate = dsi_pll_14nm_postdiv_round_rate,
+	.set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	bool locked;
+
+	DBG("");
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+					 POLL_TIMEOUT_US);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	DBG("");
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+}
+
+static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+
+	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+	cached_state->n1postdiv = data & 0xf;
+	cached_state->n2postdiv = (data >> 4) & 0xf;
+
+	DBG("DSI%d PLL save state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+	int ret;
+
+	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+	/* also restore post-dividers for slave DSI PLL */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+	}
+
+	return 0;
+}
+
+static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u32 clkbuflr_en, bandgap = 0;
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		clkbuflr_en = 0x1;
+		break;
+	case MSM_DSI_PHY_MASTER:
+		clkbuflr_en = 0x3;
+		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		clkbuflr_en = 0x0;
+		bandgap = 0x3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+	if (bandgap)
+		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+	pll_14nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct platform_device *pdev = pll_14nm->pdev;
+	int num_hws = pll_14nm->num_hws;
+
+	of_clk_del_provider(pdev->dev.of_node);
+
+	while (num_hws--)
+		clk_hw_unregister(pll_14nm->hws[num_hws]);
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+						const char *name,
+						const char *parent_name,
+						unsigned long flags,
+						u8 shift)
+{
+	struct dsi_pll_14nm_postdiv *pll_postdiv;
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_init_data postdiv_init = {
+		.parent_names = (const char *[]) { parent_name },
+		.num_parents = 1,
+		.name = name,
+		.flags = flags,
+		.ops = &clk_ops_dsi_pll_14nm_postdiv,
+	};
+	int ret;
+
+	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+	if (!pll_postdiv)
+		return ERR_PTR(-ENOMEM);
+
+	pll_postdiv->pll = pll_14nm;
+	pll_postdiv->shift = shift;
+	/* both N1 and N2 postdividers are 4 bits wide */
+	pll_postdiv->width = 4;
+	/* range of each divider is from 1 to 15 */
+	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+	pll_postdiv->hw.init = &postdiv_init;
+
+	ret = clk_hw_register(dev, &pll_postdiv->hw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_14nm_vco,
+	};
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_hw **hws = pll_14nm->hws;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int num = 0;
+	int ret;
+
+	DBG("DSI%d", pll_14nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
+	pll_14nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	hws[num++] = &pll_14nm->base.clk_hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
+
+	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
+				       CLK_SET_RATE_PARENT, 0);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/* DSI Byte clock = VCO_CLK / N1 / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/*
+	 * Skip the mux for now; force DSICLK_SEL to 1 and add a /2 divider
+	 * along the way. Don't let it set the parent rate.
+	 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+
+	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
+	 * This is the output of N2 post-divider, bits 4-7 in
+	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
+	 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;
+
+	pll_14nm->num_hws = num;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_14nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_14nm->hw_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+	struct dsi_pll_14nm *pll_14nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+	if (!pll_14nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("PLL%d", id);
+
+	pll_14nm->pdev = pdev;
+	pll_14nm->id = id;
+	pll_14nm_list[id] = pll_14nm;
+
+	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_14nm->postdiv_lock);
+
+	pll = &pll_14nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_14nm_get_provider;
+	pll->destroy = dsi_pll_14nm_destroy;
+	pll->disable_seq = dsi_pll_14nm_disable_seq;
+	pll->save_state = dsi_pll_14nm_save_state;
+	pll->restore_state = dsi_pll_14nm_restore_state;
+	pll->set_usecase = dsi_pll_14nm_set_usecase;
+
+	pll_14nm->vco_delay = 1;
+
+	pll->en_seq_cnt = 1;
+	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
+
+	ret = pll_14nm_register(pll_14nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
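(For reference: a minimal, standalone sketch of the output-clock arithmetic
the 14nm registration above sets up: byte clock = VCO / N1 / 8 and pixel
clock = VCO / N1 / 2 / N2, with both 4-bit post-dividers packed into
CMN_CLK_CFG0 exactly as save_state/restore_state read and write them. The
VCO rate and divider values below are hypothetical, not taken from the
driver.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vco = 1386000000ULL;	/* hypothetical VCO rate, in Hz */
	unsigned int n1 = 2, n2 = 5;	/* hypothetical 4-bit post-dividers */
	uint32_t clk_cfg0 = (n1 & 0xf) | ((n2 & 0xf) << 4);

	printf("CLK_CFG0  = 0x%02x\n", clk_cfg0);
	printf("byte clk  = %llu Hz\n", (unsigned long long)(vco / n1 / 8));
	printf("pixel clk = %llu Hz\n",
	       (unsigned long long)(vco / n1 / 2 / n2));
	return 0;
}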
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
new file mode 100644
index 0000000..8c99e01
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 28nm - clock diagram (e.g. DSI0):
+ *
+ *         dsi0analog_postdiv_clk
+ *                             |         dsi0indirect_path_div2_clk
+ *                             |          |
+ *                   +------+  |  +----+  |  |\   dsi0byte_mux
+ *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
+ *                |  +------+     +----+     | m|  |  +----+
+ *                |                          | u|--o--| /4 |-- dsi0pllbyte
+ *                |                          | x|     +----+
+ *                o--------------------------| /
+ *                |                          |/
+ *                |          +------+
+ *                o----------| DIV3 |------------------------- dsi0pll
+ *                           +------+
+ */
+
+#define POLL_MAX_READS			10
+#define POLL_TIMEOUT_US		50
+
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+#define VCO_MIN_RATE			350000000
+#define VCO_MAX_RATE			750000000
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+
+#define LPFR_LUT_SIZE			10
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 resistance;
+};
+
+/* Loop filter resistance: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+	{ 479500000,  8 },
+	{ 480000000, 11 },
+	{ 575500000,  8 },
+	{ 576000000, 12 },
+	{ 610500000,  8 },
+	{ 659500000,  9 },
+	{ 671500000, 10 },
+	{ 672000000, 14 },
+	{ 708500000, 10 },
+	{ 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+	unsigned long vco_rate;
+	u8 postdiv3;
+	u8 postdiv1;
+	u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	/* private clocks: */
+	struct clk *clks[NUM_DSI_CLOCKS_MAX];
+	u32 num_clks;
+
+	/* clock-provider: */
+	struct clk *provided_clks[NUM_PROVIDED_CLKS];
+	struct clk_onecell_data clk_data;
+
+	struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+				u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	u32 val;
+
+	while (nb_tries--) {
+		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
+		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+	void __iomem *base = pll_28nm->mmio;
+
+	/*
+	 * Add HW-recommended delays after toggling the software
+	 * reset bit on and back off.
+	 */
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	unsigned long div_fbx1000, gen_vco_clk;
+	u32 refclk_cfg, frac_n_mode, frac_n_value;
+	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+	u32 cal_cfg10, cal_cfg11;
+	u32 rem;
+	int i;
+
+	VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+	/* Force postdiv2 to be div-4 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < LPFR_LUT_SIZE; i++)
+		if (rate <= lpfr_lut[i].vco_rate)
+			break;
+	if (i == LPFR_LUT_SIZE) {
+		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
+				rate);
+		return -EINVAL;
+	}
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+	/* Loop filter capacitance values : c1 and c2 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+	rem = rate % VCO_REF_CLK_RATE;
+	if (rem) {
+		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+		frac_n_mode = 1;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+	} else {
+		refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+	}
+
+	DBG("refclk_cfg = %d", refclk_cfg);
+
+	rem = div_fbx1000 % 1000;
+	frac_n_value = (rem << 16) / 1000;
+
+	DBG("div_fb = %lu", div_fbx1000);
+	DBG("frac_n_value = %d", frac_n_value);
+
+	DBG("Generated VCO Clock: %lu", gen_vco_clk);
+	rem = 0;
+	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+	if (frac_n_mode) {
+		sdm_cfg0 = 0x0;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg3 = frac_n_value >> 8;
+		sdm_cfg2 = frac_n_value & 0xff;
+	} else {
+		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+		sdm_cfg2 = 0;
+		sdm_cfg3 = 0;
+	}
+
+	DBG("sdm_cfg0=%d", sdm_cfg0);
+	DBG("sdm_cfg1=%d", sdm_cfg1);
+	DBG("sdm_cfg2=%d", sdm_cfg2);
+	DBG("sdm_cfg3=%d", sdm_cfg3);
+
+	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (pll_28nm->vco_delay)
+		udelay(pll_28nm->vco_delay);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+					POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	u32 sdm0, doubler, sdm_byp_div;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	u32 ref_clk = VCO_REF_CLK_RATE;
+	unsigned long vco_rate;
+
+	VERB("parent_rate=%lu", parent_rate);
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+	ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+		/* integer mode */
+		sdm_byp_div = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+		DBG("sdm_dc_off = %d", sdm_dc_off);
+		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		DBG("vco rate = %lu", vco_rate);
+	}
+
+	DBG("returning vco rate = %lu", vco_rate);
+
+	return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_28nm_clk_set_rate,
+	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+	.is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	u32 max_reads = 5, timeout_us = 100;
+	bool locked;
+	u32 val;
+	int i;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
+	for (i = 0; i < 2; i++) {
+		/* DSI Uniphy lock detect setting */
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+				0x0c, 100);
+		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+		/* poll for PLL ready status */
+		locked = pll_28nm_poll_for_ready(pll_28nm,
+						max_reads, timeout_us);
+		if (locked)
+			break;
+
+		pll_28nm_software_reset(pll_28nm);
+
+		/*
+		 * PLL power up sequence.
+		 * Add necessary delays recommended by hardware.
+		 */
+		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+	}
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL Lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	bool locked;
+	u32 max_reads = 10, timeout_us = 50;
+	u32 val;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	/* DSI PLL toggle lock detect setting */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	DBG("id=%d", pll_28nm->id);
+	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+
+	cached_state->postdiv3 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+	cached_state->postdiv1 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+	int ret;
+
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			cached_state->postdiv1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+			cached_state->byte_mux);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+				struct clk **byte_clk_provider,
+				struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+	if (pixel_clk_provider)
+		*pixel_clk_provider =
+				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+	return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	int i;
+
+	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+					pll_28nm->clks, pll_28nm->num_clks);
+
+	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
+		pll_28nm->provided_clks[i] = NULL;
+
+	pll_28nm->num_clks = 0;
+	pll_28nm->clk_data.clks = NULL;
+	pll_28nm->clk_data.clk_num = 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+	char clk_name[32], parent1[32], parent2[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_28nm_vco,
+	};
+	struct device *dev = &pll_28nm->pdev->dev;
+	struct clk **clks = pll_28nm->clks;
+	struct clk **provided_clks = pll_28nm->provided_clks;
+	int num = 0;
+	int ret;
+
+	DBG("%d", pll_28nm->id);
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	pll_28nm->base.clk_hw.init = &vco_init;
+	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = clk_register_divider(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			0, 4, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	clks[num++] = clk_register_fixed_factor(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			1, 2);
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+			clk_register_divider(dev, clk_name,
+				parent1, 0, pll_28nm->mmio +
+				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+				0, 8, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	clks[num++] = clk_register_mux(dev, clk_name,
+			(const char *[]){
+				parent1, parent2
+			}, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+			clk_register_fixed_factor(dev, clk_name,
+				parent1, CLK_SET_RATE_PARENT, 1, 4);
+
+	pll_28nm->num_clks = num;
+
+	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+	pll_28nm->clk_data.clks = provided_clks;
+
+	ret = of_clk_add_provider(dev->of_node,
+			of_clk_src_onecell_get, &pll_28nm->clk_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+					enum msm_dsi_phy_type type, int id)
+{
+	struct dsi_pll_28nm *pll_28nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+	if (!pll_28nm)
+		return ERR_PTR(-ENOMEM);
+
+	pll_28nm->pdev = pdev;
+	pll_28nm->id = id;
+
+	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = &pll_28nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_28nm_get_provider;
+	pll->destroy = dsi_pll_28nm_destroy;
+	pll->disable_seq = dsi_pll_28nm_disable_seq;
+	pll->save_state = dsi_pll_28nm_save_state;
+	pll->restore_state = dsi_pll_28nm_restore_state;
+
+	if (type == MSM_DSI_PHY_28NM_HPM) {
+		pll_28nm->vco_delay = 1;
+
+		pll->en_seq_cnt = 3;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
+	} else if (type == MSM_DSI_PHY_28NM_LP) {
+		pll_28nm->vco_delay = 1000;
+
+		pll->en_seq_cnt = 1;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
+	} else {
+		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = pll_28nm_register(pll_28nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
+
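(For reference: a standalone sketch of the fractional-N feedback arithmetic
in dsi_pll_28nm_clk_set_rate() above. When the target VCO rate is not an
integer multiple of the 19.2 MHz reference, the reference doubler is enabled
and the feedback divider is carried as div_fb x 1000, with the sub-integer
remainder scaled into a 16-bit frequency seed. The target rate below is
hypothetical.)

#include <stdio.h>

#define VCO_REF_CLK_RATE	19200000UL

int main(void)
{
	unsigned long rate = 648000000UL;	/* hypothetical VCO target */
	unsigned long div_fbx1000, gen_vco_clk, rem;
	unsigned int frac_n_value;

	if (rate % VCO_REF_CLK_RATE) {
		/* frac-N mode: doubler on, so the step is ref / 500 */
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	rem = div_fbx1000 % 1000;
	frac_n_value = (unsigned int)((rem << 16) / 1000);

	printf("div_fb x 1000 = %lu, frac seed = 0x%04x, vco = %lu Hz\n",
	       div_fbx1000, frac_n_value, gen_vco_clk);
	return 0;
}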
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
new file mode 100644
index 0000000..a6e7a25
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (e.g. DSI1):
+ *
+ *
+ *                        +------+
+ *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ *  F * byte_clk    |     +------+
+ *                  | bit clock divider (F / 8)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
+ *                  |     +------+                 | (sets parent rate)
+ *                  | byte clock divider (F)       |
+ *                  |                              |
+ *                  |                              o---> To esc RCG
+ *                  |                                (doesn't set parent rate)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV3 |----dsi1pll------o---> To dsi RCG
+ *                        +------+                 | (sets parent rate)
+ *                  dsi clock divider (F * magic)  |
+ *                                                 |
+ *                                                 o---> To pixel RCG
+ *                                                  (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS		8000
+#define POLL_TIMEOUT_US		1
+
+#define NUM_PROVIDED_CLKS	2
+
+#define VCO_REF_CLK_RATE	27000000
+#define VCO_MIN_RATE		600000000
+#define VCO_MAX_RATE		1200000000
+
+#define DSI_BYTE_PLL_CLK	0
+#define DSI_PIXEL_PLL_CLK	1
+
+#define VCO_PREF_DIV_RATIO	27
+
+struct pll_28nm_cached_state {
+	unsigned long vco_rate;
+	u8 postdiv3;
+	u8 postdiv2;
+	u8 postdiv1;
+};
+
+struct clk_bytediv {
+	struct clk_hw hw;
+	void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+	void __iomem *mmio;
+
+	/* custom byte clock divider */
+	struct clk_bytediv *bytediv;
+
+	/* private clocks: */
+	struct clk *clks[NUM_DSI_CLOCKS_MAX];
+	u32 num_clks;
+
+	/* clock-provider: */
+	struct clk *provided_clks[NUM_PROVIDED_CLKS];
+	struct clk_onecell_data clk_data;
+
+	struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+				    int nb_tries, int timeout_us)
+{
+	bool pll_locked = false;
+	u32 val;
+
+	while (nb_tries--) {
+		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
+		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	u32 val, temp, fb_divider;
+
+	DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
+	temp = rate / 10;
+	val = VCO_REF_CLK_RATE / 10;
+	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+	fb_divider = fb_divider / 2 - 1;
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+			fb_divider & 0xff);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+	val |= (fb_divider >> 8) & 0x07;
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+			val);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+			val);
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+			0xf);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+	val |= 0x7 << 4;
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+			val);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+					POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	unsigned long vco_rate;
+	u32 status, fb_divider, temp, ref_divider;
+
+	VERB("parent_rate=%lu", parent_rate);
+
+	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+		fb_divider &= 0xff;
+		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+		fb_divider = (temp << 8) | fb_divider;
+		fb_divider += 1;
+
+		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+		ref_divider &= 0x3f;
+		ref_divider += 1;
+
+		/* multiply by 2 */
+		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+	} else {
+		vco_rate = 0;
+	}
+
+	DBG("returning vco rate = %lu", vco_rate);
+
+	return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_28nm_clk_set_rate,
+	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+	.is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly.
+ */
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+	unsigned int div;
+
+	div = pll_read(bytediv->reg) & 0xff;
+
+	return parent_rate / (div + 1);
+}
+
+/* find multiplication factor (wrt byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+	unsigned long bit_mhz;
+
+	/* convert to bit clock in MHz */
+	bit_mhz = (byte_clk_rate * 8) / 1000000;
+
+	if (bit_mhz < 125)
+		return 64;
+	else if (bit_mhz < 250)
+		return 32;
+	else if (bit_mhz < 600)
+		return 16;
+	else
+		return 8;
+}
+
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long *prate)
+{
+	unsigned long best_parent;
+	unsigned int factor;
+
+	factor = get_vco_mul_factor(rate);
+
+	best_parent = rate * factor;
+	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+	return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+	u32 val;
+	unsigned int factor;
+
+	factor = get_vco_mul_factor(rate);
+
+	val = pll_read(bytediv->reg);
+	val &= ~0xff;	/* clear the old divider before ORing in the new one */
+	val |= (factor - 1) & 0xff;
+	pll_write(bytediv->reg, val);
+
+	return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+	.round_rate = clk_bytediv_round_rate,
+	.set_rate = clk_bytediv_set_rate,
+	.recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	bool locked;
+	unsigned int bit_div, byte_div;
+	int max_reads = 1000, timeout_us = 100;
+	u32 val;
+
+	DBG("id=%d", pll_28nm->id);
+
+	/*
+	 * before enabling the PLL, configure the bit clock divider since we
+	 * don't expose it as a clock to the outside world
+	 * 1: read back the byte clock divider that should already be set
+	 * 2: divide by 8 to get bit clock divider
+	 * 3: write it to POSTDIV1
+	 */
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+	byte_div = val + 1;
+	bit_div = byte_div / 8;
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+	val &= ~0xf;
+	val |= (bit_div - 1);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+	/* enable the PLL */
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	DBG("id=%d", pll_28nm->id);
+	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+
+	cached_state->postdiv3 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+	cached_state->postdiv2 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+	cached_state->postdiv1 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+	int ret;
+
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+			cached_state->postdiv2);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+			cached_state->postdiv1);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+				struct clk **byte_clk_provider,
+				struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+	if (pixel_clk_provider)
+		*pixel_clk_provider =
+				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+	return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+					pll_28nm->clks, pll_28nm->num_clks);
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+	char *clk_name, *parent_name, *vco_name;
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_28nm_vco,
+	};
+	struct device *dev = &pll_28nm->pdev->dev;
+	struct clk **clks = pll_28nm->clks;
+	struct clk **provided_clks = pll_28nm->provided_clks;
+	struct clk_bytediv *bytediv;
+	struct clk_init_data bytediv_init = { };
+	int ret, num = 0;
+
+	DBG("%d", pll_28nm->id);
+
+	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+	if (!bytediv)
+		return -ENOMEM;
+
+	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!vco_name)
+		return -ENOMEM;
+
+	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!parent_name)
+		return -ENOMEM;
+
+	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!clk_name)
+		return -ENOMEM;
+
+	pll_28nm->bytediv = bytediv;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	vco_init.name = vco_name;
+
+	pll_28nm->base.clk_hw.init = &vco_init;
+
+	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+	/* prepare and register bytediv */
+	bytediv->hw.init = &bytediv_init;
+	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+
+	bytediv_init.name = clk_name;
+	bytediv_init.ops = &clk_bytediv_ops;
+	bytediv_init.flags = CLK_SET_RATE_PARENT;
+	bytediv_init.parent_names = (const char * const *) &parent_name;
+	bytediv_init.num_parents = 1;
+
+	/* DIV2 */
+	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+			clk_register(dev, &bytediv->hw);
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+	/* DIV3 */
+	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+			clk_register_divider(dev, clk_name,
+				parent_name, 0, pll_28nm->mmio +
+				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+				0, 8, 0, NULL);
+
+	pll_28nm->num_clks = num;
+
+	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+	pll_28nm->clk_data.clks = provided_clks;
+
+	ret = of_clk_add_provider(dev->of_node,
+			of_clk_src_onecell_get, &pll_28nm->clk_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
+					       int id)
+{
+	struct dsi_pll_28nm *pll_28nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+	if (!pll_28nm)
+		return ERR_PTR(-ENOMEM);
+
+	pll_28nm->pdev = pdev;
+	pll_28nm->id = id + 1;
+
+	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = &pll_28nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_28nm_get_provider;
+	pll->destroy = dsi_pll_28nm_destroy;
+	pll->disable_seq = dsi_pll_28nm_disable_seq;
+	pll->save_state = dsi_pll_28nm_save_state;
+	pll->restore_state = dsi_pll_28nm_restore_state;
+
+	pll->en_seq_cnt = 1;
+	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
+
+	ret = pll_28nm_register(pll_28nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
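(For reference: a standalone sketch of the byte-clock divider policy used by
the 8960 PLL above. The VCO runs at F x byte_clk, where F is chosen from the
bit clock (byte_clk x 8) so the VCO stays within its 600-1200 MHz range. The
byte clock rate below is hypothetical; the lookup table is copied from
get_vco_mul_factor() in the patch.)

#include <stdio.h>

/* same table as get_vco_mul_factor() above */
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
	unsigned long bit_mhz = (byte_clk_rate * 8) / 1000000;

	if (bit_mhz < 125)
		return 64;
	else if (bit_mhz < 250)
		return 32;
	else if (bit_mhz < 600)
		return 16;
	else
		return 8;
}

int main(void)
{
	unsigned long byte_clk = 56250000UL;	/* hypothetical byte clock */
	unsigned int factor = get_vco_mul_factor(byte_clk);

	printf("F = %u, target VCO = %lu Hz\n", factor, byte_clk * factor);
	return 0;
}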
diff --git a/marvell/linux/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/marvell/linux/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 0000000..07c48dd
--- /dev/null
+++ b/marvell/linux/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,63 @@
+#ifndef SFPB_XML
+#define SFPB_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum sfpb_ahb_arb_master_port_en {
+	SFPB_MASTER_PORT_ENABLE = 3,
+	SFPB_MASTER_PORT_DISABLE = 0,
+};
+
+#define REG_SFPB_GPREG						0x00000058
+#define SFPB_GPREG_MASTER_PORT_EN__MASK				0x00001800
+#define SFPB_GPREG_MASTER_PORT_EN__SHIFT			11
+static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val)
+{
+	return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK;
+}
+
+
+#endif /* SFPB_XML */
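
(For reference: a standalone sketch of how the generated SFPB_GPREG field
helpers above are meant to be used: mask out the old field, then OR in the
packed enum value. The starting register value below is hypothetical; in the
driver this would be a read-modify-write of the register at offset 0x58.)

#include <stdio.h>
#include <stdint.h>

/* field definitions copied from the generated header above */
#define SFPB_GPREG_MASTER_PORT_EN__MASK		0x00001800
#define SFPB_GPREG_MASTER_PORT_EN__SHIFT	11
#define SFPB_MASTER_PORT_ENABLE			3

int main(void)
{
	uint32_t val = 0x00000042;	/* hypothetical current GPREG value */

	val &= ~SFPB_GPREG_MASTER_PORT_EN__MASK;
	val |= ((uint32_t)SFPB_MASTER_PORT_ENABLE <<
		SFPB_GPREG_MASTER_PORT_EN__SHIFT) &
	       SFPB_GPREG_MASTER_PORT_EN__MASK;
	printf("GPREG = 0x%08x\n", val);	/* prints 0x00001842 */
	return 0;
}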