[Feature] add GA346 baseline version

Add the MediaTek interconnect drivers for the GA346 baseline: a
DVFSRC-based EMI interconnect provider (mtk-dvfsrc-emi) and the MMQoS
(multimedia QoS) interconnect framework (mmqos-mtk, mmqos-hrt) with
MT6779 platform support (mmqos-mt6779).

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig
new file mode 100644
index 0000000..4218c0b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig
@@ -0,0 +1,34 @@
+config INTERCONNECT_MTK
+ bool "Mediatek Network-on-Chip interconnect drivers"
+ depends on ARCH_MEDIATEK
+ help
+ Support for MediaTek's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_MTK_EMI
+ tristate "Mediatek EMI interconnect driver"
+ depends on INTERCONNECT_MTK
+ depends on (MTK_DVFSRC && OF)
+ help
+ This is a driver for the MediaTek Network-on-Chip on
+ DVFSRC-based platforms.
+ The MediaTek EMI (External Memory Interface) interconnect driver
+ aggregates the EMI bandwidth required by its users and reports it
+ to the DVFSRC (Dynamic Voltage and Frequency Scaling Resource
+ Collector), which then adjusts the DRAM frequency to fulfill the
+ requested bandwidth.
+
+config INTERCONNECT_MTK_MMQOS_COMMON
+ tristate "Mediatek MMQoS support"
+ help
+ Support for multimedia QoS in MediaTek SoCs.
+ The MediaTek MMQoS (Multimedia Quality of Service) interconnect
+ driver collects bandwidth requests from MM users, such as display,
+ camera, MDP and video codec, and configures SMI settings
+ dynamically according to the aggregated bandwidth.
+
+config INTERCONNECT_MTK_MMQOS_MT6779
+ tristate "Mediatek MT6779 MMQoS interconnect driver"
+ depends on INTERCONNECT_MTK
+ depends on MTK_SMI
+ select INTERCONNECT_MTK_MMQOS_COMMON
+ help
+ Support for MT6779 multimedia QoS.
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile
new file mode 100644
index 0000000..f5eec6a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_INTERCONNECT_MTK_EMI) += mtk-dvfsrc-emi.o
+obj-$(CONFIG_INTERCONNECT_MTK_MMQOS_COMMON) += mmqos-mtk.o mmqos-hrt.o
+obj-$(CONFIG_INTERCONNECT_MTK_MMQOS_MT6779) += mmqos-mt6779.o
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c
new file mode 100644
index 0000000..98aec4b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Anthony Huang <anthony.huang@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include "mmqos-mtk.h"
+
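+/*
+ * Scale the camera bandwidth requested through sysfs by 6/5 (1.2x);
+ * the factor is presumably a DRAM efficiency weighting, as the macro
+ * name suggests.
+ */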
+#define MULTIPLY_W_DRAM_WEIGHT(value) ((value) * 6 / 5)
+
+struct mmqos_hrt *mmqos_hrt;
+
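+/*
+ * Return the HRT (hard real-time) bandwidth still available for @type:
+ * the total HRT budget minus the bandwidth occupied by the other HRT
+ * types, with the camera contribution replaced by cam_max_bw when one
+ * has been set. Returns 0xFFFF (effectively unlimited) before init.
+ */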
+s32 mtk_mmqos_get_avail_hrt_bw(enum hrt_type type)
+{
+ u32 i, used_bw = 0;
+
+ if (!mmqos_hrt)
+ return 0xFFFF;
+
+ for (i = 0; i < HRT_TYPE_NUM; i++) {
+ if (i != type)
+ used_bw += mmqos_hrt->hrt_bw[i];
+ }
+
+ if (mmqos_hrt->cam_max_bw)
+ used_bw = used_bw - mmqos_hrt->hrt_bw[HRT_CAM]
+ + mmqos_hrt->cam_max_bw;
+
+ return (mmqos_hrt->hrt_total_bw - used_bw);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_get_avail_hrt_bw);
+
+s32 mtk_mmqos_register_bw_throttle_notifier(struct notifier_block *nb)
+{
+ if (!nb || !mmqos_hrt)
+ return -EINVAL;
+ return blocking_notifier_chain_register(
+ &mmqos_hrt->hrt_bw_throttle_notifier,
+ nb);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_register_bw_throttle_notifier);
+
+s32 mtk_mmqos_unregister_bw_throttle_notifier(struct notifier_block *nb)
+{
+ if (!nb || !mmqos_hrt)
+ return -EINVAL;
+ return blocking_notifier_chain_unregister(
+ &mmqos_hrt->hrt_bw_throttle_notifier,
+ nb);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_unregister_bw_throttle_notifier);
+
+void mtk_mmqos_wait_throttle_done(void)
+{
+ u32 wait_result;
+
+ if (!mmqos_hrt)
+ return;
+
+ if (atomic_read(&mmqos_hrt->lock_count) > 0) {
+ pr_notice("begin to blocking for cam_max_bw=%d\n",
+ mmqos_hrt->cam_max_bw);
+ wait_result = wait_event_timeout(mmqos_hrt->hrt_wait,
+ atomic_read(&mmqos_hrt->lock_count) == 0,
+ msecs_to_jiffies(200));
+ pr_notice("blocking wait_result=%d\n", wait_result);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_wait_throttle_done);
+
+s32 mtk_mmqos_set_hrt_bw(enum hrt_type type, u32 bw)
+{
+ if (type >= HRT_TYPE_NUM) {
+ pr_notice("%s: wrong type:%d\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (!mmqos_hrt)
+ return -EINVAL;
+
+ mmqos_hrt->hrt_bw[type] = bw;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_set_hrt_bw);
+
+static void notify_bw_throttle(u32 bw)
+{
+ u64 start_jiffies = jiffies;
+
+ blocking_notifier_call_chain(&mmqos_hrt->hrt_bw_throttle_notifier,
+ (bw > 0) ? BW_THROTTLE_START : BW_THROTTLE_END, NULL);
+
+ pr_notice("%s: notify_time=%u\n", __func__,
+ jiffies_to_msecs(jiffies - start_jiffies));
+}
+
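+/*
+ * Apply a new granted camera bandwidth and notify throttle users. In
+ * the blocking case, lock_count is raised around the notifier call so
+ * that mtk_mmqos_wait_throttle_done() callers are held (up to 200 ms)
+ * until every notifier has finished.
+ */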
+static void set_camera_max_bw(u32 bw)
+{
+ mmqos_hrt->cam_max_bw = bw;
+ pr_notice("%s: %d\n", __func__, bw);
+
+ if (mmqos_hrt->blocking) {
+ atomic_inc(&mmqos_hrt->lock_count);
+ pr_notice("%s: increase lock_count=%d\n", __func__,
+ atomic_read(&mmqos_hrt->lock_count));
+ }
+ notify_bw_throttle(bw);
+
+ if (mmqos_hrt->blocking) {
+ atomic_dec(&mmqos_hrt->lock_count);
+ wake_up(&mmqos_hrt->hrt_wait);
+ pr_notice("%s: decrease lock_count=%d\n", __func__,
+ atomic_read(&mmqos_hrt->lock_count));
+ }
+}
+
+static void delay_work_handler(struct work_struct *work)
+{
+ mutex_lock(&mmqos_hrt->blocking_lock);
+ set_camera_max_bw(mmqos_hrt->cam_occu_bw);
+ mutex_unlock(&mmqos_hrt->blocking_lock);
+}
+
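+/*
+ * sysfs store handler: a decreasing camera bandwidth request is applied
+ * lazily after a 2 s delay without blocking, while an increasing
+ * request is applied immediately and marked blocking so that callers of
+ * mtk_mmqos_wait_throttle_done() are held until the throttle
+ * notification completes.
+ */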
+static ssize_t camera_max_bw_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ s32 ret;
+ u32 bw = 0;
+
+ ret = kstrtou32(buf, 10, &bw);
+ if (ret) {
+ dev_notice(dev, "wrong camera max bw string:%d\n", ret);
+ return ret;
+ }
+
+ cancel_delayed_work_sync(&mmqos_hrt->work);
+ mmqos_hrt->cam_occu_bw = MULTIPLY_W_DRAM_WEIGHT(bw);
+ mutex_lock(&mmqos_hrt->blocking_lock);
+ if (mmqos_hrt->cam_occu_bw < mmqos_hrt->cam_max_bw) {
+ mmqos_hrt->blocking = false;
+ schedule_delayed_work(&mmqos_hrt->work, 2 * HZ);
+ } else {
+ mmqos_hrt->blocking = true;
+ schedule_delayed_work(&mmqos_hrt->work, 0);
+ }
+ mutex_unlock(&mmqos_hrt->blocking_lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(camera_max_bw);
+
+void mtk_mmqos_init_hrt(struct mmqos_hrt *hrt)
+{
+ if (!hrt)
+ return;
+ mmqos_hrt = hrt;
+ atomic_set(&mmqos_hrt->lock_count, 0);
+ INIT_DELAYED_WORK(&mmqos_hrt->work, delay_work_handler);
+ BLOCKING_INIT_NOTIFIER_HEAD(&mmqos_hrt->hrt_bw_throttle_notifier);
+ mutex_init(&mmqos_hrt->blocking_lock);
+ init_waitqueue_head(&mmqos_hrt->hrt_wait);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_init_hrt);
+
+static struct attribute *mmqos_hrt_sysfs_attrs[] = {
+ &dev_attr_camera_max_bw.attr,
+ NULL
+};
+
+static struct attribute_group mmqos_hrt_sysfs_attr_group = {
+ .name = "mmqos_hrt",
+ .attrs = mmqos_hrt_sysfs_attrs
+};
+
+int mtk_mmqos_register_hrt_sysfs(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &mmqos_hrt_sysfs_attr_group);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_register_hrt_sysfs);
+
+void mtk_mmqos_unregister_hrt_sysfs(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &mmqos_hrt_sysfs_attr_group);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_unregister_hrt_sysfs);
+
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c
new file mode 100644
index 0000000..15f2ab8
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+
+#include <dt-bindings/interconnect/mtk,mmqos.h>
+#include <dt-bindings/interconnect/mtk,mt6779-emi.h>
+#include <dt-bindings/memory/mt6779-larb-port.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include "mmqos-mtk.h"
+
+
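+/*
+ * MT6779 topology: each larb port links to its larb, each larb links to
+ * a port of SMI common 0, and the common ports link to the common node,
+ * which ends the graph (MMQOS_NO_LINK). The numeric field is the port's
+ * bw_ratio, used as the shift when converting aggregated bandwidth into
+ * the value programmed through mtk_smi_larb_bw_set().
+ */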
+static const struct mtk_node_desc node_descs_mt6779[] = {
+ DEFINE_MNODE(common0,
+ SLAVE_COMMON(0), 0, MMQOS_NO_LINK),
+ DEFINE_MNODE(common0_port0,
+ MASTER_COMMON_PORT(0, 0), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port1,
+ MASTER_COMMON_PORT(0, 1), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port2,
+ MASTER_COMMON_PORT(0, 2), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port3,
+ MASTER_COMMON_PORT(0, 3), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port4,
+ MASTER_COMMON_PORT(0, 4), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port5,
+ MASTER_COMMON_PORT(0, 5), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port6,
+ MASTER_COMMON_PORT(0, 6), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port7,
+ MASTER_COMMON_PORT(0, 7), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port8,
+ MASTER_COMMON_PORT(0, 8), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(larb0, SLAVE_LARB(0), 0, MASTER_COMMON_PORT(0, 0)),
+ DEFINE_MNODE(larb1, SLAVE_LARB(1), 0, MASTER_COMMON_PORT(0, 1)),
+ DEFINE_MNODE(larb2, SLAVE_LARB(2), 0, MASTER_COMMON_PORT(0, 2)),
+ DEFINE_MNODE(larb3, SLAVE_LARB(3), 0, MASTER_COMMON_PORT(0, 3)),
+ DEFINE_MNODE(larb5, SLAVE_LARB(5), 0, MASTER_COMMON_PORT(0, 4)),
+ DEFINE_MNODE(larb8, SLAVE_LARB(8), 0, MASTER_COMMON_PORT(0, 5)),
+ DEFINE_MNODE(larb9, SLAVE_LARB(9), 0, MASTER_COMMON_PORT(0, 7)),
+ DEFINE_MNODE(larb10, SLAVE_LARB(10), 0, MASTER_COMMON_PORT(0, 6)),
+ DEFINE_MNODE(larb12, SLAVE_LARB(12), 0, MASTER_COMMON_PORT(0, 8)),
+ DEFINE_MNODE(larb13, SLAVE_LARB(13), 0, MASTER_COMMON_PORT(0, 6)),
+ DEFINE_MNODE(disp_postmask0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_POSTMASK0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl0_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_HDR), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl1_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_HDR), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_pvric0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_PVRIC0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_RDMA0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_wdma0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_WDMA0), 8, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_fake0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_FAKE0), 7, SLAVE_LARB(0)),
+
+ DEFINE_MNODE(disp_ovl0_2l_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_2L_HDR), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl1_2l_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_2L_HDR), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl0_2l,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_2L), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl1_2l,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_2L), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_RDMA1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_pvric0,
+ MASTER_LARB_PORT(M4U_PORT_MDP_PVRIC0), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_pvric1,
+ MASTER_LARB_PORT(M4U_PORT_MDP_PVRIC1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_MDP_RDMA0), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_MDP_RDMA1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot0_r,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT0_R), 8, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot0_w,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT0_W), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot1_r,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT1_R), 8, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot1_w,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT1_W), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_fake1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_FAKE1), 7, SLAVE_LARB(1)),
+
+ DEFINE_MNODE(vdec_mc_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_MC_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ufo_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_UFO_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pp_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PP_EXT), 8, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pred_rd_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PRED_RD_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pred_wr_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PRED_WR_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ppwrap_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PPWRAP_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_tile_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_TILE_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_vld_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_VLD_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_vld2_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_VLD2_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_avc_mv_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_AVC_MV_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ufo_enc_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_UFO_ENC_EXT),
+ 8, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_rg_ctrl_dma_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT),
+ 7, SLAVE_LARB(2)),
+
+ DEFINE_MNODE(venc_rcpu,
+ MASTER_LARB_PORT(M4U_PORT_VENC_RCPU), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_rec,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REC), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_sv_comv,
+ MASTER_LARB_PORT(M4U_PORT_VENC_SV_COMV), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_rd_comv,
+ MASTER_LARB_PORT(M4U_PORT_VENC_RD_COMV), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_rdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_rdma_lite,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_RDMA_LITE),
+ 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_y_rdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_Y_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_c_rdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_C_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_q_table,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_Q_TABLE), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgedc_wdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGDEC_WDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgedc_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGDEC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_wdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_WDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_wdma_lite,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_WDMA_LITE),
+ 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_cur_luma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_CUR_LUMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_cur_chroma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_CUR_CHROMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_ref_luma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REF_LUMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_ref_chroma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REF_CHROMA), 7, SLAVE_LARB(3)),
+
+ DEFINE_MNODE(img_imgi_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMGI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_imgbi_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMGBI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_dmgi_d1,
+ MASTER_LARB_PORT(M4U_PORT_DMGI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_depi_d1,
+ MASTER_LARB_PORT(M4U_PORT_DEPI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_lcei_d1,
+ MASTER_LARB_PORT(M4U_PORT_LCEI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smti_d1,
+ MASTER_LARB_PORT(M4U_PORT_SMTI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smto_d2,
+ MASTER_LARB_PORT(M4U_PORT_SMTO_D2), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smto_d1,
+ MASTER_LARB_PORT(M4U_PORT_SMTO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_crzo_d1,
+ MASTER_LARB_PORT(M4U_PORT_CRZO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_img3o_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMG3O_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_vipi_d1,
+ MASTER_LARB_PORT(M4U_PORT_VIPI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_WPE_RDMA1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_WPE_RDMA0), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_wdma,
+ MASTER_LARB_PORT(M4U_PORT_WPE_WDMA), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_timgo_d1,
+ MASTER_LARB_PORT(M4U_PORT_TIMGO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA0), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma2,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA2), 6, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma3,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA3), 6, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_wdma,
+ MASTER_LARB_PORT(M4U_PORT_MFB_WDMA), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve1,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve2,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE2), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve3,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE3), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve4,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE4), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve5,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE5), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve6,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE6), 7, SLAVE_LARB(5)),
+
+ DEFINE_MNODE(ipe_fdvt_rda,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_RDA), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_rdb,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_RDB), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_wra,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_WRA), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_wrb,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_WRB), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_rd0,
+ MASTER_LARB_PORT(M4U_PORT_FE_RD0), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_rd1,
+ MASTER_LARB_PORT(M4U_PORT_FE_RD1), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_wr0,
+ MASTER_LARB_PORT(M4U_PORT_FE_WR0), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_wr1,
+ MASTER_LARB_PORT(M4U_PORT_FE_WR1), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_rsc_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_RSC_RDMA0), 6, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_rsc_wdma,
+ MASTER_LARB_PORT(M4U_PORT_RSC_WDMA), 7, SLAVE_LARB(8)),
+
+ DEFINE_MNODE(cam_imgo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rrzo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_lsci__r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_bpci_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_yuvo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ufdi_r2_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rawi_r2_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rawi_r5_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_1,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_1), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_2,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_2), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_3,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_3), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_4,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_4), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_5,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_5), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_6,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_6), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_aao_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_afo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_flko_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_lceso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_crzo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ltmso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rsso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ccui,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CCUI), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ccuo,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CCUO), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_fake,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FAKE), 8, SLAVE_LARB(9)),
+
+ DEFINE_MNODE(cam_imgo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rrzo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lsci_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_bpci_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_yuvo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ufdi_r2_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r2_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r5_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_imgo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rrzo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lsci_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_bpci_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_yuvo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ufdi_r2_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r2_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r5_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_camsv_0,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_0), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_aao_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_afo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_flko_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lceso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_crzo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_aao_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_afo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_flko_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lceso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_crzo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ltmso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rsso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ltmso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rsso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_B), 8, SLAVE_LARB(10)),
+};
+
+static const char * const comm_muxes_mt6779[] = { "mm" };
+
+static const char * const comm_icc_path_names_mt6779[] = { "mmsys_path" };
+
+static const struct mtk_mmqos_desc mmqos_desc_mt6779 = {
+ .nodes = node_descs_mt6779,
+ .num_nodes = ARRAY_SIZE(node_descs_mt6779),
+ .comm_muxes = comm_muxes_mt6779,
+ .comm_icc_path_names = comm_icc_path_names_mt6779,
+ .max_ratio = 40,
+ .hrt = {
+ .hrt_bw = {1600, 0, 0},
+ .hrt_total_bw = 7466, /* Todo: Use DRAMC API */
+ }
+};
+
+
+static const struct of_device_id mtk_mmqos_mt6779_of_ids[] = {
+ {
+ .compatible = "mediatek,mt6779-mmqos",
+ .data = &mmqos_desc_mt6779,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_mmqos_mt6779_of_ids);
+
+static struct platform_driver mtk_mmqos_mt6779_driver = {
+ .probe = mtk_mmqos_probe,
+ .remove = mtk_mmqos_remove,
+ .driver = {
+ .name = "mtk-mt6779-mmqos",
+ .of_match_table = mtk_mmqos_mt6779_of_ids,
+ },
+};
+module_platform_driver(mtk_mmqos_mt6779_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c
new file mode 100644
index 0000000..81066c3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+
+#include <dt-bindings/interconnect/mtk,mmqos.h>
+#include <linux/clk.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_mmdvfs.h>
+#include <soc/mediatek/smi.h>
+
+#include "mmqos-mtk.h"
+
+#define SHIFT_ROUND(a, b) ((((a) - 1) >> (b)) + 1)
+#define icc_to_MBps(x) ((x) / 1000)
+
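+/*
+ * Program the SMI common port bandwidth setting. The aggregated MB/s
+ * value is scaled to a 12-bit field as mix_bw * 256 / freq and clamped
+ * to 0xfff; the bits above it are 0x1000 when a peak request exists or
+ * the QoS bound is not reached, and 0x3000 otherwise (register layout
+ * assumed from the SMI driver). 0x1200 is written when there is no
+ * bandwidth request at all.
+ */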
+static void mmqos_update_comm_bw(struct device *dev,
+ u32 comm_port, u32 freq, u64 mix_bw, u64 bw_peak, bool qos_bound)
+{
+ u32 comm_bw = 0;
+ u32 value;
+
+ if (!freq || !dev)
+ return;
+
+ if (mix_bw)
+ comm_bw = (mix_bw << 8) / freq;
+
+ if (comm_bw)
+ value = ((comm_bw > 0xfff) ? 0xfff : comm_bw) |
+ ((bw_peak > 0 || !qos_bound) ? 0x1000 : 0x3000);
+ else
+ value = 0x1200;
+
+ mtk_smi_common_bw_set(dev, comm_port, value);
+
+ dev_dbg(dev, "comm port=%d bw=%d freq=%d qos_bound=%d value=%#x\n",
+ comm_port, comm_bw, freq, qos_bound, value);
+}
+
+static int update_mm_clk(struct notifier_block *nb,
+ unsigned long value, void *v)
+{
+ struct mtk_mmqos *mmqos =
+ container_of(nb, struct mtk_mmqos, nb);
+ struct common_node *comm_node;
+ struct common_port_node *comm_port;
+
+ list_for_each_entry(comm_node, &mmqos->comm_list, list) {
+ comm_node->freq = clk_get_rate(comm_node->clk)/1000000;
+ list_for_each_entry(comm_port,
+ &comm_node->comm_port_list, list) {
+ mutex_lock(&comm_port->bw_lock);
+ if (comm_port->latest_mix_bw
+ || comm_port->latest_peak_bw) {
+ mmqos_update_comm_bw(comm_port->larb_dev,
+ comm_port->base->icc_node->id & 0xff,
+ comm_port->common->freq,
+ icc_to_MBps(comm_port->latest_mix_bw),
+ icc_to_MBps(comm_port->latest_peak_bw),
+ mmqos->qos_bound);
+ }
+ mutex_unlock(&comm_port->bw_lock);
+ }
+ }
+ return 0;
+}
+
+static void set_comm_icc_bw_handler(struct work_struct *work)
+{
+ struct common_node *comm_node = container_of(
+ work, struct common_node, work);
+ struct common_port_node *comm_port_node;
+ u32 avg_bw = 0, peak_bw = 0;
+
+ list_for_each_entry(comm_port_node, &comm_node->comm_port_list, list) {
+ mutex_lock(&comm_port_node->bw_lock);
+ avg_bw += comm_port_node->latest_avg_bw;
+ peak_bw += (comm_port_node->latest_peak_bw
+ & ~(MTK_MMQOS_MAX_BW));
+ mutex_unlock(&comm_port_node->bw_lock);
+ }
+ icc_set_bw(comm_node->icc_path, avg_bw, peak_bw);
+}
+
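+/*
+ * icc "set" callback. The node type is encoded in the upper bits of the
+ * node id (id >> 16) and the instance/port index in the low byte:
+ * common nodes defer icc_set_bw() to a workqueue, common ports program
+ * the SMI common bandwidth, and larb destinations convert the source
+ * port's mix bandwidth into an SMI larb setting capped at max_ratio.
+ * Ports 9 and 11 of larb 1 are additionally forced to a value of 8.
+ */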
+static int mtk_mmqos_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct larb_node *larb_node;
+ struct larb_port_node *larb_port_node;
+ struct common_port_node *comm_port_node;
+ struct common_node *comm_node;
+ struct mtk_mmqos *mmqos = container_of(dst->provider,
+ struct mtk_mmqos, prov);
+ u32 value = 1;
+
+ switch (dst->id >> 16) {
+ case MTK_MMQOS_NODE_COMMON:
+ comm_node = (struct common_node *)dst->data;
+ queue_work(mmqos->wq, &comm_node->work);
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ comm_port_node = (struct common_port_node *)dst->data;
+ mutex_lock(&comm_port_node->bw_lock);
+ comm_port_node->latest_mix_bw = comm_port_node->base->mix_bw;
+ comm_port_node->latest_peak_bw = dst->peak_bw;
+ comm_port_node->latest_avg_bw = dst->avg_bw;
+ mmqos_update_comm_bw(comm_port_node->larb_dev,
+ dst->id & 0xff, comm_port_node->common->freq,
+ icc_to_MBps(comm_port_node->latest_mix_bw),
+ icc_to_MBps(comm_port_node->latest_peak_bw),
+ mmqos->qos_bound);
+ mutex_unlock(&comm_port_node->bw_lock);
+ break;
+ case MTK_MMQOS_NODE_LARB:
+ larb_port_node = (struct larb_port_node *)src->data;
+ larb_node = (struct larb_node *)dst->data;
+ if (larb_port_node->base->mix_bw)
+ value = SHIFT_ROUND(
+ icc_to_MBps(larb_port_node->base->mix_bw),
+ larb_port_node->bw_ratio);
+ if (value > mmqos->max_ratio)
+ value = mmqos->max_ratio;
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev,
+ src->id & 0xff, value);
+
+ if ((dst->id & 0xff) == 1) {
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev, 9, 8);
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev, 11, 8);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
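+/*
+ * icc "aggregate" callback. For larb ports a non-zero peak (HRT)
+ * request is turned into a mix bandwidth of peak * 3 / 2, rounded up;
+ * otherwise the average bandwidth is accumulated. MTK_MMQOS_MAX_BW acts
+ * as a flag and is OR-ed into the aggregated peak instead of summed.
+ */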
+static int mtk_mmqos_aggregate(struct icc_node *node,
+ u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct mmqos_base_node *base_node = NULL;
+ u32 mix_bw = peak_bw;
+
+ switch (node->id >> 16) {
+ case MTK_MMQOS_NODE_LARB_PORT:
+ base_node = ((struct larb_port_node *)node->data)->base;
+ if (peak_bw)
+ mix_bw = SHIFT_ROUND(peak_bw * 3, 1);
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ base_node = ((struct common_port_node *)node->data)->base;
+ break;
+ default:
+ return 0;
+ }
+
+ if (base_node) {
+ if (*agg_avg == 0 && *agg_peak == 0)
+ base_node->mix_bw = 0;
+ base_node->mix_bw += peak_bw ? mix_bw : avg_bw;
+ }
+
+ *agg_avg += avg_bw;
+ if (peak_bw == MTK_MMQOS_MAX_BW)
+ *agg_peak |= MTK_MMQOS_MAX_BW;
+ else
+ *agg_peak += peak_bw;
+ return 0;
+}
+
+static struct icc_node *mtk_mmqos_xlate(
+ struct of_phandle_args *spec, void *data)
+{
+ struct icc_onecell_data *icc_data;
+ s32 i;
+
+ if (!spec || !data)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ icc_data = (struct icc_onecell_data *)data;
+
+ for (i = 0; i < icc_data->num_nodes; i++)
+ if (icc_data->nodes[i]->id == spec->args[0])
+ return icc_data->nodes[i];
+
+ pr_notice("%s: invalid index %u\n", __func__, spec->args[0]);
+ return ERR_PTR(-EINVAL);
+}
+
+int mtk_mmqos_probe(struct platform_device *pdev)
+{
+ struct mtk_mmqos *mmqos;
+ struct of_phandle_iterator it;
+ struct icc_onecell_data *data;
+ struct icc_node *node, *temp;
+ struct mmqos_base_node *base_node;
+ struct common_node *comm_node;
+ struct common_port_node *comm_port_node;
+ struct larb_node *larb_node;
+ struct larb_port_node *larb_port_node;
+ struct mtk_smi_iommu smi_imu = {};
+ int i, id, num_larbs = 0, ret;
+ const struct mtk_mmqos_desc *mmqos_desc;
+ const struct mtk_node_desc *node_desc;
+ struct device *larb_dev;
+ struct mmqos_hrt *hrt;
+
+ mmqos = devm_kzalloc(&pdev->dev, sizeof(*mmqos), GFP_KERNEL);
+ if (!mmqos)
+ return -ENOMEM;
+ mmqos->dev = &pdev->dev;
+
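+ /*
+ * Collect the SMI larb devices listed in the "mediatek,larbs"
+ * phandles; probing is deferred until every referenced larb has a
+ * bound driver, since their devices are needed for the SMI
+ * bandwidth settings programmed later.
+ */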
+ of_for_each_phandle(
+ &it, ret, pdev->dev.of_node, "mediatek,larbs", NULL, 0) {
+ struct device_node *np;
+ struct platform_device *larb_pdev;
+
+ np = of_node_get(it.node);
+ if (!of_device_is_available(np))
+ continue;
+
+ larb_pdev = of_find_device_by_node(np);
+ if (!larb_pdev) {
+ larb_pdev = of_platform_device_create(
+ np, NULL, platform_bus_type.dev_root);
+ if (!larb_pdev || !larb_pdev->dev.driver) {
+ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (of_property_read_u32(np, "mediatek,larb-id", &id))
+ id = num_larbs;
+ smi_imu.larb_imu[id].dev = &larb_pdev->dev;
+ num_larbs += 1;
+ }
+
+ INIT_LIST_HEAD(&mmqos->comm_list);
+
+ INIT_LIST_HEAD(&mmqos->prov.nodes);
+ mmqos->prov.set = mtk_mmqos_set;
+ mmqos->prov.aggregate = mtk_mmqos_aggregate;
+ mmqos->prov.xlate = mtk_mmqos_xlate;
+ mmqos->prov.dev = &pdev->dev;
+
+ ret = icc_provider_add(&mmqos->prov);
+ if (ret) {
+ dev_notice(&pdev->dev, "icc_provider_add failed:%d\n", ret);
+ return ret;
+ }
+
+ mmqos_desc = (struct mtk_mmqos_desc *)
+ of_device_get_match_data(&pdev->dev);
+ if (!mmqos_desc) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data = devm_kzalloc(&pdev->dev,
+ sizeof(*data) + mmqos_desc->num_nodes * sizeof(node),
+ GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < mmqos_desc->num_nodes; i++) {
+ node_desc = &mmqos_desc->nodes[i];
+ node = icc_node_create(node_desc->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+ icc_node_add(node, &mmqos->prov);
+
+ if (node_desc->link != MMQOS_NO_LINK) {
+ ret = icc_link_create(node, node_desc->link);
+ if (ret)
+ goto err;
+ }
+ node->name = node_desc->name;
+
+ base_node = devm_kzalloc(
+ &pdev->dev, sizeof(*base_node), GFP_KERNEL);
+ if (!base_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ base_node->icc_node = node;
+
+ switch (node->id >> 16) {
+ case MTK_MMQOS_NODE_COMMON:
+ comm_node = devm_kzalloc(
+ &pdev->dev, sizeof(*comm_node), GFP_KERNEL);
+ if (!comm_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ INIT_WORK(&comm_node->work, set_comm_icc_bw_handler);
+ comm_node->clk = devm_clk_get(&pdev->dev,
+ mmqos_desc->comm_muxes[node->id & 0xff]);
+ if (IS_ERR(comm_node->clk)) {
+ dev_notice(&pdev->dev, "get clk fail:%s\n",
+ mmqos_desc->comm_muxes[
+ node->id & 0xff]);
+ ret = -EINVAL;
+ goto err;
+ }
+ comm_node->freq = clk_get_rate(comm_node->clk)/1000000;
+ INIT_LIST_HEAD(&comm_node->list);
+ list_add_tail(&comm_node->list, &mmqos->comm_list);
+ INIT_LIST_HEAD(&comm_node->comm_port_list);
+ comm_node->icc_path = of_icc_get(&pdev->dev,
+ mmqos_desc->comm_icc_path_names[
+ node->id & 0xff]);
+ if (IS_ERR_OR_NULL(comm_node->icc_path)) {
+ dev_notice(&pdev->dev,
+ "get icc_path fail:%s\n",
+ mmqos_desc->comm_icc_path_names[
+ node->id & 0xff]);
+ ret = -EINVAL;
+ goto err;
+ }
+ comm_node->base = base_node;
+ node->data = (void *)comm_node;
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ comm_port_node = devm_kzalloc(&pdev->dev,
+ sizeof(*comm_port_node), GFP_KERNEL);
+ if (!comm_port_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ mutex_init(&comm_port_node->bw_lock);
+ comm_port_node->common = node->links[0]->data;
+ INIT_LIST_HEAD(&comm_port_node->list);
+ list_add_tail(&comm_port_node->list,
+ &comm_port_node->common->comm_port_list);
+ comm_port_node->base = base_node;
+ node->data = (void *)comm_port_node;
+ break;
+ case MTK_MMQOS_NODE_LARB:
+ larb_node = devm_kzalloc(
+ &pdev->dev, sizeof(*larb_node), GFP_KERNEL);
+ if (!larb_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ comm_port_node = node->links[0]->data;
+ larb_dev = smi_imu.larb_imu[node->id &
+ (MTK_LARB_NR_MAX-1)].dev;
+ if (larb_dev) {
+ comm_port_node->larb_dev = larb_dev;
+ larb_node->larb_dev = larb_dev;
+ }
+ larb_node->base = base_node;
+ node->data = (void *)larb_node;
+ break;
+ case MTK_MMQOS_NODE_LARB_PORT:
+ larb_port_node = devm_kzalloc(&pdev->dev,
+ sizeof(*larb_port_node), GFP_KERNEL);
+ if (!larb_port_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ larb_port_node->bw_ratio = node_desc->bw_ratio;
+ larb_port_node->base = base_node;
+ node->data = (void *)larb_port_node;
+ break;
+ default:
+ dev_notice(&pdev->dev,
+ "invalid node id:%#x\n", node->id);
+ ret = -EINVAL;
+ goto err;
+ }
+ data->nodes[i] = node;
+ }
+
+ data->num_nodes = mmqos_desc->num_nodes;
+ mmqos->prov.data = data;
+ mmqos->max_ratio = mmqos_desc->max_ratio;
+
+ mmqos->wq = create_singlethread_workqueue("mmqos_work_queue");
+ if (!mmqos->wq) {
+ dev_notice(&pdev->dev, "work queue create fail\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hrt = devm_kzalloc(&pdev->dev, sizeof(*hrt), GFP_KERNEL);
+ if (!hrt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ memcpy(hrt, &mmqos_desc->hrt, sizeof(mmqos_desc->hrt));
+ mtk_mmqos_init_hrt(hrt);
+
+ mmqos->nb.notifier_call = update_mm_clk;
+ register_mmdvfs_notifier(&mmqos->nb);
+
+ ret = mtk_mmqos_register_hrt_sysfs(&pdev->dev);
+ if (ret)
+ dev_notice(&pdev->dev, "sysfs create fail\n");
+
+ platform_set_drvdata(pdev, mmqos);
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(&mmqos->prov);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_probe);
+
+int mtk_mmqos_remove(struct platform_device *pdev)
+{
+ struct mtk_mmqos *mmqos = platform_get_drvdata(pdev);
+ struct icc_node *node, *temp;
+
+ list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(&mmqos->prov);
+ unregister_mmdvfs_notifier(&mmqos->nb);
+ destroy_workqueue(mmqos->wq);
+ mtk_mmqos_unregister_hrt_sysfs(&pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h
new file mode 100644
index 0000000..c6ebf33
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+#ifndef MMQOS_MTK_H
+#define MMQOS_MTK_H
+
+#include <linux/interconnect-provider.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/mmqos.h>
+
+#define MMQOS_NO_LINK (0xffffffff)
+#define MMQOS_MAX_COMM_PORT_NUM (15)
+
+struct mmqos_hrt {
+ u32 hrt_bw[HRT_TYPE_NUM];
+ u32 hrt_total_bw;
+ u32 cam_max_bw;
+ u32 cam_occu_bw;
+ bool blocking;
+ struct delayed_work work;
+ struct blocking_notifier_head hrt_bw_throttle_notifier;
+ atomic_t lock_count;
+ wait_queue_head_t hrt_wait;
+ struct mutex blocking_lock;
+};
+
+struct mmqos_base_node {
+ struct icc_node *icc_node;
+ u32 mix_bw;
+};
+
+struct common_node {
+ struct mmqos_base_node *base;
+ const char *clk_name;
+ struct clk *clk;
+ u64 freq;
+ struct list_head list;
+ struct icc_path *icc_path;
+ struct work_struct work;
+ struct list_head comm_port_list;
+};
+
+struct common_port_node {
+ struct mmqos_base_node *base;
+ struct common_node *common;
+ struct device *larb_dev;
+ struct mutex bw_lock;
+ u32 latest_mix_bw;
+ u32 latest_peak_bw;
+ u32 latest_avg_bw;
+ struct list_head list;
+};
+
+struct larb_node {
+ struct mmqos_base_node *base;
+ struct device *larb_dev;
+};
+
+struct larb_port_node {
+ struct mmqos_base_node *base;
+ u16 bw_ratio;
+};
+
+struct mtk_mmqos {
+ struct device *dev;
+ struct icc_provider prov;
+ struct notifier_block nb;
+ struct list_head comm_list;
+ struct workqueue_struct *wq;
+ u32 max_ratio;
+ bool qos_bound; /* Todo: Set qos_bound to true if necessary */
+};
+
+struct mtk_node_desc {
+ const char *name;
+ u32 id;
+ u32 link;
+ u16 bw_ratio;
+};
+
+struct mtk_mmqos_desc {
+ const struct mtk_node_desc *nodes;
+ const size_t num_nodes;
+ const char * const *comm_muxes;
+ const char * const *comm_icc_path_names;
+ const u32 max_ratio;
+ const struct mmqos_hrt hrt;
+};
+
+#define DEFINE_MNODE(_name, _id, _bw_ratio, _link) { \
+ .name = #_name, \
+ .id = _id, \
+ .bw_ratio = _bw_ratio, \
+ .link = _link, \
+ }
+
+int mtk_mmqos_probe(struct platform_device *pdev);
+int mtk_mmqos_remove(struct platform_device *pdev);
+
+/* For HRT */
+void mtk_mmqos_init_hrt(struct mmqos_hrt *hrt);
+int mtk_mmqos_register_hrt_sysfs(struct device *dev);
+void mtk_mmqos_unregister_hrt_sysfs(struct device *dev);
+#endif /* MMQOS_MTK_H */
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c
new file mode 100644
index 0000000..f5870fc
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_dvfsrc.h>
+#include <dt-bindings/interconnect/mtk,mt6873-emi.h>
+
+enum mtk_icc_name {
+ SLAVE_DDR_EMI,
+ MASTER_MCUSYS,
+ MASTER_GPUSYS,
+ MASTER_MMSYS,
+ MASTER_MM_VPU,
+ MASTER_MM_DISP,
+ MASTER_MM_VDEC,
+ MASTER_MM_VENC,
+ MASTER_MM_CAM,
+ MASTER_MM_IMG,
+ MASTER_MM_MDP,
+ MASTER_VPUSYS,
+ MASTER_VPU_PORT_0,
+ MASTER_VPU_PORT_1,
+ MASTER_MDLASYS,
+ MASTER_MDLA_PORT_0,
+ MASTER_UFS,
+ MASTER_PCIE,
+ MASTER_USB,
+ MASTER_WIFI,
+ MASTER_BT,
+ MASTER_NETSYS,
+ MASTER_DBGIF,
+
+ SLAVE_HRT_DDR_EMI,
+ MASTER_HRT_MMSYS,
+ MASTER_HRT_MM_DISP,
+ MASTER_HRT_MM_VDEC,
+ MASTER_HRT_MM_VENC,
+ MASTER_HRT_MM_CAM,
+ MASTER_HRT_MM_IMG,
+ MASTER_HRT_MM_MDP,
+ MASTER_HRT_DBGIF,
+};
+
+#define MAX_LINKS 1
+
+/**
+ * struct mtk_icc_node - MediaTek specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @ep: the type of this endpoint (0 = master, 1 = DDR EMI slave,
+ *      2 = HRT DDR EMI slave)
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @sum_avg: current sum aggregate value of all avg bw kBps requests
+ * @max_peak: current max aggregate value of all peak bw kBps requests
+ */
+struct mtk_icc_node {
+ unsigned char *name;
+ int ep;
+ u16 id;
+ u16 links[MAX_LINKS];
+ u16 num_links;
+ u64 sum_avg;
+ u64 max_peak;
+};
+
+struct mtk_icc_desc {
+ struct mtk_icc_node **nodes;
+ size_t num_nodes;
+};
+
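+/*
+ * Declare one interconnect node; the variadic arguments are the ids of
+ * the downstream nodes, and num_links is computed at compile time from
+ * the ARRAY_SIZE of a compound literal holding them.
+ */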
+#define DEFINE_MNODE(_name, _id, _ep, ...) \
+ static struct mtk_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .ep = _ep, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+}
+
+DEFINE_MNODE(ddr_emi, SLAVE_DDR_EMI, 1);
+DEFINE_MNODE(mcusys, MASTER_MCUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(gpu, MASTER_GPUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mmsys, MASTER_MMSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mm_vpu, MASTER_MM_VPU, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_disp, MASTER_MM_DISP, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_vdec, MASTER_MM_VDEC, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_venc, MASTER_MM_VENC, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_cam, MASTER_MM_CAM, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_img, MASTER_MM_IMG, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_mdp, MASTER_MM_MDP, 0, MASTER_MMSYS);
+DEFINE_MNODE(vpusys, MASTER_VPUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(vpu_port_0, MASTER_VPU_PORT_0, 0, MASTER_VPUSYS);
+DEFINE_MNODE(vpu_port_1, MASTER_VPU_PORT_1, 0, MASTER_VPUSYS);
+DEFINE_MNODE(mdlasys, MASTER_MDLASYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mdla_port_0, MASTER_MDLA_PORT_0, 0, MASTER_MDLASYS);
+DEFINE_MNODE(ufs, MASTER_UFS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(pcie, MASTER_PCIE, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(usb, MASTER_USB, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(wifi, MASTER_WIFI, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(bt, MASTER_BT, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(netsys, MASTER_NETSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(dbgif, MASTER_DBGIF, 0, SLAVE_DDR_EMI);
+
+DEFINE_MNODE(hrt_ddr_emi, SLAVE_HRT_DDR_EMI, 2);
+DEFINE_MNODE(hrt_mmsys, MASTER_HRT_MMSYS, 0, SLAVE_HRT_DDR_EMI);
+DEFINE_MNODE(hrt_mm_disp, MASTER_HRT_MM_DISP, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_vdec, MASTER_HRT_MM_VDEC, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_venc, MASTER_HRT_MM_VENC, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_cam, MASTER_HRT_MM_CAM, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_img, MASTER_HRT_MM_IMG, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_mdp, MASTER_HRT_MM_MDP, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_dbgif, MASTER_HRT_DBGIF, 0, SLAVE_HRT_DDR_EMI);
+
+static struct mtk_icc_node *mt6873_icc_nodes[] = {
+ [MT6873_SLAVE_DDR_EMI] = &ddr_emi,
+ [MT6873_MASTER_MCUSYS] = &mcusys,
+ [MT6873_MASTER_GPUSYS] = &gpu,
+ [MT6873_MASTER_MMSYS] = &mmsys,
+ [MT6873_MASTER_MM_VPU] = &mm_vpu,
+ [MT6873_MASTER_MM_DISP] = &mm_disp,
+ [MT6873_MASTER_MM_VDEC] = &mm_vdec,
+ [MT6873_MASTER_MM_VENC] = &mm_venc,
+ [MT6873_MASTER_MM_CAM] = &mm_cam,
+ [MT6873_MASTER_MM_IMG] = &mm_img,
+ [MT6873_MASTER_MM_MDP] = &mm_mdp,
+ [MT6873_MASTER_VPUSYS] = &vpusys,
+ [MT6873_MASTER_VPU_0] = &vpu_port_0,
+ [MT6873_MASTER_VPU_1] = &vpu_port_1,
+ [MT6873_MASTER_MDLASYS] = &mdlasys,
+ [MT6873_MASTER_MDLA_0] = &mdla_port_0,
+ [MT6873_MASTER_UFS] = &ufs,
+ [MT6873_MASTER_PCIE] = &pcie,
+ [MT6873_MASTER_USB] = &usb,
+ [MT6873_MASTER_WIFI] = &wifi,
+ [MT6873_MASTER_BT] = &bt,
+ [MT6873_MASTER_NETSYS] = &netsys,
+ [MT6873_MASTER_DBGIF] = &dbgif,
+
+ [MT6873_SLAVE_HRT_DDR_EMI] = &hrt_ddr_emi,
+ [MT6873_MASTER_HRT_MMSYS] = &hrt_mmsys,
+ [MT6873_MASTER_HRT_MM_DISP] = &hrt_mm_disp,
+ [MT6873_MASTER_HRT_MM_VDEC] = &hrt_mm_vdec,
+ [MT6873_MASTER_HRT_MM_VENC] = &hrt_mm_venc,
+ [MT6873_MASTER_HRT_MM_CAM] = &hrt_mm_cam,
+ [MT6873_MASTER_HRT_MM_IMG] = &hrt_mm_img,
+ [MT6873_MASTER_HRT_MM_MDP] = &hrt_mm_mdp,
+ [MT6873_MASTER_HRT_DBGIF] = &hrt_dbgif,
+};
+
+static struct mtk_icc_desc mt6873_icc = {
+ .nodes = mt6873_icc_nodes,
+ .num_nodes = ARRAY_SIZE(mt6873_icc_nodes),
+};
+
+static const struct of_device_id emi_icc_of_match[] = {
+ { .compatible = "mediatek,mt6873-dvfsrc", .data = &mt6873_icc },
+ { .compatible = "mediatek,mt6880-dvfsrc", .data = &mt6873_icc },
+ { .compatible = "mediatek,mt6890-dvfsrc", .data = &mt6873_icc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, emi_icc_of_match);
+
+static int emi_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct mtk_icc_node *in;
+
+ in = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ in->sum_avg = *agg_avg;
+ in->max_peak = *agg_peak;
+
+ return 0;
+}
+
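+/*
+ * Forward the aggregated bandwidth to the DVFSRC. Endpoint type 1 is
+ * the regular DDR/EMI slave and issues peak and total average bandwidth
+ * requests; endpoint type 2 is the HRT slave and issues an HRT
+ * bandwidth request only. Intermediate nodes (ep == 0) are ignored.
+ */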
+static int emi_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ int ret = 0;
+ struct mtk_icc_node *node;
+
+ node = dst->data;
+
+ if (node->ep == 1) {
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_PEAK_BW_REQUEST,
+ node->max_peak);
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_BW_REQUEST,
+ node->sum_avg);
+ } else if (node->ep == 2) {
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_HRTBW_REQUEST,
+ node->sum_avg);
+ }
+
+ return ret;
+}
+
+static int emi_icc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct mtk_icc_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct icc_node *node;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct mtk_icc_node **mnodes;
+ struct icc_node *tmp;
+ size_t num_nodes, i, j;
+ int ret;
+
+ match = of_match_node(emi_icc_of_match, dev->parent->of_node);
+
+ if (!match) {
+ dev_err(dev, "invalid compatible string\n");
+ return -ENODEV;
+ }
+
+ desc = match->data;
+ mnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider->dev = pdev->dev.parent;
+ provider->set = emi_icc_set;
+ provider->aggregate = emi_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ node = icc_node_create(mnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = mnodes[i]->name;
+ node->data = mnodes[i];
+ icc_node_add(node, provider);
+
+ /* populate links */
+ for (j = 0; j < mnodes[i]->num_links; j++)
+ icc_link_create(node, mnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, provider);
+
+ return 0;
+err:
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int emi_icc_remove(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ struct icc_node *n, *tmp;
+
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static struct platform_driver emi_icc_driver = {
+ .probe = emi_icc_probe,
+ .remove = emi_icc_remove,
+ .driver = {
+ .name = "mediatek-emi-icc",
+ },
+};
+
+static int __init mtk_emi_icc_init(void)
+{
+ return platform_driver_register(&emi_icc_driver);
+}
+subsys_initcall(mtk_emi_icc_init);
+
+static void __exit mtk_emi_icc_exit(void)
+{
+ platform_driver_unregister(&emi_icc_driver);
+}
+module_exit(mtk_emi_icc_exit);
+
+MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");