[Feature] Add GA346 baseline version
Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/Kconfig b/src/kernel/linux/v4.19/drivers/interconnect/Kconfig
new file mode 100644
index 0000000..ac41ea6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/Kconfig
@@ -0,0 +1,16 @@
+menuconfig INTERCONNECT
+ tristate "On-Chip Interconnect management support"
+ help
+ Support for management of the on-chip interconnects.
+
+ This framework is designed to provide a generic interface for
+ managing the interconnects in a SoC.
+
+ If unsure, say no.
+
+if INTERCONNECT
+
+source "drivers/interconnect/qcom/Kconfig"
+source "drivers/interconnect/mediatek/Kconfig"
+
+endif
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/Makefile b/src/kernel/linux/v4.19/drivers/interconnect/Makefile
new file mode 100644
index 0000000..253f24a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+icc-core-objs := core.o
+
+obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
+obj-$(CONFIG_INTERCONNECT_MTK) += mediatek/
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/core.c b/src/kernel/linux/v4.19/drivers/interconnect/core.c
new file mode 100644
index 0000000..6005a1c
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/core.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework core driver
+ *
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+
+static DEFINE_IDR(icc_idr);
+static LIST_HEAD(icc_providers);
+static DEFINE_MUTEX(icc_lock);
+static struct dentry *icc_debugfs_dir;
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+{
+ if (!n)
+ return;
+
+ seq_printf(s, "%-30s %12u %12u\n",
+ n->name, n->avg_bw, n->peak_bw);
+}
+
+static int icc_summary_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+
+ seq_puts(s, " node avg peak\n");
+ seq_puts(s, "--------------------------------------------------------\n");
+
+ mutex_lock(&icc_lock);
+
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ struct icc_req *r;
+
+ icc_summary_show_one(s, n);
+ hlist_for_each_entry(r, &n->req_list, req_node) {
+ if (!r->dev)
+ continue;
+
+ seq_printf(s, " %-26s %12u %12u\n",
+ dev_name(r->dev), r->avg_bw,
+ r->peak_bw);
+ }
+ }
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+
+static int icc_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icc_summary_show, inode->i_private);
+}
+
+static const struct file_operations icc_summary_fops = {
+ .open = icc_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct icc_node *node_find(const int id)
+{
+ return idr_find(&icc_idr, id);
+}
+
+static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ ssize_t num_nodes)
+{
+ struct icc_node *node = dst;
+ struct icc_path *path;
+ int i;
+
+ path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ path->num_nodes = num_nodes;
+
+ for (i = num_nodes - 1; i >= 0; i--) {
+ node->provider->users++;
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+ path->reqs[i].node = node;
+ path->reqs[i].dev = dev;
+ /* reference to previous node was saved during path traversal */
+ node = node->reverse;
+ }
+
+ return path;
+}
+
+static struct icc_path *path_find(struct device *dev, struct icc_node *src,
+ struct icc_node *dst)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *n, *node = NULL;
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head visited_list;
+ size_t i, depth = 1;
+ bool found = false;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&visited_list);
+
+ list_add(&src->search_list, &traverse_list);
+ src->reverse = NULL;
+
+ do {
+ list_for_each_entry_safe(node, n, &traverse_list, search_list) {
+ if (node == dst) {
+ found = true;
+ list_splice_init(&edge_list, &visited_list);
+ list_splice_init(&traverse_list, &visited_list);
+ break;
+ }
+ for (i = 0; i < node->num_links; i++) {
+ struct icc_node *tmp = node->links[i];
+
+ if (!tmp) {
+ path = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ if (tmp->is_traversed)
+ continue;
+
+ tmp->is_traversed = true;
+ tmp->reverse = node;
+ list_add_tail(&tmp->search_list, &edge_list);
+ }
+ }
+
+ if (found)
+ break;
+
+ list_splice_init(&traverse_list, &visited_list);
+ list_splice_init(&edge_list, &traverse_list);
+
+ /* count the hops including the source */
+ depth++;
+
+ } while (!list_empty(&traverse_list));
+
+out:
+
+ /* reset the traversed state */
+ list_for_each_entry_reverse(n, &visited_list, search_list)
+ n->is_traversed = false;
+
+ if (found)
+ path = path_init(dev, dst, depth);
+
+ return path;
+}
+
+/*
+ * We want the path to honor all bandwidth requests, so the average and peak
+ * bandwidth requirements from each consumer are aggregated at each node.
+ * The aggregation is platform specific, so each platform can customize it by
+ * implementing its own aggregate() function.
+ */
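+
+/*
+ * A minimal platform aggregate() implementation might look like the
+ * following (an illustrative sketch; the callback name is hypothetical and
+ * a real provider may weight, convert or clamp the requests):
+ *
+ *   static int foo_aggregate(struct icc_node *node, u32 avg_bw,
+ *                            u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+ *   {
+ *           *agg_avg += avg_bw;
+ *           *agg_peak = max(*agg_peak, peak_bw);
+ *           return 0;
+ *   }
+ */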
+
+static int aggregate_requests(struct icc_node *node)
+{
+ struct icc_provider *p = node->provider;
+ struct icc_req *r;
+
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+
+ hlist_for_each_entry(r, &node->req_list, req_node)
+ p->aggregate(node, r->avg_bw, r->peak_bw,
+ &node->avg_bw, &node->peak_bw);
+
+ return 0;
+}
+
+static int apply_constraints(struct icc_path *path)
+{
+ struct icc_node *next, *prev = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ next = path->reqs[i].node;
+
+ /*
+ * Both endpoints should be valid master-slave pairs of the
+ * same interconnect provider that will be configured.
+ */
+ if (!prev || next->provider != prev->provider) {
+ prev = next;
+ continue;
+ }
+
+ /* set the constraints */
+ ret = next->provider->set(prev, next);
+ if (ret)
+ goto out;
+
+ prev = next;
+ }
+out:
+ return ret;
+}
+
+/**
+ * of_icc_xlate_onecell() - Translate function using a single index.
+ * @spec: OF phandle args to map into an interconnect node.
+ * @data: private data (pointer to struct icc_onecell_data)
+ *
+ * This is a generic translate function that can be used to model simple
+ * interconnect providers that have one device tree node and provide
+ * multiple interconnect nodes. A single cell is used as an index into
+ * an array of icc nodes specified in the icc_onecell_data struct when
+ * registering the provider.
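+ *
+ * An illustrative hook-up (a sketch; @my_onecell_data is hypothetical and
+ * holds pointers to the provider's nodes):
+ *
+ *   provider->xlate = of_icc_xlate_onecell;
+ *   provider->data = my_onecell_data;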
+ */
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data)
+{
+ struct icc_onecell_data *icc_data = data;
+ unsigned int idx = spec->args[0];
+
+ if (idx >= icc_data->num_nodes) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return icc_data->nodes[idx];
+}
+EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
+
+/**
+ * of_icc_get_from_provider() - Look-up interconnect node
+ * @spec: OF phandle args to use for look-up
+ *
+ * Looks for interconnect provider under the node specified by @spec and if
+ * found, uses xlate function of the provider to map phandle args to node.
+ *
+ * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * on failure.
+ */
+static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+ struct icc_provider *provider;
+
+ if (!spec || spec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&icc_lock);
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ if (provider->dev->of_node == spec->np)
+ node = provider->xlate(spec, provider->data);
+ if (!IS_ERR(node))
+ break;
+ }
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned so that consumer
+ * drivers still build. Drivers may handle the NULL case explicitly, but
+ * are not required to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
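+ *
+ * An illustrative consumer call (the "dma-mem" path name is hypothetical
+ * and must match an entry in the consumer's "interconnect-names" DT
+ * property):
+ *
+ *   path = of_icc_get(dev, "dma-mem");
+ *   if (IS_ERR(path))
+ *           return PTR_ERR(path);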
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *src_node, *dst_node;
+ struct device_node *np = NULL;
+ struct of_phandle_args src_args, dst_args;
+ int idx = 0;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+ * When the consumer DT node does not have an "interconnects"
+ * property, return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for each endpoint. For
+ * now, support only global IDs; this can be extended in the future if
+ * needed without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2,
+ &src_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(src_args.np);
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2 + 1,
+ &dst_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(dst_args.np);
+
+ src_node = of_icc_get_from_provider(&src_args);
+
+ if (IS_ERR(src_node)) {
+ if (PTR_ERR(src_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding src node: %ld\n",
+ PTR_ERR(src_node));
+ return ERR_CAST(src_node);
+ }
+
+ dst_node = of_icc_get_from_provider(&dst_args);
+
+ if (IS_ERR(dst_node)) {
+ if (PTR_ERR(dst_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding dst node: %ld\n",
+ PTR_ERR(dst_node));
+ return ERR_CAST(dst_node);
+ }
+
+ mutex_lock(&icc_lock);
+ path = path_find(dev, src_node, dst_node);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ mutex_unlock(&icc_lock);
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(of_icc_get);
+
+/**
+ * icc_set_bw() - set bandwidth constraints on an interconnect path
+ * @path: reference to the path returned by icc_get()
+ * @avg_bw: average bandwidth in kilobytes per second
+ * @peak_bw: peak bandwidth in kilobytes per second
+ *
+ * This function is used by an interconnect consumer to express its own needs
+ * in terms of bandwidth for a previously requested path between two endpoints.
+ * The requests are aggregated and each node is updated accordingly. The entire
+ * path is locked by a mutex to ensure that the set() is completed.
+ * The @path can be NULL when the "interconnects" DT property is missing,
+ * in which case no constraints are set.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
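+ *
+ * An illustrative request for 100 MBps average and 200 MBps peak (both
+ * arguments are in kBps):
+ *
+ *   ret = icc_set_bw(path, 100000, 200000);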
+ */
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ struct icc_node *node;
+ u32 old_avg, old_peak;
+ size_t i;
+ int ret;
+
+ if (!path || !path->num_nodes)
+ return 0;
+
+ mutex_lock(&icc_lock);
+
+ old_avg = path->reqs[0].avg_bw;
+ old_peak = path->reqs[0].peak_bw;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+
+ /* update the consumer request for this path */
+ path->reqs[i].avg_bw = avg_bw;
+ path->reqs[i].peak_bw = peak_bw;
+
+ /* aggregate requests for this node */
+ aggregate_requests(node);
+ }
+
+ ret = apply_constraints(path);
+ if (ret) {
+ pr_debug("interconnect: error applying constraints (%d)\n",
+ ret);
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ path->reqs[i].avg_bw = old_avg;
+ path->reqs[i].peak_bw = old_peak;
+ aggregate_requests(node);
+ }
+ apply_constraints(path);
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_set_bw);
+
+/**
+ * icc_get() - return a handle for path between two endpoints
+ * @dev: the device requesting the path
+ * @src_id: source device port id
+ * @dst_id: destination device port id
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release
+ * constraints when they are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned so that consumer
+ * drivers still build. Drivers may handle the NULL case explicitly, but
+ * are not required to.
+ *
+ * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
+ * interconnect API is disabled.
+ */
+struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
+{
+ struct icc_node *src, *dst;
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+
+ mutex_lock(&icc_lock);
+
+ src = node_find(src_id);
+ if (!src)
+ goto out;
+
+ dst = node_find(dst_id);
+ if (!dst)
+ goto out;
+
+ path = path_find(dev, src, dst);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+
+out:
+ mutex_unlock(&icc_lock);
+ return path;
+}
+EXPORT_SYMBOL_GPL(icc_get);
+
+/**
+ * icc_put() - release the reference to the icc_path
+ * @path: interconnect path
+ *
+ * Use this function to release the constraints on a path when the path is
+ * no longer needed. The constraints will be re-aggregated.
+ */
+void icc_put(struct icc_path *path)
+{
+ struct icc_node *node;
+ size_t i;
+ int ret;
+
+ if (!path || WARN_ON(IS_ERR(path)))
+ return;
+
+ ret = icc_set_bw(path, 0, 0);
+ if (ret)
+ pr_err("%s: error (%d)\n", __func__, ret);
+
+ mutex_lock(&icc_lock);
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ hlist_del(&path->reqs[i].req_node);
+ if (!WARN_ON(!node->provider->users))
+ node->provider->users--;
+ }
+ mutex_unlock(&icc_lock);
+
+ kfree(path);
+}
+EXPORT_SYMBOL_GPL(icc_put);
+
+static struct icc_node *icc_node_create_nolock(int id)
+{
+ struct icc_node *node;
+
+ /* check if node already exists */
+ node = node_find(id);
+ if (node)
+ return node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+ if (id < 0) {
+ WARN(1, "%s: couldn't get idr\n", __func__);
+ kfree(node);
+ return ERR_PTR(id);
+ }
+
+ node->id = id;
+
+ return node;
+}
+
+/**
+ * icc_node_create() - create a node
+ * @id: node id
+ *
+ * Return: icc_node pointer on success, or ERR_PTR() on error
+ */
+struct icc_node *icc_node_create(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = icc_node_create_nolock(id);
+
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(icc_node_create);
+
+/**
+ * icc_node_destroy() - destroy a node
+ * @id: node id
+ */
+void icc_node_destroy(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = node_find(id);
+ if (node) {
+ idr_remove(&icc_idr, node->id);
+ WARN_ON(!hlist_empty(&node->req_list));
+ }
+
+ mutex_unlock(&icc_lock);
+
+ kfree(node);
+}
+EXPORT_SYMBOL_GPL(icc_node_destroy);
+
+/**
+ * icc_link_create() - create a link between two nodes
+ * @node: source node id
+ * @dst_id: destination node id
+ *
+ * Create a link between two nodes. The nodes might belong to different
+ * interconnect providers, and the @dst_id node might not exist yet (if
+ * its provider driver has not probed). In that case the @dst_id node is
+ * created now, and the rest of its data is filled in when the provider
+ * driver probes.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ struct icc_node *dst;
+ struct icc_node **new;
+ int ret = 0;
+
+ if (!node->provider)
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ dst = node_find(dst_id);
+ if (!dst) {
+ dst = icc_node_create_nolock(dst_id);
+
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out;
+ }
+ }
+
+ new = krealloc(node->links,
+ (node->num_links + 1) * sizeof(*node->links),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->links = new;
+ node->links[node->num_links++] = dst;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_create);
+
+/**
+ * icc_link_destroy() - destroy a link between two nodes
+ * @src: pointer to source node
+ * @dst: pointer to destination node
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ struct icc_node **new;
+ size_t slot;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(dst))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (slot = 0; slot < src->num_links; slot++)
+ if (src->links[slot] == dst)
+ break;
+
+ if (WARN_ON(slot == src->num_links)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ src->links[slot] = src->links[--src->num_links];
+
+ new = krealloc(src->links, src->num_links * sizeof(*src->links),
+ GFP_KERNEL);
+ if (new)
+ src->links = new;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_destroy);
+
+/**
+ * icc_node_add() - add interconnect node to interconnect provider
+ * @node: pointer to the interconnect node
+ * @provider: pointer to the interconnect provider
+ */
+void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+
+ node->provider = provider;
+ list_add_tail(&node->node_list, &provider->nodes);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_add);
+
+/**
+ * icc_node_del() - delete interconnect node from interconnect provider
+ * @node: pointer to the interconnect node
+ */
+void icc_node_del(struct icc_node *node)
+{
+ mutex_lock(&icc_lock);
+
+ list_del(&node->node_list);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_del);
+
+/**
+ * icc_provider_add() - add a new interconnect provider
+ * @provider: the interconnect provider that will be added into topology
+ *
+ * Return: 0 on success, or an error code otherwise
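+ *
+ * An illustrative registration sequence (callback names are hypothetical;
+ * error handling omitted):
+ *
+ *   provider->set = foo_set;
+ *   provider->aggregate = foo_aggregate;
+ *   provider->xlate = of_icc_xlate_onecell;
+ *   provider->dev = dev;
+ *   ret = icc_provider_add(provider);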
+ */
+int icc_provider_add(struct icc_provider *provider)
+{
+ if (WARN_ON(!provider->set))
+ return -EINVAL;
+ if (WARN_ON(!provider->xlate))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ INIT_LIST_HEAD(&provider->nodes);
+ list_add_tail(&provider->provider_list, &icc_providers);
+
+ mutex_unlock(&icc_lock);
+
+ dev_dbg(provider->dev, "interconnect provider added to topology\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+/**
+ * icc_provider_del() - delete previously added interconnect provider
+ * @provider: the interconnect provider that will be removed from topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_del(struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+ if (provider->users) {
+ pr_warn("interconnect provider still has %d users\n",
+ provider->users);
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&provider->nodes)) {
+ pr_warn("interconnect provider still has nodes\n");
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ list_del(&provider->provider_list);
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_del);
+
+static int __init icc_init(void)
+{
+ icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
+ debugfs_create_file("interconnect_summary", 0444,
+ icc_debugfs_dir, NULL, &icc_summary_fops);
+ return 0;
+}
+
+static void __exit icc_exit(void)
+{
+ debugfs_remove_recursive(icc_debugfs_dir);
+}
+module_init(icc_init);
+module_exit(icc_exit);
+
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Interconnect Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig
new file mode 100644
index 0000000..4218c0b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Kconfig
@@ -0,0 +1,34 @@
+config INTERCONNECT_MTK
+ bool "MediaTek Network-on-Chip interconnect drivers"
+ depends on ARCH_MEDIATEK
+ help
+ Support for MediaTek's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_MTK_EMI
+ tristate "MediaTek EMI interconnect driver"
+ depends on INTERCONNECT_MTK
+ depends on (MTK_DVFSRC && OF)
+ help
+ This is a driver for the MediaTek Network-on-Chip on DVFSRC-based
+ platforms.
+ The MediaTek EMI (external memory interface) interconnect driver
+ aggregates the EMI bandwidth required by its users and forwards it to
+ the DVFSRC (dynamic voltage and frequency scaling resource collector),
+ which then selects an appropriate DRAM frequency to fulfill the
+ aggregated bandwidth request.
+
+config INTERCONNECT_MTK_MMQOS_COMMON
+ tristate "MediaTek MMQoS support"
+ help
+ Support for multimedia QoS in MediaTek's SoCs.
+ The MediaTek MMQoS (Multimedia Quality of Service) interconnect driver
+ collects bandwidth requests from multimedia users such as display,
+ camera, MDP and video codec, and dynamically configures SMI settings
+ according to the aggregated bandwidth.
+
+config INTERCONNECT_MTK_MMQOS_MT6779
+ tristate "MediaTek MT6779 MMQoS interconnect driver"
+ depends on INTERCONNECT_MTK
+ depends on MTK_SMI
+ select INTERCONNECT_MTK_MMQOS_COMMON
+ help
+ Support for multimedia QoS on MT6779.
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile
new file mode 100644
index 0000000..f5eec6a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_INTERCONNECT_MTK_EMI) += mtk-dvfsrc-emi.o
+obj-$(CONFIG_INTERCONNECT_MTK_MMQOS_COMMON) += mmqos-mtk.o mmqos-hrt.o
+obj-$(CONFIG_INTERCONNECT_MTK_MMQOS_MT6779) += mmqos-mt6779.o
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c
new file mode 100644
index 0000000..98aec4b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-hrt.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Anthony Huang <anthony.huang@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include "mmqos-mtk.h"
+
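+/* Apply a 6/5 (1.2x) DRAM weighting to the requested bandwidth. */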
+#define MULTIPLY_W_DRAM_WEIGHT(value) ((value) * 6 / 5)
+
+static struct mmqos_hrt *mmqos_hrt;
+
+s32 mtk_mmqos_get_avail_hrt_bw(enum hrt_type type)
+{
+ u32 i, used_bw = 0;
+
+ if (!mmqos_hrt)
+ return 0xFFFF;
+
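+ /* Sum the HRT bandwidth committed to every type other than @type. */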
+ for (i = 0; i < HRT_TYPE_NUM; i++) {
+ if (i != type)
+ used_bw += mmqos_hrt->hrt_bw[i];
+ }
+
+ if (mmqos_hrt->cam_max_bw)
+ used_bw = used_bw - mmqos_hrt->hrt_bw[HRT_CAM]
+ + mmqos_hrt->cam_max_bw;
+
+ return (mmqos_hrt->hrt_total_bw - used_bw);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_get_avail_hrt_bw);
+
+
+s32 mtk_mmqos_register_bw_throttle_notifier(struct notifier_block *nb)
+{
+ if (!nb || !mmqos_hrt)
+ return -EINVAL;
+ return blocking_notifier_chain_register(
+ &mmqos_hrt->hrt_bw_throttle_notifier,
+ nb);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_register_bw_throttle_notifier);
+
+s32 mtk_mmqos_unregister_bw_throttle_notifier(struct notifier_block *nb)
+{
+ if (!nb || !mmqos_hrt)
+ return -EINVAL;
+ return blocking_notifier_chain_unregister(
+ &mmqos_hrt->hrt_bw_throttle_notifier,
+ nb);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_unregister_bw_throttle_notifier);
+
+void mtk_mmqos_wait_throttle_done(void)
+{
+ u32 wait_result;
+
+ if (!mmqos_hrt)
+ return;
+
+ if (atomic_read(&mmqos_hrt->lock_count) > 0) {
+ pr_notice("begin to blocking for cam_max_bw=%d\n",
+ mmqos_hrt->cam_max_bw);
+ wait_result = wait_event_timeout(mmqos_hrt->hrt_wait,
+ atomic_read(&mmqos_hrt->lock_count) == 0,
+ msecs_to_jiffies(200));
+ pr_notice("blocking wait_result=%d\n", wait_result);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_wait_throttle_done);
+
+s32 mtk_mmqos_set_hrt_bw(enum hrt_type type, u32 bw)
+{
+ if (type >= HRT_TYPE_NUM) {
+ pr_notice("%s: wrong type:%d\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (!mmqos_hrt)
+ return -EINVAL;
+
+ mmqos_hrt->hrt_bw[type] = bw;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_set_hrt_bw);
+
+static void notify_bw_throttle(u32 bw)
+{
+ u64 start_jiffies = jiffies;
+
+ blocking_notifier_call_chain(&mmqos_hrt->hrt_bw_throttle_notifier,
+ (bw > 0) ? BW_THROTTLE_START : BW_THROTTLE_END, NULL);
+
+ pr_notice("%s: notify_time=%u\n", __func__,
+ jiffies_to_msecs(jiffies - start_jiffies));
+}
+
+static void set_camera_max_bw(u32 bw)
+{
+ mmqos_hrt->cam_max_bw = bw;
+ pr_notice("%s: %d\n", __func__, bw);
+
+ if (mmqos_hrt->blocking) {
+ atomic_inc(&mmqos_hrt->lock_count);
+ pr_notice("%s: increase lock_count=%d\n", __func__,
+ atomic_read(&mmqos_hrt->lock_count));
+ }
+ notify_bw_throttle(bw);
+
+ if (mmqos_hrt->blocking) {
+ atomic_dec(&mmqos_hrt->lock_count);
+ wake_up(&mmqos_hrt->hrt_wait);
+ pr_notice("%s: decrease lock_count=%d\n", __func__,
+ atomic_read(&mmqos_hrt->lock_count));
+ }
+}
+
+static void delay_work_handler(struct work_struct *work)
+{
+ mutex_lock(&mmqos_hrt->blocking_lock);
+ set_camera_max_bw(mmqos_hrt->cam_occu_bw);
+ mutex_unlock(&mmqos_hrt->blocking_lock);
+}
+
+static ssize_t camera_max_bw_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ s32 ret;
+ u32 bw = 0;
+
+ ret = kstrtou32(buf, 10, &bw);
+ if (ret) {
+ dev_notice(dev, "wrong camera max bw string:%d\n", ret);
+ return ret;
+ }
+
+ cancel_delayed_work_sync(&mmqos_hrt->work);
+ mmqos_hrt->cam_occu_bw = MULTIPLY_W_DRAM_WEIGHT(bw);
+ mutex_lock(&mmqos_hrt->blocking_lock);
+ if (mmqos_hrt->cam_occu_bw < mmqos_hrt->cam_max_bw) {
+ mmqos_hrt->blocking = false;
+ schedule_delayed_work(&mmqos_hrt->work, 2 * HZ);
+ } else {
+ mmqos_hrt->blocking = true;
+ schedule_delayed_work(&mmqos_hrt->work, 0);
+ }
+ mutex_unlock(&mmqos_hrt->blocking_lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(camera_max_bw);
+
+void mtk_mmqos_init_hrt(struct mmqos_hrt *hrt)
+{
+ if (!hrt)
+ return;
+ mmqos_hrt = hrt;
+ atomic_set(&mmqos_hrt->lock_count, 0);
+ INIT_DELAYED_WORK(&mmqos_hrt->work, delay_work_handler);
+ BLOCKING_INIT_NOTIFIER_HEAD(&mmqos_hrt->hrt_bw_throttle_notifier);
+ mutex_init(&mmqos_hrt->blocking_lock);
+ init_waitqueue_head(&mmqos_hrt->hrt_wait);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_init_hrt);
+
+static struct attribute *mmqos_hrt_sysfs_attrs[] = {
+ &dev_attr_camera_max_bw.attr,
+ NULL
+};
+
+static struct attribute_group mmqos_hrt_sysfs_attr_group = {
+ .name = "mmqos_hrt",
+ .attrs = mmqos_hrt_sysfs_attrs
+};
+
+int mtk_mmqos_register_hrt_sysfs(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &mmqos_hrt_sysfs_attr_group);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_register_hrt_sysfs);
+
+void mtk_mmqos_unregister_hrt_sysfs(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &mmqos_hrt_sysfs_attr_group);
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_unregister_hrt_sysfs);
+
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c
new file mode 100644
index 0000000..15f2ab8
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mt6779.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+
+#include <dt-bindings/interconnect/mtk,mmqos.h>
+#include <dt-bindings/interconnect/mtk,mt6779-emi.h>
+#include <dt-bindings/memory/mt6779-larb-port.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include "mmqos-mtk.h"
+
+
+static const struct mtk_node_desc node_descs_mt6779[] = {
+ DEFINE_MNODE(common0,
+ SLAVE_COMMON(0), 0, MMQOS_NO_LINK),
+ DEFINE_MNODE(common0_port0,
+ MASTER_COMMON_PORT(0, 0), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port1,
+ MASTER_COMMON_PORT(0, 1), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port2,
+ MASTER_COMMON_PORT(0, 2), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port3,
+ MASTER_COMMON_PORT(0, 3), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port4,
+ MASTER_COMMON_PORT(0, 4), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port5,
+ MASTER_COMMON_PORT(0, 5), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port6,
+ MASTER_COMMON_PORT(0, 6), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port7,
+ MASTER_COMMON_PORT(0, 7), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(common0_port8,
+ MASTER_COMMON_PORT(0, 8), 0, SLAVE_COMMON(0)),
+ DEFINE_MNODE(larb0, SLAVE_LARB(0), 0, MASTER_COMMON_PORT(0, 0)),
+ DEFINE_MNODE(larb1, SLAVE_LARB(1), 0, MASTER_COMMON_PORT(0, 1)),
+ DEFINE_MNODE(larb2, SLAVE_LARB(2), 0, MASTER_COMMON_PORT(0, 2)),
+ DEFINE_MNODE(larb3, SLAVE_LARB(3), 0, MASTER_COMMON_PORT(0, 3)),
+ DEFINE_MNODE(larb5, SLAVE_LARB(5), 0, MASTER_COMMON_PORT(0, 4)),
+ DEFINE_MNODE(larb8, SLAVE_LARB(8), 0, MASTER_COMMON_PORT(0, 5)),
+ DEFINE_MNODE(larb9, SLAVE_LARB(9), 0, MASTER_COMMON_PORT(0, 7)),
+ DEFINE_MNODE(larb10, SLAVE_LARB(10), 0, MASTER_COMMON_PORT(0, 6)),
+ DEFINE_MNODE(larb12, SLAVE_LARB(12), 0, MASTER_COMMON_PORT(0, 8)),
+ DEFINE_MNODE(larb13, SLAVE_LARB(13), 0, MASTER_COMMON_PORT(0, 6)),
+ DEFINE_MNODE(disp_postmask0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_POSTMASK0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl0_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_HDR), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl1_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_HDR), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_ovl1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_pvric0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_PVRIC0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_RDMA0), 7, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_wdma0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_WDMA0), 8, SLAVE_LARB(0)),
+ DEFINE_MNODE(disp_fake0,
+ MASTER_LARB_PORT(M4U_PORT_DISP_FAKE0), 7, SLAVE_LARB(0)),
+
+ DEFINE_MNODE(disp_ovl0_2l_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_2L_HDR), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl1_2l_hdr,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_2L_HDR), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl0_2l,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL0_2L), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_ovl1_2l,
+ MASTER_LARB_PORT(M4U_PORT_DISP_OVL1_2L), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_RDMA1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_pvric0,
+ MASTER_LARB_PORT(M4U_PORT_MDP_PVRIC0), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_pvric1,
+ MASTER_LARB_PORT(M4U_PORT_MDP_PVRIC1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_MDP_RDMA0), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_MDP_RDMA1), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot0_r,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT0_R), 8, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot0_w,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT0_W), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot1_r,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT1_R), 8, SLAVE_LARB(1)),
+ DEFINE_MNODE(mdp_wrot1_w,
+ MASTER_LARB_PORT(M4U_PORT_MDP_WROT1_W), 7, SLAVE_LARB(1)),
+ DEFINE_MNODE(disp_fake1,
+ MASTER_LARB_PORT(M4U_PORT_DISP_FAKE1), 7, SLAVE_LARB(1)),
+
+ DEFINE_MNODE(vdec_mc_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_MC_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ufo_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_UFO_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pp_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PP_EXT), 8, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pred_rd_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PRED_RD_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_pred_wr_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PRED_WR_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ppwrap_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_PPWRAP_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_tile_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_TILE_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_vld_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_VLD_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_vld2_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_VLD2_EXT), 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_avc_mv_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_AVC_MV_EXT),
+ 7, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_ufo_enc_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_UFO_ENC_EXT),
+ 8, SLAVE_LARB(2)),
+ DEFINE_MNODE(vdec_rg_ctrl_dma_ext,
+ MASTER_LARB_PORT(M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT),
+ 7, SLAVE_LARB(2)),
+
+ DEFINE_MNODE(venc_rcpu,
+ MASTER_LARB_PORT(M4U_PORT_VENC_RCPU), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_rec,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REC), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_sv_comv,
+ MASTER_LARB_PORT(M4U_PORT_VENC_SV_COMV), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_rd_comv,
+ MASTER_LARB_PORT(M4U_PORT_VENC_RD_COMV), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_rdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_rdma_lite,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_RDMA_LITE),
+ 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_y_rdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_Y_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_c_rdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_C_RDMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_q_table,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_Q_TABLE), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgenc_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGENC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgdec_wdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGDEC_WDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(jpgdec_bsdma,
+ MASTER_LARB_PORT(M4U_PORT_JPGDEC_BSDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_wdma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_WDMA), 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_nbm_wdma_lite,
+ MASTER_LARB_PORT(M4U_PORT_VENC_NBM_WDMA_LITE),
+ 8, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_cur_luma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_CUR_LUMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_cur_chroma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_CUR_CHROMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_ref_luma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REF_LUMA), 7, SLAVE_LARB(3)),
+ DEFINE_MNODE(venc_ref_chroma,
+ MASTER_LARB_PORT(M4U_PORT_VENC_REF_CHROMA), 7, SLAVE_LARB(3)),
+
+ DEFINE_MNODE(img_imgi_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMGI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_imgbi_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMGBI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_dmgi_d1,
+ MASTER_LARB_PORT(M4U_PORT_DMGI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_depi_d1,
+ MASTER_LARB_PORT(M4U_PORT_DEPI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_lcei_d1,
+ MASTER_LARB_PORT(M4U_PORT_LCEI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smti_d1,
+ MASTER_LARB_PORT(M4U_PORT_SMTI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smto_d2,
+ MASTER_LARB_PORT(M4U_PORT_SMTO_D2), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_smto_d1,
+ MASTER_LARB_PORT(M4U_PORT_SMTO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_crzo_d1,
+ MASTER_LARB_PORT(M4U_PORT_CRZO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_img3o_d1,
+ MASTER_LARB_PORT(M4U_PORT_IMG3O_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_vipi_d1,
+ MASTER_LARB_PORT(M4U_PORT_VIPI_D1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_WPE_RDMA1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_WPE_RDMA0), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_wpe_wdma,
+ MASTER_LARB_PORT(M4U_PORT_WPE_WDMA), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_timgo_d1,
+ MASTER_LARB_PORT(M4U_PORT_TIMGO_D1), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA0), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma1,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma2,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA2), 6, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_rdma3,
+ MASTER_LARB_PORT(M4U_PORT_MFB_RDMA3), 6, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_mfb_wdma,
+ MASTER_LARB_PORT(M4U_PORT_MFB_WDMA), 8, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve1,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE1), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve2,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE2), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve3,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE3), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve4,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE4), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve5,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE5), 7, SLAVE_LARB(5)),
+ DEFINE_MNODE(img_reserve6,
+ MASTER_LARB_PORT(M4U_PORT_RESERVE6), 7, SLAVE_LARB(5)),
+
+ DEFINE_MNODE(ipe_fdvt_rda,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_RDA), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_rdb,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_RDB), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_wra,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_WRA), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fdvt_wrb,
+ MASTER_LARB_PORT(M4U_PORT_FDVT_WRB), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_rd0,
+ MASTER_LARB_PORT(M4U_PORT_FE_RD0), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_rd1,
+ MASTER_LARB_PORT(M4U_PORT_FE_RD1), 7, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_wr0,
+ MASTER_LARB_PORT(M4U_PORT_FE_WR0), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_fe_wr1,
+ MASTER_LARB_PORT(M4U_PORT_FE_WR1), 8, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_rsc_rdma0,
+ MASTER_LARB_PORT(M4U_PORT_RSC_RDMA0), 6, SLAVE_LARB(8)),
+ DEFINE_MNODE(ipe_rsc_wdma,
+ MASTER_LARB_PORT(M4U_PORT_RSC_WDMA), 7, SLAVE_LARB(8)),
+
+ DEFINE_MNODE(cam_imgo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rrzo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_lsci_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_bpci_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_yuvo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ufdi_r2_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rawi_r2_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rawi_r5_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_C), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_1,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_1), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_2,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_2), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_3,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_3), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_4,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_4), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_5,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_5), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_camsv_6,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_6), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_aao_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_afo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_flko_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_lceso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_crzo_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ltmso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_rsso_r1_c,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_C), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ccui,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CCUI), 7, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_ccuo,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CCUO), 8, SLAVE_LARB(9)),
+ DEFINE_MNODE(cam_fake,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FAKE), 8, SLAVE_LARB(9)),
+
+ DEFINE_MNODE(cam_imgo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rrzo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lsci_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_bpci_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_yuvo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ufdi_r2_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r2_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r5_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_A), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_imgo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_IMGO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rrzo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RRZO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lsci_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LSCI_R1_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_bpci_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_BPCI_R1_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_yuvo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_YUVO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ufdi_r2_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_UFDI_R2_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r2_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R2_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rawi_r5_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RAWI_R5_B), 7, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_camsv_0,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CAMSV_0), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_aao_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_afo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_flko_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lceso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_crzo_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_aao_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AAO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_afo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_AFO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_flko_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_FLKO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_lceso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LCESO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_crzo_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_CRZO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ltmso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rsso_r1_a,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_A), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_ltmso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_LTMSO_R1_B), 8, SLAVE_LARB(10)),
+ DEFINE_MNODE(cam_rsso_r1_b,
+ MASTER_LARB_PORT(M4U_PORT_CAM_RSSO_R1_B), 8, SLAVE_LARB(10)),
+};
+
+static const char * const comm_muxes_mt6779[] = { "mm" };
+
+static const char * const comm_icc_path_names_mt6779[] = { "mmsys_path" };
+
+static const struct mtk_mmqos_desc mmqos_desc_mt6779 = {
+ .nodes = node_descs_mt6779,
+ .num_nodes = ARRAY_SIZE(node_descs_mt6779),
+ .comm_muxes = comm_muxes_mt6779,
+ .comm_icc_path_names = comm_icc_path_names_mt6779,
+ .max_ratio = 40,
+ .hrt = {
+ .hrt_bw = {1600, 0, 0},
+ .hrt_total_bw = 7466, /* TODO: use DRAMC API */
+ }
+};
+
+
+static const struct of_device_id mtk_mmqos_mt6779_of_ids[] = {
+ {
+ .compatible = "mediatek,mt6779-mmqos",
+ .data = &mmqos_desc_mt6779,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_mmqos_mt6779_of_ids);
+
+static struct platform_driver mtk_mmqos_mt6779_driver = {
+ .probe = mtk_mmqos_probe,
+ .remove = mtk_mmqos_remove,
+ .driver = {
+ .name = "mtk-mt6779-mmqos",
+ .of_match_table = mtk_mmqos_mt6779_of_ids,
+ },
+};
+module_platform_driver(mtk_mmqos_mt6779_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c
new file mode 100644
index 0000000..81066c3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+
+#include <dt-bindings/interconnect/mtk,mmqos.h>
+#include <linux/clk.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_mmdvfs.h>
+#include <soc/mediatek/smi.h>
+
+#include "mmqos-mtk.h"
+
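+/* Divide (a) by 2^(b), rounding up. */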
+#define SHIFT_ROUND(a, b) ((((a) - 1) >> (b)) + 1)
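+/* Interconnect bandwidth is given in kBps; convert it to MBps. */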
+#define icc_to_MBps(x) ((x) / 1000)
+
+static void mmqos_update_comm_bw(struct device *dev,
+ u32 comm_port, u32 freq, u64 mix_bw, u64 bw_peak, bool qos_bound)
+{
+ u32 comm_bw = 0;
+ u32 value;
+
+ if (!freq || !dev)
+ return;
+
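+ /*
+ * comm_bw expresses bytes per bus cycle in 1/256 units:
+ * (mix_bw in MBps) * 256 / (freq in MHz).
+ */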
+ if (mix_bw)
+ comm_bw = (mix_bw << 8) / freq;
+
+ if (comm_bw)
+ value = ((comm_bw > 0xfff) ? 0xfff : comm_bw) |
+ ((bw_peak > 0 || !qos_bound) ? 0x1000 : 0x3000);
+ else
+ value = 0x1200;
+
+ mtk_smi_common_bw_set(dev, comm_port, value);
+
+ dev_dbg(dev, "comm port=%d bw=%d freq=%d qos_bound=%d value=%#x\n",
+ comm_port, comm_bw, freq, qos_bound, value);
+}
+
+static int update_mm_clk(struct notifier_block *nb,
+ unsigned long value, void *v)
+{
+ struct mtk_mmqos *mmqos =
+ container_of(nb, struct mtk_mmqos, nb);
+ struct common_node *comm_node;
+ struct common_port_node *comm_port;
+
+ list_for_each_entry(comm_node, &mmqos->comm_list, list) {
+ comm_node->freq = clk_get_rate(comm_node->clk) / 1000000;
+ list_for_each_entry(comm_port,
+ &comm_node->comm_port_list, list) {
+ mutex_lock(&comm_port->bw_lock);
+ if (comm_port->latest_mix_bw
+ || comm_port->latest_peak_bw) {
+ mmqos_update_comm_bw(comm_port->larb_dev,
+ comm_port->base->icc_node->id & 0xff,
+ comm_port->common->freq,
+ icc_to_MBps(comm_port->latest_mix_bw),
+ icc_to_MBps(comm_port->latest_peak_bw),
+ mmqos->qos_bound);
+ }
+ mutex_unlock(&comm_port->bw_lock);
+ }
+ }
+ return 0;
+}
+
+static void set_comm_icc_bw_handler(struct work_struct *work)
+{
+ struct common_node *comm_node = container_of(
+ work, struct common_node, work);
+ struct common_port_node *comm_port_node;
+ u32 avg_bw = 0, peak_bw = 0;
+
+ list_for_each_entry(comm_port_node, &comm_node->comm_port_list, list) {
+ mutex_lock(&comm_port_node->bw_lock);
+ avg_bw += comm_port_node->latest_avg_bw;
+ peak_bw += (comm_port_node->latest_peak_bw
+ & ~(MTK_MMQOS_MAX_BW));
+ mutex_unlock(&comm_port_node->bw_lock);
+ }
+ icc_set_bw(comm_node->icc_path, avg_bw, peak_bw);
+}
+
+static int mtk_mmqos_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct larb_node *larb_node;
+ struct larb_port_node *larb_port_node;
+ struct common_port_node *comm_port_node;
+ struct common_node *comm_node;
+ struct mtk_mmqos *mmqos = container_of(dst->provider,
+ struct mtk_mmqos, prov);
+ u32 value = 1;
+
+ switch (dst->id >> 16) {
+ case MTK_MMQOS_NODE_COMMON:
+ comm_node = (struct common_node *)dst->data;
+ queue_work(mmqos->wq, &comm_node->work);
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ comm_port_node = (struct common_port_node *)dst->data;
+ mutex_lock(&comm_port_node->bw_lock);
+ comm_port_node->latest_mix_bw = comm_port_node->base->mix_bw;
+ comm_port_node->latest_peak_bw = dst->peak_bw;
+ comm_port_node->latest_avg_bw = dst->avg_bw;
+ mmqos_update_comm_bw(comm_port_node->larb_dev,
+ dst->id & 0xff, comm_port_node->common->freq,
+ icc_to_MBps(comm_port_node->latest_mix_bw),
+ icc_to_MBps(comm_port_node->latest_peak_bw),
+ mmqos->qos_bound);
+ mutex_unlock(&comm_port_node->bw_lock);
+ break;
+ case MTK_MMQOS_NODE_LARB:
+ larb_port_node = (struct larb_port_node *)src->data;
+ larb_node = (struct larb_node *)dst->data;
+ if (larb_port_node->base->mix_bw)
+ value = SHIFT_ROUND(
+ icc_to_MBps(larb_port_node->base->mix_bw),
+ larb_port_node->bw_ratio);
+ if (value > mmqos->max_ratio)
+ value = mmqos->max_ratio;
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev,
+ src->id & 0xff, value);
+
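+ /* Ports 9 and 11 of larb 1 use a fixed ratio of 8 (fixed platform setting). */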
+ if ((dst->id & 0xff) == 1) {
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev, 9, 8);
+ mtk_smi_larb_bw_set(
+ larb_node->larb_dev, 11, 8);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mtk_mmqos_aggregate(struct icc_node *node,
+ u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct mmqos_base_node *base_node = NULL;
+ u32 mix_bw = peak_bw;
+
+ switch (node->id >> 16) {
+ case MTK_MMQOS_NODE_LARB_PORT:
+ base_node = ((struct larb_port_node *)node->data)->base;
+ if (peak_bw)
+ mix_bw = SHIFT_ROUND(peak_bw * 3, 1);
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ base_node = ((struct common_port_node *)node->data)->base;
+ break;
+ default:
+ return 0;
+ }
+
+ if (base_node) {
+ if (*agg_avg == 0 && *agg_peak == 0)
+ base_node->mix_bw = 0;
+ base_node->mix_bw += peak_bw ? mix_bw : avg_bw;
+ }
+
+ *agg_avg += avg_bw;
+ if (peak_bw == MTK_MMQOS_MAX_BW)
+ *agg_peak |= MTK_MMQOS_MAX_BW;
+ else
+ *agg_peak += peak_bw;
+ return 0;
+}
+
+static struct icc_node *mtk_mmqos_xlate(
+ struct of_phandle_args *spec, void *data)
+{
+ struct icc_onecell_data *icc_data;
+ s32 i;
+
+ if (!spec || !data)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ icc_data = (struct icc_onecell_data *)data;
+
+ for (i = 0; i < icc_data->num_nodes; i++)
+ if (icc_data->nodes[i]->id == spec->args[0])
+ return icc_data->nodes[i];
+
+ pr_notice("%s: invalid index %u\n", __func__, spec->args[0]);
+ return ERR_PTR(-EINVAL);
+}
+
+int mtk_mmqos_probe(struct platform_device *pdev)
+{
+ struct mtk_mmqos *mmqos;
+ struct of_phandle_iterator it;
+ struct icc_onecell_data *data;
+ struct icc_node *node, *temp;
+ struct mmqos_base_node *base_node;
+ struct common_node *comm_node;
+ struct common_port_node *comm_port_node;
+ struct larb_node *larb_node;
+ struct larb_port_node *larb_port_node;
+ struct mtk_smi_iommu smi_imu;
+ int i, id, num_larbs = 0, ret;
+ const struct mtk_mmqos_desc *mmqos_desc;
+ const struct mtk_node_desc *node_desc;
+ struct device *larb_dev;
+ struct mmqos_hrt *hrt;
+
+ mmqos = devm_kzalloc(&pdev->dev, sizeof(*mmqos), GFP_KERNEL);
+ if (!mmqos)
+ return -ENOMEM;
+ mmqos->dev = &pdev->dev;
+
+ of_for_each_phandle(
+ &it, ret, pdev->dev.of_node, "mediatek,larbs", NULL, 0) {
+ struct device_node *np;
+ struct platform_device *larb_pdev;
+
+ np = of_node_get(it.node);
+ if (!of_device_is_available(np))
+ continue;
+
+ larb_pdev = of_find_device_by_node(np);
+ if (!larb_pdev) {
+ larb_pdev = of_platform_device_create(
+ np, NULL, platform_bus_type.dev_root);
+ if (!larb_pdev || !larb_pdev->dev.driver) {
+ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (of_property_read_u32(np, "mediatek,larb-id", &id))
+ id = num_larbs;
+ smi_imu.larb_imu[id].dev = &larb_pdev->dev;
+ num_larbs += 1;
+ }
+
+ INIT_LIST_HEAD(&mmqos->comm_list);
+
+ INIT_LIST_HEAD(&mmqos->prov.nodes);
+ mmqos->prov.set = mtk_mmqos_set;
+ mmqos->prov.aggregate = mtk_mmqos_aggregate;
+ mmqos->prov.xlate = mtk_mmqos_xlate;
+ mmqos->prov.dev = &pdev->dev;
+
+ ret = icc_provider_add(&mmqos->prov);
+ if (ret) {
+ dev_notice(&pdev->dev, "icc_provider_add failed:%d\n", ret);
+ return ret;
+ }
+
+ mmqos_desc = (struct mtk_mmqos_desc *)
+ of_device_get_match_data(&pdev->dev);
+ if (!mmqos_desc)
+ return -EINVAL;
+
+ data = devm_kzalloc(&pdev->dev,
+ sizeof(*data) + mmqos_desc->num_nodes * sizeof(node),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < mmqos_desc->num_nodes; i++) {
+ node_desc = &mmqos_desc->nodes[i];
+ node = icc_node_create(node_desc->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+ icc_node_add(node, &mmqos->prov);
+
+ if (node_desc->link != MMQOS_NO_LINK) {
+ ret = icc_link_create(node, node_desc->link);
+ if (ret)
+ goto err;
+ }
+ node->name = node_desc->name;
+
+ base_node = devm_kzalloc(
+ &pdev->dev, sizeof(*base_node), GFP_KERNEL);
+ if (!base_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ base_node->icc_node = node;
+
+ switch (node->id >> 16) {
+ case MTK_MMQOS_NODE_COMMON:
+ comm_node = devm_kzalloc(
+ &pdev->dev, sizeof(*comm_node), GFP_KERNEL);
+ if (!comm_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ INIT_WORK(&comm_node->work, set_comm_icc_bw_handler);
+ comm_node->clk = devm_clk_get(&pdev->dev,
+ mmqos_desc->comm_muxes[node->id & 0xff]);
+ if (IS_ERR(comm_node->clk)) {
+ dev_notice(&pdev->dev, "get clk fail:%s\n",
+ mmqos_desc->comm_muxes[
+ node->id & 0xff]);
+ ret = -EINVAL;
+ goto err;
+ }
+ comm_node->freq = clk_get_rate(comm_node->clk) / 1000000;
+ INIT_LIST_HEAD(&comm_node->list);
+ list_add_tail(&comm_node->list, &mmqos->comm_list);
+ INIT_LIST_HEAD(&comm_node->comm_port_list);
+ comm_node->icc_path = of_icc_get(&pdev->dev,
+ mmqos_desc->comm_icc_path_names[
+ node->id & 0xff]);
+ if (IS_ERR_OR_NULL(comm_node->icc_path)) {
+ dev_notice(&pdev->dev,
+ "get icc_path fail:%s\n",
+ mmqos_desc->comm_icc_path_names[
+ node->id & 0xff]);
+ ret = -EINVAL;
+ goto err;
+ }
+ comm_node->base = base_node;
+ node->data = (void *)comm_node;
+ break;
+ case MTK_MMQOS_NODE_COMMON_PORT:
+ comm_port_node = devm_kzalloc(&pdev->dev,
+ sizeof(*comm_port_node), GFP_KERNEL);
+ if (!comm_port_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ mutex_init(&comm_port_node->bw_lock);
+ comm_port_node->common = node->links[0]->data;
+ INIT_LIST_HEAD(&comm_port_node->list);
+ list_add_tail(&comm_port_node->list,
+ &comm_port_node->common->comm_port_list);
+ comm_port_node->base = base_node;
+ node->data = (void *)comm_port_node;
+ break;
+ case MTK_MMQOS_NODE_LARB:
+ larb_node = devm_kzalloc(
+ &pdev->dev, sizeof(*larb_node), GFP_KERNEL);
+ if (!larb_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ comm_port_node = node->links[0]->data;
+ larb_dev = smi_imu.larb_imu[node->id &
+ (MTK_LARB_NR_MAX-1)].dev;
+ if (larb_dev) {
+ comm_port_node->larb_dev = larb_dev;
+ larb_node->larb_dev = larb_dev;
+ }
+ larb_node->base = base_node;
+ node->data = (void *)larb_node;
+ break;
+ case MTK_MMQOS_NODE_LARB_PORT:
+ larb_port_node = devm_kzalloc(&pdev->dev,
+ sizeof(*larb_port_node), GFP_KERNEL);
+ if (!larb_port_node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ larb_port_node->bw_ratio = node_desc->bw_ratio;
+ larb_port_node->base = base_node;
+ node->data = (void *)larb_port_node;
+ break;
+ default:
+ dev_notice(&pdev->dev,
+ "invalid node id:%#x\n", node->id);
+ ret = -EINVAL;
+ goto err;
+ }
+ data->nodes[i] = node;
+ }
+
+ data->num_nodes = mmqos_desc->num_nodes;
+ mmqos->prov.data = data;
+ mmqos->max_ratio = mmqos_desc->max_ratio;
+
+ mmqos->wq = create_singlethread_workqueue("mmqos_work_queue");
+ if (!mmqos->wq) {
+ dev_notice(&pdev->dev, "work queue create fail\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hrt = devm_kzalloc(&pdev->dev, sizeof(*hrt), GFP_KERNEL);
+ if (!hrt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ memcpy(hrt, &mmqos_desc->hrt, sizeof(mmqos_desc->hrt));
+ mtk_mmqos_init_hrt(hrt);
+
+ mmqos->nb.notifier_call = update_mm_clk;
+ register_mmdvfs_notifier(&mmqos->nb);
+
+ ret = mtk_mmqos_register_hrt_sysfs(&pdev->dev);
+ if (ret)
+ dev_notice(&pdev->dev, "sysfs create fail\n");
+
+ platform_set_drvdata(pdev, mmqos);
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(&mmqos->prov);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_probe);
+
+int mtk_mmqos_remove(struct platform_device *pdev)
+{
+ struct mtk_mmqos *mmqos = platform_get_drvdata(pdev);
+ struct icc_node *node, *temp;
+
+ list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(&mmqos->prov);
+ unregister_mmdvfs_notifier(&mmqos->nb);
+ destroy_workqueue(mmqos->wq);
+ mtk_mmqos_unregister_hrt_sysfs(&pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mmqos_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h
new file mode 100644
index 0000000..c6ebf33
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mmqos-mtk.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
+ */
+#ifndef MMQOS_MTK_H
+#define MMQOS_MTK_H
+
+#include <linux/interconnect-provider.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/mmqos.h>
+
+#define MMQOS_NO_LINK (0xffffffff)
+#define MMQOS_MAX_COMM_PORT_NUM (15)
+
+struct mmqos_hrt {
+ u32 hrt_bw[HRT_TYPE_NUM];
+ u32 hrt_total_bw;
+ u32 cam_max_bw;
+ u32 cam_occu_bw;
+ bool blocking;
+ struct delayed_work work;
+ struct blocking_notifier_head hrt_bw_throttle_notifier;
+ atomic_t lock_count;
+ wait_queue_head_t hrt_wait;
+ struct mutex blocking_lock;
+};
+
+struct mmqos_base_node {
+ struct icc_node *icc_node;
+ u32 mix_bw;
+};
+
+struct common_node {
+ struct mmqos_base_node *base;
+ const char *clk_name;
+ struct clk *clk;
+ u64 freq;
+ struct list_head list;
+ struct icc_path *icc_path;
+ struct work_struct work;
+ struct list_head comm_port_list;
+};
+
+struct common_port_node {
+ struct mmqos_base_node *base;
+ struct common_node *common;
+ struct device *larb_dev;
+ struct mutex bw_lock;
+ u32 latest_mix_bw;
+ u32 latest_peak_bw;
+ u32 latest_avg_bw;
+ struct list_head list;
+};
+
+struct larb_node {
+ struct mmqos_base_node *base;
+ struct device *larb_dev;
+};
+
+struct larb_port_node {
+ struct mmqos_base_node *base;
+ u16 bw_ratio;
+};
+
+struct mtk_mmqos {
+ struct device *dev;
+ struct icc_provider prov;
+ struct notifier_block nb;
+ struct list_head comm_list;
+ struct workqueue_struct *wq;
+ u32 max_ratio;
+ bool qos_bound; /* TODO: set qos_bound to true if necessary */
+};
+
+struct mtk_node_desc {
+ const char *name;
+ u32 id;
+ u32 link;
+ u16 bw_ratio;
+};
+
+struct mtk_mmqos_desc {
+ const struct mtk_node_desc *nodes;
+ const size_t num_nodes;
+ const char * const *comm_muxes;
+ const char * const *comm_icc_path_names;
+ const u32 max_ratio;
+ const struct mmqos_hrt hrt;
+};
+
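+/*
+ * DEFINE_MNODE() fills one mtk_node_desc; _link is the id of the node's
+ * single downstream node (MMQOS_NO_LINK denotes a node without one).
+ */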
+#define DEFINE_MNODE(_name, _id, _bw_ratio, _link) { \
+ .name = #_name, \
+ .id = _id, \
+ .bw_ratio = _bw_ratio, \
+ .link = _link, \
+ }
+
+int mtk_mmqos_probe(struct platform_device *pdev);
+int mtk_mmqos_remove(struct platform_device *pdev);
+
+/* For HRT */
+void mtk_mmqos_init_hrt(struct mmqos_hrt *hrt);
+int mtk_mmqos_register_hrt_sysfs(struct device *dev);
+void mtk_mmqos_unregister_hrt_sysfs(struct device *dev);
+#endif /* MMQOS_MTK_H */
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c
new file mode 100644
index 0000000..f5870fc
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/mediatek/mtk-dvfsrc-emi.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_dvfsrc.h>
+#include <dt-bindings/interconnect/mtk,mt6873-emi.h>
+
+enum mtk_icc_name {
+ SLAVE_DDR_EMI,
+ MASTER_MCUSYS,
+ MASTER_GPUSYS,
+ MASTER_MMSYS,
+ MASTER_MM_VPU,
+ MASTER_MM_DISP,
+ MASTER_MM_VDEC,
+ MASTER_MM_VENC,
+ MASTER_MM_CAM,
+ MASTER_MM_IMG,
+ MASTER_MM_MDP,
+ MASTER_VPUSYS,
+ MASTER_VPU_PORT_0,
+ MASTER_VPU_PORT_1,
+ MASTER_MDLASYS,
+ MASTER_MDLA_PORT_0,
+ MASTER_UFS,
+ MASTER_PCIE,
+ MASTER_USB,
+ MASTER_WIFI,
+ MASTER_BT,
+ MASTER_NETSYS,
+ MASTER_DBGIF,
+
+ SLAVE_HRT_DDR_EMI,
+ MASTER_HRT_MMSYS,
+ MASTER_HRT_MM_DISP,
+ MASTER_HRT_MM_VDEC,
+ MASTER_HRT_MM_VENC,
+ MASTER_HRT_MM_CAM,
+ MASTER_HRT_MM_IMG,
+ MASTER_HRT_MM_MDP,
+ MASTER_HRT_DBGIF,
+};
+
+#define MAX_LINKS 1
+
+/**
+ * struct mtk_icc_node - MediaTek-specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @ep: the endpoint type of this node (0 for masters, 1 for the DDR EMI
+ *      endpoint, 2 for the HRT DDR EMI endpoint)
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @sum_avg: current sum aggregate value of all avg bw kBps requests
+ * @max_peak: current max aggregate value of all peak bw kBps requests
+ */
+struct mtk_icc_node {
+ unsigned char *name;
+ int ep;
+ u16 id;
+ u16 links[MAX_LINKS];
+ u16 num_links;
+ u64 sum_avg;
+ u64 max_peak;
+};
+
+struct mtk_icc_desc {
+ struct mtk_icc_node **nodes;
+ size_t num_nodes;
+};
+
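+/*
+ * DEFINE_MNODE() declares a static mtk_icc_node; num_links is computed
+ * from the number of variadic link ids by sizing a compound-literal array.
+ */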
+#define DEFINE_MNODE(_name, _id, _ep, ...) \
+ static struct mtk_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .ep = _ep, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+}
+
+DEFINE_MNODE(ddr_emi, SLAVE_DDR_EMI, 1);
+DEFINE_MNODE(mcusys, MASTER_MCUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(gpu, MASTER_GPUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mmsys, MASTER_MMSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mm_vpu, MASTER_MM_VPU, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_disp, MASTER_MM_DISP, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_vdec, MASTER_MM_VDEC, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_venc, MASTER_MM_VENC, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_cam, MASTER_MM_CAM, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_img, MASTER_MM_IMG, 0, MASTER_MMSYS);
+DEFINE_MNODE(mm_mdp, MASTER_MM_MDP, 0, MASTER_MMSYS);
+DEFINE_MNODE(vpusys, MASTER_VPUSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(vpu_port_0, MASTER_VPU_PORT_0, 0, MASTER_VPUSYS);
+DEFINE_MNODE(vpu_port_1, MASTER_VPU_PORT_1, 0, MASTER_VPUSYS);
+DEFINE_MNODE(mdlasys, MASTER_MDLASYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(mdla_port_0, MASTER_MDLA_PORT_0, 0, MASTER_MDLASYS);
+DEFINE_MNODE(ufs, MASTER_UFS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(pcie, MASTER_PCIE, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(usb, MASTER_USB, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(wifi, MASTER_WIFI, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(bt, MASTER_BT, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(netsys, MASTER_NETSYS, 0, SLAVE_DDR_EMI);
+DEFINE_MNODE(dbgif, MASTER_DBGIF, 0, SLAVE_DDR_EMI);
+
+DEFINE_MNODE(hrt_ddr_emi, SLAVE_HRT_DDR_EMI, 2);
+DEFINE_MNODE(hrt_mmsys, MASTER_HRT_MMSYS, 0, SLAVE_HRT_DDR_EMI);
+DEFINE_MNODE(hrt_mm_disp, MASTER_HRT_MM_DISP, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_vdec, MASTER_HRT_MM_VDEC, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_venc, MASTER_HRT_MM_VENC, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_cam, MASTER_HRT_MM_CAM, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_img, MASTER_HRT_MM_IMG, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_mm_mdp, MASTER_HRT_MM_MDP, 0, MASTER_HRT_MMSYS);
+DEFINE_MNODE(hrt_dbgif, MASTER_HRT_DBGIF, 0, SLAVE_HRT_DDR_EMI);
+
+static struct mtk_icc_node *mt6873_icc_nodes[] = {
+ [MT6873_SLAVE_DDR_EMI] = &ddr_emi,
+ [MT6873_MASTER_MCUSYS] = &mcusys,
+ [MT6873_MASTER_GPUSYS] = &gpu,
+ [MT6873_MASTER_MMSYS] = &mmsys,
+ [MT6873_MASTER_MM_VPU] = &mm_vpu,
+ [MT6873_MASTER_MM_DISP] = &mm_disp,
+ [MT6873_MASTER_MM_VDEC] = &mm_vdec,
+ [MT6873_MASTER_MM_VENC] = &mm_venc,
+ [MT6873_MASTER_MM_CAM] = &mm_cam,
+ [MT6873_MASTER_MM_IMG] = &mm_img,
+ [MT6873_MASTER_MM_MDP] = &mm_mdp,
+ [MT6873_MASTER_VPUSYS] = &vpusys,
+ [MT6873_MASTER_VPU_0] = &vpu_port_0,
+ [MT6873_MASTER_VPU_1] = &vpu_port_1,
+ [MT6873_MASTER_MDLASYS] = &mdlasys,
+ [MT6873_MASTER_MDLA_0] = &mdla_port_0,
+ [MT6873_MASTER_UFS] = &ufs,
+ [MT6873_MASTER_PCIE] = &pcie,
+ [MT6873_MASTER_USB] = &usb,
+ [MT6873_MASTER_WIFI] = &wifi,
+ [MT6873_MASTER_BT] = &bt,
+ [MT6873_MASTER_NETSYS] = &netsys,
+ [MT6873_MASTER_DBGIF] = &dbgif,
+
+ [MT6873_SLAVE_HRT_DDR_EMI] = &hrt_ddr_emi,
+ [MT6873_MASTER_HRT_MMSYS] = &hrt_mmsys,
+ [MT6873_MASTER_HRT_MM_DISP] = &hrt_mm_disp,
+ [MT6873_MASTER_HRT_MM_VDEC] = &hrt_mm_vdec,
+ [MT6873_MASTER_HRT_MM_VENC] = &hrt_mm_venc,
+ [MT6873_MASTER_HRT_MM_CAM] = &hrt_mm_cam,
+ [MT6873_MASTER_HRT_MM_IMG] = &hrt_mm_img,
+ [MT6873_MASTER_HRT_MM_MDP] = &hrt_mm_mdp,
+ [MT6873_MASTER_HRT_DBGIF] = &hrt_dbgif,
+};
+
+static struct mtk_icc_desc mt6873_icc = {
+ .nodes = mt6873_icc_nodes,
+ .num_nodes = ARRAY_SIZE(mt6873_icc_nodes),
+};
+
+static const struct of_device_id emi_icc_of_match[] = {
+ { .compatible = "mediatek,mt6873-dvfsrc", .data = &mt6873_icc },
+ { .compatible = "mediatek,mt6880-dvfsrc", .data = &mt6873_icc },
+ { .compatible = "mediatek,mt6890-dvfsrc", .data = &mt6873_icc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, emi_icc_of_match);
+
+static int emi_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct mtk_icc_node *in;
+
+ in = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ in->sum_avg = *agg_avg;
+ in->max_peak = *agg_peak;
+
+ return 0;
+}
+
+static int emi_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ int ret = 0;
+ struct mtk_icc_node *node;
+
+ node = dst->data;
+
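+ /* ep 1 is the ddr_emi endpoint, ep 2 the hrt_ddr_emi endpoint; masters use 0 */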
+ if (node->ep == 1) {
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_PEAK_BW_REQUEST,
+ node->max_peak);
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_BW_REQUEST,
+ node->sum_avg);
+ } else if (node->ep == 2) {
+ mtk_dvfsrc_send_request(src->provider->dev,
+ MTK_DVFSRC_CMD_HRTBW_REQUEST,
+ node->sum_avg);
+ }
+
+ return ret;
+}
+
+static int emi_icc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct mtk_icc_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct icc_node *node;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct mtk_icc_node **mnodes;
+ struct icc_node *tmp;
+ size_t num_nodes, i, j;
+ int ret;
+
+ match = of_match_node(emi_icc_of_match, dev->parent->of_node);
+
+ if (!match) {
+ dev_err(dev, "invalid compatible string\n");
+ return -ENODEV;
+ }
+
+ desc = match->data;
+ mnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider->dev = pdev->dev.parent;
+ provider->set = emi_icc_set;
+ provider->aggregate = emi_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ node = icc_node_create(mnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = mnodes[i]->name;
+ node->data = mnodes[i];
+ icc_node_add(node, provider);
+
+ /* populate links */
+ for (j = 0; j < mnodes[i]->num_links; j++)
+ icc_link_create(node, mnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, provider);
+
+ return 0;
+err:
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int emi_icc_remove(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ struct icc_node *n, *tmp;
+
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static struct platform_driver emi_icc_driver = {
+ .probe = emi_icc_probe,
+ .remove = emi_icc_remove,
+ .driver = {
+ .name = "mediatek-emi-icc",
+ },
+};
+
+static int __init mtk_emi_icc_init(void)
+{
+ return platform_driver_register(&emi_icc_driver);
+}
+subsys_initcall(mtk_emi_icc_init);
+
+static void __exit mtk_emi_icc_exit(void)
+{
+ platform_driver_unregister(&emi_icc_driver);
+}
+module_exit(mtk_emi_icc_exit);
+
+MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/qcom/Kconfig b/src/kernel/linux/v4.19/drivers/interconnect/qcom/Kconfig
new file mode 100644
index 0000000..290d330
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/qcom/Kconfig
@@ -0,0 +1,13 @@
+config INTERCONNECT_QCOM
+ bool "Qualcomm Network-on-Chip interconnect drivers"
+ depends on ARCH_QCOM
+ help
+ Support for Qualcomm's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_QCOM_SDM845
+ tristate "Qualcomm SDM845 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm845-based
+ platforms.
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/qcom/Makefile b/src/kernel/linux/v4.19/drivers/interconnect/qcom/Makefile
new file mode 100644
index 0000000..1c1cea6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/qcom/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+qnoc-sdm845-objs := sdm845.o
+
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/src/kernel/linux/v4.19/drivers/interconnect/qcom/sdm845.c b/src/kernel/linux/v4.19/drivers/interconnect/qcom/sdm845.c
new file mode 100644
index 0000000..4915b78
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/interconnect/qcom/sdm845.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <asm/div64.h>
+#include <dt-bindings/interconnect/qcom,sdm845.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#define BCM_TCS_CMD_COMMIT_SHFT 30
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHFT 29
+#define BCM_TCS_CMD_VALID_MASK 0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT 14
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_Y_SHFT 0
+#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+
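+/*
+ * BCM_TCS_CMD() packs the commit and valid flags together with the 14-bit
+ * x (average) and y (peak) votes into a single TCS command word.
+ */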
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+ (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
+ ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
+ ((cpu_to_le32(vote_x) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
+ ((cpu_to_le32(vote_y) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define SDM845_MAX_LINKS 43
+#define SDM845_MAX_BCMS 30
+#define SDM845_MAX_BCM_PER_NODE 2
+#define SDM845_MAX_VCD 10
+
+/**
+ * struct qcom_icc_node - Qualcomm-specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: num of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: num of @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[SDM845_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg;
+ u64 max_peak;
+ struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm-specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @num_nodes: total number of @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x;
+ u64 vote_y;
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
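+/*
+ * DEFINE_QNODE() declares a static qcom_icc_node: name, node id, channel
+ * count, bus width in bytes, link count and the ids of the linked nodes.
+ */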
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
+ _numlinks, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = _numlinks, \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
+DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
+DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
+DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
+DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
+DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
+DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
+DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
+DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
+DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
+DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
+DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
+DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
+DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
+DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
+DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
+DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
+DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
+DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
+DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
+DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
+
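+/*
+ * DEFINE_QBCM() declares a static qcom_icc_bcm. The name string (e.g. "MC0")
+ * must match an entry in the RPMh command db, from which qcom_icc_bcm_init()
+ * resolves the BCM address and aux data.
+ */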
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
+ static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = _numnodes, \
+ .nodes = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
+
+static struct qcom_icc_node *rsc_hlos_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_LLCC] = &llcc_mc,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_BLSP_1] = &qhm_qup1,
+ [MASTER_BLSP_2] = &qhm_qup2,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_EBI1] = &ebi,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_BLSP_2] = &qhs_qupv3_north,
+ [SLAVE_BLSP_1] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
+ [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_sh0,
+ &bcm_mm0,
+ &bcm_sh1,
+ &bcm_mm1,
+ &bcm_sh2,
+ &bcm_mm2,
+ &bcm_sh3,
+ &bcm_mm3,
+ &bcm_sh5,
+ &bcm_sn0,
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn14,
+ &bcm_sn15,
+};
+
+static struct qcom_icc_desc sdm845_rsc_hlos = {
+ .nodes = rsc_hlos_nodes,
+ .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
+ .bcms = rsc_hlos_bcms,
+ .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+};
+
+static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+
+ /* Link Qnodes to their respective BCMs */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+
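+/*
+ * Generate a single TCS command: votes saturate at BCM_TCS_CMD_VOTE_MASK,
+ * and a command whose x and y votes are both zero is marked invalid.
+ */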
+static void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+ * Set the wait-for-completion flag on commands that need to complete
+ * before the next command.
+ */
+ if (commit)
+ cmd->wait = true;
+}
+
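+/*
+ * Flatten the VCD-sorted bcm_list into an array of TCS commands; n[] returns
+ * the number of commands in each batch passed to rpmh_write_batch().
+ */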
+static void tcs_list_gen(struct list_head *bcm_list,
+ struct tcs_cmd tcs_list[SDM845_MAX_VCD],
+ int n[SDM845_MAX_VCD])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
+ bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+ * Batch the BCMs in such a way that we do not split them across
+ * multiple payloads when they share the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
+
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i;
+ u64 agg_avg = 0;
+ u64 agg_peak = 0;
+ u64 temp;
+
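+ /*
+ * Scale each node's aggregated bandwidth by the BCM width relative to
+ * the node's buswidth (the average vote also divides by the channel
+ * count), take the max across nodes, then normalize to the BCM
+ * bandwidth unit to produce the x (avg) and y (peak) votes.
+ */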
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg = max(agg_avg, temp);
+
+ temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak = max(agg_peak, temp);
+ }
+
+ temp = agg_avg * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x = temp;
+
+ temp = agg_peak * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y = temp;
+
+ if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
+ bcm->vote_x = 1;
+ bcm->vote_y = 1;
+ }
+
+ bcm->dirty = false;
+}
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ qn->sum_avg = *agg_avg;
+ qn->max_peak = *agg_peak;
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qn->bcms[i]->dirty = true;
+
+ return 0;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ struct tcs_cmd cmds[SDM845_MAX_BCMS];
+ struct list_head commit_list;
+ int commit_idx[SDM845_MAX_VCD];
+ int ret = 0, i;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ INIT_LIST_HEAD(&commit_list);
+
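+ /* only BCMs marked dirty by qcom_icc_aggregate() need to be recommitted */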
+ for (i = 0; i < qp->num_bcms; i++) {
+ if (qp->bcms[i]->dirty) {
+ bcm_aggregate(qp->bcms[i]);
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ /* Construct the command list from the list of BCMs pre-ordered by VCD */
+ tcs_list_gen(&commit_list, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ return ret;
+
+ ret = rpmh_invalidate(qp->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int cmp_vcd(const void *_l, const void *_r)
+{
+ const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
+ const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
+
+ if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
+ return -1;
+ else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node, *tmp;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
+ qnodes[i]->name, node->id);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ /*
+ * Pre-sort the BCMs by VCD to ease generating a command list that
+ * groups BCMs sharing a VCD together. VCDs are numbered with the
+ * lowest being the most expensive time-wise, ensuring that those
+ * commands are sent earliest in the queue.
+ */
+ sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
+
+ platform_set_drvdata(pdev, qp);
+
+ dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
+
+ return ret;
+err:
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n, *tmp;
+
+ list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdm845",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
+MODULE_LICENSE("GPL v2");