ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/net/wireless/mediatek/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/Kconfig
new file mode 100644
index 0000000..02d1120
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_MEDIATEK
+	bool "MediaTek devices"
+	default y
+	---help---
+	  If you have a wireless card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all the
+	  questions about these cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if WLAN_VENDOR_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
+endif # WLAN_VENDOR_MEDIATEK
diff --git a/marvell/linux/drivers/net/wireless/mediatek/Makefile b/marvell/linux/drivers/net/wireless/mediatek/Makefile
new file mode 100644
index 0000000..8061726
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7601U)	+= mt7601u/
+obj-$(CONFIG_MT76_CORE)	+= mt76/
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 0000000..cbc2d8a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76_CORE
+	tristate
+
+config MT76_LEDS
+	bool
+	depends on MT76_CORE
+	depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
+	default y
+
+config MT76_USB
+	tristate
+	depends on MT76_CORE
+
+config MT76x02_LIB
+	tristate
+	select MT76_CORE
+
+config MT76x02_USB
+	tristate
+	select MT76_USB
+
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 0000000..d7a1ddc
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
+obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
+
+mt76-y := \
+	mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
+	tx.o agg-rx.o mcu.o
+
+mt76-$(CONFIG_PCI) += pci.o
+
+mt76-usb-y := usb.o usb_trace.o
+
+CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
+
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
+		 mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+		 mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
+		 mt76x02_dfs.o mt76x02_beacon.o
+
+mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
+
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
+obj-$(CONFIG_MT7603E) += mt7603/
+obj-$(CONFIG_MT7615E) += mt7615/
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/agg-rx.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/agg-rx.c
new file mode 100644
index 0000000..f8441fd
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+#define REORDER_TIMEOUT (HZ / 10)
+
+static void
+mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
+{
+	struct sk_buff *skb;
+
+	tid->head = ieee80211_sn_inc(tid->head);
+
+	skb = tid->reorder_buf[idx];
+	if (!skb)
+		return;
+
+	tid->reorder_buf[idx] = NULL;
+	tid->nframes--;
+	__skb_queue_tail(frames, skb);
+}
+
+static void
+mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
+			    struct sk_buff_head *frames,
+			    u16 head)
+{
+	int idx;
+
+	while (ieee80211_sn_less(tid->head, head)) {
+		idx = tid->head % tid->size;
+		mt76_aggr_release(tid, frames, idx);
+	}
+}
+
+static void
+mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+	int idx = tid->head % tid->size;
+
+	while (tid->reorder_buf[idx]) {
+		mt76_aggr_release(tid, frames, idx);
+		idx = tid->head % tid->size;
+	}
+}
+
+static void
+mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+	struct mt76_rx_status *status;
+	struct sk_buff *skb;
+	int start, idx, nframes;
+
+	if (!tid->nframes)
+		return;
+
+	mt76_rx_aggr_release_head(tid, frames);
+
+	start = tid->head % tid->size;
+	nframes = tid->nframes;
+
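+	/*
+	 * Scan the reorder buffer for frames held longer than REORDER_TIMEOUT
+	 * and advance the window head past the holes in front of them so
+	 * they can be released.
+	 */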
+	for (idx = (tid->head + 1) % tid->size;
+	     idx != start && nframes;
+	     idx = (idx + 1) % tid->size) {
+		skb = tid->reorder_buf[idx];
+		if (!skb)
+			continue;
+
+		nframes--;
+		status = (struct mt76_rx_status *)skb->cb;
+		if (!time_after(jiffies,
+				status->reorder_time + REORDER_TIMEOUT))
+			continue;
+
+		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
+	}
+
+	mt76_rx_aggr_release_head(tid, frames);
+}
+
+static void
+mt76_rx_aggr_reorder_work(struct work_struct *work)
+{
+	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
+					       reorder_work.work);
+	struct mt76_dev *dev = tid->dev;
+	struct sk_buff_head frames;
+	int nframes;
+
+	__skb_queue_head_init(&frames);
+
+	local_bh_disable();
+	rcu_read_lock();
+
+	spin_lock(&tid->lock);
+	mt76_rx_aggr_check_release(tid, &frames);
+	nframes = tid->nframes;
+	spin_unlock(&tid->lock);
+
+	if (nframes)
+		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+					     REORDER_TIMEOUT);
+	mt76_rx_complete(dev, &frames, NULL);
+
+	rcu_read_unlock();
+	local_bh_enable();
+}
+
+static void
+mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+	struct mt76_wcid *wcid = status->wcid;
+	struct mt76_rx_tid *tid;
+	u16 seqno;
+
+	if (!ieee80211_is_ctl(bar->frame_control))
+		return;
+
+	if (!ieee80211_is_back_req(bar->frame_control))
+		return;
+
+	status->tid = le16_to_cpu(bar->control) >> 12;
+	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+	tid = rcu_dereference(wcid->aggr[status->tid]);
+	if (!tid)
+		return;
+
+	spin_lock_bh(&tid->lock);
+	mt76_rx_aggr_release_frames(tid, frames, seqno);
+	mt76_rx_aggr_release_head(tid, frames);
+	spin_unlock_bh(&tid->lock);
+}
+
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct mt76_wcid *wcid = status->wcid;
+	struct ieee80211_sta *sta;
+	struct mt76_rx_tid *tid;
+	bool sn_less;
+	u16 seqno, head, size, idx;
+	u8 ackp;
+
+	__skb_queue_tail(frames, skb);
+
+	sta = wcid_to_sta(wcid);
+	if (!sta)
+		return;
+
+	if (!status->aggr) {
+		mt76_rx_aggr_check_ctl(skb, frames);
+		return;
+	}
+
+	/* not part of a BA session */
+	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
+	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+		return;
+
+	tid = rcu_dereference(wcid->aggr[status->tid]);
+	if (!tid)
+		return;
+
+	status->flag |= RX_FLAG_DUP_VALIDATED;
+	spin_lock_bh(&tid->lock);
+
+	if (tid->stopped)
+		goto out;
+
+	head = tid->head;
+	seqno = status->seqno;
+	size = tid->size;
+	sn_less = ieee80211_sn_less(seqno, head);
+
+	if (!tid->started) {
+		if (sn_less)
+			goto out;
+
+		tid->started = true;
+	}
+
+	if (sn_less) {
+		__skb_unlink(skb, frames);
+		dev_kfree_skb(skb);
+		goto out;
+	}
+
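+	/*
+	 * In-order frame: advance the window head and flush any buffered
+	 * frames that are now contiguous with it.
+	 */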
+	if (seqno == head) {
+		tid->head = ieee80211_sn_inc(head);
+		if (tid->nframes)
+			mt76_rx_aggr_release_head(tid, frames);
+		goto out;
+	}
+
+	__skb_unlink(skb, frames);
+
+	/*
+	 * Frame sequence number exceeds the buffering window; free up some
+	 * space by releasing previous frames.
+	 */
+	if (!ieee80211_sn_less(seqno, head + size)) {
+		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
+		mt76_rx_aggr_release_frames(tid, frames, head);
+	}
+
+	idx = seqno % size;
+
+	/* Discard if the current slot is already in use */
+	if (tid->reorder_buf[idx]) {
+		dev_kfree_skb(skb);
+		goto out;
+	}
+
+	status->reorder_time = jiffies;
+	tid->reorder_buf[idx] = skb;
+	tid->nframes++;
+	mt76_rx_aggr_release_head(tid, frames);
+
+	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+				     REORDER_TIMEOUT);
+
+out:
+	spin_unlock_bh(&tid->lock);
+}
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
+		       u16 ssn, u16 size)
+{
+	struct mt76_rx_tid *tid;
+
+	mt76_rx_aggr_stop(dev, wcid, tidno);
+
+	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
+	if (!tid)
+		return -ENOMEM;
+
+	tid->dev = dev;
+	tid->head = ssn;
+	tid->size = size;
+	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
+	spin_lock_init(&tid->lock);
+
+	rcu_assign_pointer(wcid->aggr[tidno], tid);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
+
+static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+{
+	u16 size = tid->size;
+	int i;
+
+	cancel_delayed_work(&tid->reorder_work);
+
+	spin_lock_bh(&tid->lock);
+
+	tid->stopped = true;
+	for (i = 0; tid->nframes && i < size; i++) {
+		struct sk_buff *skb = tid->reorder_buf[i];
+
+		if (!skb)
+			continue;
+
+		tid->reorder_buf[i] = NULL;
+		tid->nframes--;
+		dev_kfree_skb(skb);
+	}
+
+	spin_unlock_bh(&tid->lock);
+}
+
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
+{
+	struct mt76_rx_tid *tid;
+
+	rcu_read_lock();
+
+	tid = rcu_dereference(wcid->aggr[tidno]);
+	if (tid) {
+		rcu_assign_pointer(wcid->aggr[tidno], NULL);
+		mt76_rx_aggr_shutdown(dev, tid);
+		kfree_rcu(tid, rcu_head);
+	}
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/debugfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 0000000..d95b73f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt76_dev *dev = data;
+
+	dev->bus->wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt76_dev *dev = data;
+
+	*val = dev->bus->rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+			 "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+	struct mt76_dev *dev = dev_get_drvdata(s->private);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+		struct mt76_sw_queue *q = &dev->q_tx[i];
+
+		if (!q->q)
+			continue;
+
+		seq_printf(s,
+			   "%d:	queued=%d head=%d tail=%d swq_queued=%d\n",
+			   i, q->q->queued, q->q->head, q->q->tail,
+			   q->swq_queued);
+	}
+
+	return 0;
+}
+
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+			 s8 *val, int len)
+{
+	int i;
+
+	seq_printf(file, "%10s:", str);
+	for (i = 0; i < len; i++)
+		seq_printf(file, " %2d", val[i]);
+	seq_puts(file, "\n");
+}
+EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
+
+static int mt76_read_rate_txpower(struct seq_file *s, void *data)
+{
+	struct mt76_dev *dev = dev_get_drvdata(s->private);
+
+	mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
+			    ARRAY_SIZE(dev->rate_power.cck));
+	mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
+			    ARRAY_SIZE(dev->rate_power.ofdm));
+	mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
+			    ARRAY_SIZE(dev->rate_power.stbc));
+	mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
+			    ARRAY_SIZE(dev->rate_power.ht));
+	mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
+			    ARRAY_SIZE(dev->rate_power.vht));
+	return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+	if (!dir)
+		return NULL;
+
+	debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin);
+	debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
+	debugfs_create_file_unsafe("regval", 0600, dir, dev,
+				   &fops_regval);
+	debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
+	if (dev->otp.data)
+		debugfs_create_blob("otp", 0400, dir, &dev->otp);
+	debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+	debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
+				    mt76_read_rate_txpower);
+
+	return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 0000000..50a92aa
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize,
+		     u32 ring_base)
+{
+	int size;
+	int i;
+
+	spin_lock_init(&q->lock);
+
+	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+	q->buf_size = bufsize;
+	q->hw_idx = idx;
+
+	size = q->ndesc * sizeof(struct mt76_desc);
+	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	size = q->ndesc * sizeof(*q->entry);
+	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	/* clear descriptors */
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(0, &q->regs->cpu_idx);
+	writel(0, &q->regs->dma_idx);
+	writel(q->ndesc, &q->regs->ring_size);
+
+	return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		 struct mt76_queue_buf *buf, int nbufs, u32 info,
+		 struct sk_buff *skb, void *txwi)
+{
+	struct mt76_desc *desc;
+	u32 ctrl;
+	int i, idx = -1;
+
+	if (txwi) {
+		q->entry[q->head].txwi = DMA_DUMMY_DATA;
+		q->entry[q->head].skip_buf0 = true;
+	}
+
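+	/* Each descriptor carries up to two buffers, so consume the buffer
+	 * list two entries at a time.
+	 */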
+	for (i = 0; i < nbufs; i += 2, buf += 2) {
+		u32 buf0 = buf[0].addr, buf1 = 0;
+
+		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+		if (i < nbufs - 1) {
+			buf1 = buf[1].addr;
+			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+		}
+
+		if (i == nbufs - 1)
+			ctrl |= MT_DMA_CTL_LAST_SEC0;
+		else if (i == nbufs - 2)
+			ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+
+		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+		WRITE_ONCE(desc->info, cpu_to_le32(info));
+		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+		q->queued++;
+	}
+
+	q->entry[idx].txwi = txwi;
+	q->entry[idx].skb = skb;
+
+	return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+			struct mt76_queue_entry *prev_e)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+	u32 ctrl = le32_to_cpu(__ctrl);
+
+	if (!e->skip_buf0) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf0);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf1);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (e->txwi == DMA_DUMMY_DATA)
+		e->txwi = NULL;
+
+	if (e->skb == DMA_DUMMY_DATA)
+		e->skb = NULL;
+
+	*prev_e = *e;
+	memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
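+	/* Reprogram the ring registers and resync the software head/tail
+	 * pointers with the hardware DMA index.
+	 */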
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(q->ndesc, &q->regs->ring_size);
+	q->head = readl(&q->regs->dma_idx);
+	q->tail = q->head;
+	writel(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+	struct mt76_sw_queue *sq = &dev->q_tx[qid];
+	struct mt76_queue *q = sq->q;
+	struct mt76_queue_entry entry;
+	unsigned int n_swq_queued[4] = {};
+	unsigned int n_queued = 0;
+	bool wake = false;
+	int i, last;
+
+	if (!q)
+		return;
+
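+	/* When flushing, reclaim all queued entries; otherwise only clean up
+	 * entries the hardware has already processed.
+	 */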
+	if (flush)
+		last = -1;
+	else
+		last = readl(&q->regs->dma_idx);
+
+	while ((q->queued > n_queued) && q->tail != last) {
+		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+		if (entry.schedule)
+			n_swq_queued[entry.qid]++;
+
+		q->tail = (q->tail + 1) % q->ndesc;
+		n_queued++;
+
+		if (entry.skb)
+			dev->drv->tx_complete_skb(dev, qid, &entry);
+
+		if (entry.txwi) {
+			if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+				mt76_put_txwi(dev, entry.txwi);
+			wake = !flush;
+		}
+
+		if (!flush && q->tail == last)
+			last = readl(&q->regs->dma_idx);
+	}
+
+	spin_lock_bh(&q->lock);
+
+	q->queued -= n_queued;
+	for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+		if (!n_swq_queued[i])
+			continue;
+
+		dev->q_tx[i].swq_queued -= n_swq_queued[i];
+	}
+
+	if (flush)
+		mt76_dma_sync_idx(dev, q);
+
+	wake = wake && q->stopped &&
+	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+	if (wake)
+		q->stopped = false;
+
+	if (!q->queued)
+		wake_up(&dev->tx_wait);
+
+	spin_unlock_bh(&q->lock);
+
+	if (wake)
+		ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+		 int *len, u32 *info, bool *more)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	struct mt76_desc *desc = &q->desc[idx];
+	dma_addr_t buf_addr;
+	void *buf = e->buf;
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	if (len) {
+		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+	}
+
+	if (info)
+		*info = le32_to_cpu(desc->info);
+
+	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	e->buf = NULL;
+
+	return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+		 int *len, u32 *info, bool *more)
+{
+	int idx = q->tail;
+
+	*more = false;
+	if (!q->queued)
+		return NULL;
+
+	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued--;
+
+	return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	writel(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+			  struct sk_buff *skb, u32 tx_info)
+{
+	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_queue_buf buf;
+	dma_addr_t addr;
+
+	if (q->queued + 1 >= q->ndesc - 1)
+		goto error;
+
+	addr = dma_map_single(dev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev, addr)))
+		goto error;
+
+	buf.addr = addr;
+	buf.len = skb->len;
+
+	spin_lock_bh(&q->lock);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_kick_queue(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	return 0;
+
+error:
+	dev_kfree_skb(skb);
+	return -ENOMEM;
+}
+
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta)
+{
+	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_tx_info tx_info = {
+		.skb = skb,
+	};
+	int len, n = 0, ret = -ENOMEM;
+	struct mt76_queue_entry e;
+	struct mt76_txwi_cache *t;
+	struct sk_buff *iter;
+	dma_addr_t addr;
+	u8 *txwi;
+
+	t = mt76_get_txwi(dev);
+	if (!t) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return -ENOMEM;
+	}
+	txwi = mt76_get_txwi_ptr(dev, t);
+
+	skb->prev = skb->next = NULL;
+	if (dev->drv->tx_aligned4_skbs)
+		mt76_insert_hdr_pad(skb);
+
+	len = skb_headlen(skb);
+	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev, addr)))
+		goto free;
+
+	tx_info.buf[n].addr = t->dma_addr;
+	tx_info.buf[n++].len = dev->drv->txwi_size;
+	tx_info.buf[n].addr = addr;
+	tx_info.buf[n++].len = len;
+
+	skb_walk_frags(skb, iter) {
+		if (n == ARRAY_SIZE(tx_info.buf))
+			goto unmap;
+
+		addr = dma_map_single(dev->dev, iter->data, iter->len,
+				      DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev, addr)))
+			goto unmap;
+
+		tx_info.buf[n].addr = addr;
+		tx_info.buf[n++].len = iter->len;
+	}
+	tx_info.nbuf = n;
+
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				DMA_TO_DEVICE);
+	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				   DMA_TO_DEVICE);
+	if (ret < 0)
+		goto unmap;
+
+	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+				tx_info.info, tx_info.skb, t);
+
+unmap:
+	for (n--; n > 0; n--)
+		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+				 tx_info.buf[n].len, DMA_TO_DEVICE);
+
+free:
+	e.skb = tx_info.skb;
+	e.txwi = t;
+	dev->drv->tx_complete_skb(dev, qid, &e);
+	mt76_put_txwi(dev, t);
+	return ret;
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	dma_addr_t addr;
+	void *buf;
+	int frames = 0;
+	int len = SKB_WITH_OVERHEAD(q->buf_size);
+	int offset = q->buf_offset;
+	int idx;
+
+	spin_lock_bh(&q->lock);
+
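+	/* Refill the ring with page fragments until it is one slot short of
+	 * full; the first buf_offset bytes of each buffer are reserved as
+	 * headroom.
+	 */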
+	while (q->queued < q->ndesc - 1) {
+		struct mt76_queue_buf qbuf;
+
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		if (!buf)
+			break;
+
+		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
+		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+		frames++;
+	}
+
+	if (frames)
+		mt76_dma_kick_queue(dev, q);
+
+	spin_unlock_bh(&q->lock);
+
+	return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	struct page *page;
+	void *buf;
+	bool more;
+
+	spin_lock_bh(&q->lock);
+
+	do {
+		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	} while (1);
+
+	if (q->rx_head) {
+		dev_kfree_skb(q->rx_head);
+		q->rx_head = NULL;
+	}
+
+	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+	struct mt76_queue *q = &dev->q_rx[qid];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	mt76_dma_rx_cleanup(dev, q);
+	mt76_dma_sync_idx(dev, q);
+	mt76_dma_rx_fill(dev, q);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+		  int len, bool more)
+{
+	struct sk_buff *skb = q->rx_head;
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	int nr_frags = shinfo->nr_frags;
+
+	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
+		struct page *page = virt_to_head_page(data);
+		int offset = data - page_address(page) + q->buf_offset;
+
+		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+	} else {
+		skb_free_frag(data);
+	}
+
+	if (more)
+		return;
+
+	q->rx_head = NULL;
+	if (nr_frags < ARRAY_SIZE(shinfo->frags))
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	else
+		dev_kfree_skb(skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+	int len, data_len, done = 0;
+	struct sk_buff *skb;
+	unsigned char *data;
+	bool more;
+
+	while (done < budget) {
+		u32 info;
+
+		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+		if (!data)
+			break;
+
+		if (q->rx_head)
+			data_len = q->buf_size;
+		else
+			data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
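+		/* Drop the frame (and any partially assembled fragments) if
+		 * the reported length does not fit into the buffer.
+		 */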
+		if (data_len < len + q->buf_offset) {
+			dev_kfree_skb(q->rx_head);
+			q->rx_head = NULL;
+
+			skb_free_frag(data);
+			continue;
+		}
+
+		if (q->rx_head) {
+			mt76_add_fragment(dev, q, data, len, more);
+			continue;
+		}
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+		skb_reserve(skb, q->buf_offset);
+
+		if (q == &dev->q_rx[MT_RXQ_MCU]) {
+			u32 *rxfce = (u32 *)skb->cb;
+			*rxfce = info;
+		}
+
+		__skb_put(skb, len);
+		done++;
+
+		if (more) {
+			q->rx_head = skb;
+			continue;
+		}
+
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	}
+
+	mt76_dma_rx_fill(dev, q);
+	return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct mt76_dev *dev;
+	int qid, done = 0, cur;
+
+	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+	qid = napi - dev->napi;
+
+	rcu_read_lock();
+
+	do {
+		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
+		mt76_rx_poll_complete(dev, qid, napi);
+		done += cur;
+	} while (cur && done < budget);
+
+	rcu_read_unlock();
+
+	if (done < budget) {
+		napi_complete(napi);
+		dev->drv->rx_poll_complete(dev, qid);
+	}
+
+	return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+	int i;
+
+	init_dummy_netdev(&dev->napi_dev);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+			       64);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
+		skb_queue_head_init(&dev->rx_skb[i]);
+		napi_enable(&dev->napi[i]);
+	}
+
+	return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+	.init = mt76_dma_init,
+	.alloc = mt76_dma_alloc_queue,
+	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
+	.tx_queue_skb = mt76_dma_tx_queue_skb,
+	.tx_cleanup = mt76_dma_tx_cleanup,
+	.rx_reset = mt76_dma_rx_reset,
+	.kick = mt76_dma_kick_queue,
+};
+
+void mt76_dma_attach(struct mt76_dev *dev)
+{
+	dev->queue_ops = &mt76_dma_ops;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+	int i;
+
+	netif_napi_del(&dev->tx_napi);
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+		mt76_dma_tx_cleanup(dev, i, true);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_del(&dev->napi[i]);
+		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 0000000..e7c2769
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define DMA_DUMMY_DATA			((void *)~0)
+
+#define MT_RING_SIZE			0x10
+
+#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1		BIT(14)
+#define MT_DMA_CTL_BURST		BIT(15)
+#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0		BIT(30)
+#define MT_DMA_CTL_DMA_DONE		BIT(31)
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_RX_RXWI_LEN			32
+
+struct mt76_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+} __packed __aligned(4);
+
+enum mt76_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+enum mt76_mcu_evt_type {
+	EVT_CMD_DONE,
+	EVT_CMD_ERROR,
+	EVT_CMD_RETRY,
+	EVT_EVENT_PWR_RSP,
+	EVT_EVENT_WOW_RSP,
+	EVT_EVENT_CARRIER_DETECT_RSP,
+	EVT_EVENT_DFS_DETECT_RSP,
+};
+
+void mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 0000000..804224e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+	struct device_node *np = dev->dev->of_node;
+	struct mtd_info *mtd;
+	const __be32 *list;
+	const char *part;
+	phandle phandle;
+	int offset = 0;
+	int size;
+	size_t retlen;
+	int ret;
+
+	if (!np)
+		return -ENOENT;
+
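+	/* The mediatek,mtd-eeprom property references an MTD partition and an
+	 * offset within it; read the EEPROM contents from there.
+	 */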
+	list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+	if (!list)
+		return -ENOENT;
+
+	phandle = be32_to_cpup(list++);
+	if (!phandle)
+		return -ENOENT;
+
+	np = of_find_node_by_phandle(phandle);
+	if (!np)
+		return -EINVAL;
+
+	part = of_get_property(np, "label", NULL);
+	if (!part)
+		part = np->name;
+
+	mtd = get_mtd_device_nm(part);
+	if (IS_ERR(mtd)) {
+		ret = PTR_ERR(mtd);
+		goto out_put_node;
+	}
+
+	if (size <= sizeof(*list)) {
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	offset = be32_to_cpup(list);
+	ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+	put_mtd_device(mtd);
+	if (ret)
+		goto out_put_node;
+
+	if (retlen < len) {
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return ret;
+#else
+	return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+	struct device_node *np = dev->dev->of_node;
+	const u8 *mac;
+
+	if (!np)
+		return;
+
+	mac = of_get_mac_address(np);
+	if (!IS_ERR(mac))
+		ether_addr_copy(dev->macaddr, mac);
+#endif
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+	dev->eeprom.size = len;
+	dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+	if (!dev->eeprom.data)
+		return -ENOMEM;
+
+	return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mac80211.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 0000000..179337e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = NL80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+#define CHAN5G(_idx, _freq) {			\
+	.band = NL80211_BAND_5GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+	CHAN5G(36, 5180),
+	CHAN5G(40, 5200),
+	CHAN5G(44, 5220),
+	CHAN5G(48, 5240),
+
+	CHAN5G(52, 5260),
+	CHAN5G(56, 5280),
+	CHAN5G(60, 5300),
+	CHAN5G(64, 5320),
+
+	CHAN5G(100, 5500),
+	CHAN5G(104, 5520),
+	CHAN5G(108, 5540),
+	CHAN5G(112, 5560),
+	CHAN5G(116, 5580),
+	CHAN5G(120, 5600),
+	CHAN5G(124, 5620),
+	CHAN5G(128, 5640),
+	CHAN5G(132, 5660),
+	CHAN5G(136, 5680),
+	CHAN5G(140, 5700),
+
+	CHAN5G(149, 5745),
+	CHAN5G(153, 5765),
+	CHAN5G(157, 5785),
+	CHAN5G(161, 5805),
+	CHAN5G(165, 5825),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+	{ .throughput =   0 * 1024, .blink_time = 334 },
+	{ .throughput =   1 * 1024, .blink_time = 260 },
+	{ .throughput =   5 * 1024, .blink_time = 220 },
+	{ .throughput =  10 * 1024, .blink_time = 190 },
+	{ .throughput =  20 * 1024, .blink_time = 170 },
+	{ .throughput =  50 * 1024, .blink_time = 150 },
+	{ .throughput =  70 * 1024, .blink_time = 130 },
+	{ .throughput = 100 * 1024, .blink_time = 110 },
+	{ .throughput = 200 * 1024, .blink_time =  80 },
+	{ .throughput = 300 * 1024, .blink_time =  50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+	struct device_node *np = dev->dev->of_node;
+	struct ieee80211_hw *hw = dev->hw;
+	int led_pin;
+
+	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+		return 0;
+
+	snprintf(dev->led_name, sizeof(dev->led_name),
+		 "mt76-%s", wiphy_name(hw->wiphy));
+
+	dev->led_cdev.name = dev->led_name;
+	dev->led_cdev.default_trigger =
+		ieee80211_create_tpt_led_trigger(hw,
+					IEEE80211_TPT_LEDTRIG_FL_RADIO,
+					mt76_tpt_blink,
+					ARRAY_SIZE(mt76_tpt_blink));
+
+	np = of_get_child_by_name(np, "led");
+	if (np) {
+		if (!of_property_read_u32(np, "led-sources", &led_pin))
+			dev->led_pin = led_pin;
+		dev->led_al = of_property_read_bool(np, "led-active-low");
+		of_node_put(np);
+	}
+
+	return led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static void mt76_led_cleanup(struct mt76_dev *dev)
+{
+	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+		return;
+
+	led_classdev_unregister(&dev->led_cdev);
+}
+
+static void mt76_init_stream_cap(struct mt76_dev *dev,
+				 struct ieee80211_supported_band *sband,
+				 bool vht)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+	int i, nstream = hweight8(dev->antenna_mask);
+	struct ieee80211_sta_vht_cap *vht_cap;
+	u16 mcs_map = 0;
+
+	if (nstream > 1)
+		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
+	else
+		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
+
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
+
+	if (!vht)
+		return;
+
+	vht_cap = &sband->vht_cap;
+	if (nstream > 1)
+		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+	else
+		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+	for (i = 0; i < 8; i++) {
+		if (i < nstream)
+			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
+		else
+			mcs_map |=
+				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
+	}
+	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+}
+
+void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
+{
+	if (dev->cap.has_2ghz)
+		mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
+	if (dev->cap.has_5ghz)
+		mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
+}
+EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+	struct ieee80211_supported_band *sband = &msband->sband;
+	struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	void *chanlist;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
+				    GFP_KERNEL);
+	if (!msband->chan)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+	dev->chandef.chan = &sband->channels[0];
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		       IEEE80211_HT_CAP_GRN_FLD |
+		       IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_SGI_40 |
+		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+	mt76_init_stream_cap(dev, sband, vht);
+
+	if (!vht)
+		return 0;
+
+	vht_cap = &sband->vht_cap;
+	vht_cap->vht_supported = true;
+	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+			IEEE80211_VHT_CAP_RXSTBC_1 |
+			IEEE80211_VHT_CAP_SHORT_GI_80 |
+			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
+			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_2g,
+			       mt76_channels_2ghz,
+			       ARRAY_SIZE(mt76_channels_2ghz),
+			       rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates, bool vht)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_5g,
+			       mt76_channels_5ghz,
+			       ARRAY_SIZE(mt76_channels_5ghz),
+			       rates, n_rates, vht);
+}
+
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+	bool found = false;
+	int i;
+
+	if (!sband)
+		return;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+			continue;
+
+		found = true;
+		break;
+	}
+
+	if (found)
+		return;
+
+	sband->n_channels = 0;
+	dev->hw->wiphy->bands[band] = NULL;
+}
+
+struct mt76_dev *
+mt76_alloc_device(struct device *pdev, unsigned int size,
+		  const struct ieee80211_ops *ops,
+		  const struct mt76_driver_ops *drv_ops)
+{
+	struct ieee80211_hw *hw;
+	struct mt76_dev *dev;
+
+	hw = ieee80211_alloc_hw(size, ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->hw = hw;
+	dev->dev = pdev;
+	dev->drv = drv_ops;
+
+	spin_lock_init(&dev->rx_lock);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->cc_lock);
+	mutex_init(&dev->mutex);
+	init_waitqueue_head(&dev->tx_wait);
+	skb_queue_head_init(&dev->status_list);
+
+	tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_device);
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	dev_set_drvdata(dev->dev, dev);
+
+	INIT_LIST_HEAD(&dev->txwi_cache);
+
+	SET_IEEE80211_DEV(hw, dev->dev);
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+	wiphy->available_antennas_tx = dev->antenna_mask;
+	wiphy->available_antennas_rx = dev->antenna_mask;
+
+	hw->txq_data_size = sizeof(struct mt76_txq);
+	hw->max_tx_fragments = 16;
+
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+	ieee80211_hw_set(hw, TX_AMSDU);
+	ieee80211_hw_set(hw, TX_FRAG_LIST);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, AP_LINK_PS);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+	if (dev->cap.has_2ghz) {
+		ret = mt76_init_sband_2g(dev, rates, n_rates);
+		if (ret)
+			return ret;
+	}
+
+	if (dev->cap.has_5ghz) {
+		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+		if (ret)
+			return ret;
+	}
+
+	wiphy_read_of_freq_limits(dev->hw->wiphy);
+	mt76_check_sband(dev, NL80211_BAND_2GHZ);
+	mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+		ret = mt76_led_init(dev);
+		if (ret)
+			return ret;
+	}
+
+	return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+
+	if (IS_ENABLED(CONFIG_MT76_LEDS))
+		mt76_led_cleanup(dev);
+	mt76_tx_status_check(dev, NULL, true);
+	ieee80211_unregister_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_free_device(struct mt76_dev *dev)
+{
+	mt76_tx_free(dev);
+	ieee80211_free_hw(dev->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_free_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	__skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+bool mt76_has_tx_pending(struct mt76_dev *dev)
+{
+	struct mt76_queue *q;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+		q = dev->q_tx[i].q;
+		if (q && q->queued)
+			return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
+
+void mt76_set_channel(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+	struct mt76_channel_state *state;
+	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+	int timeout = HZ / 5;
+
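+	/* Give pending tx up to 200ms to drain before switching channels */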
+	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
+
+	if (dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	dev->chandef = *chandef;
+
+	if (!offchannel)
+		dev->main_chan = chandef->chan;
+
+	if (chandef->chan != dev->main_chan) {
+		state = mt76_channel_state(dev, chandef->chan);
+		memset(state, 0, sizeof(*state));
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct mt76_sband *sband;
+	struct ieee80211_channel *chan;
+	struct mt76_channel_state *state;
+	int ret = 0;
+
+	if (idx == 0 && dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	sband = &dev->sband_2g;
+	if (idx >= sband->sband.n_channels) {
+		idx -= sband->sband.n_channels;
+		sband = &dev->sband_5g;
+	}
+
+	if (idx >= sband->sband.n_channels)
+		return -ENOENT;
+
+	chan = &sband->sband.channels[idx];
+	state = mt76_channel_state(dev, chan);
+
+	memset(survey, 0, sizeof(*survey));
+	survey->channel = chan;
+	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+	if (chan == dev->main_chan)
+		survey->filled |= SURVEY_INFO_IN_USE;
+
+	spin_lock_bh(&dev->cc_lock);
+	survey->time = div_u64(state->cc_active, 1000);
+	survey->time_busy = div_u64(state->cc_busy, 1000);
+	spin_unlock_bh(&dev->cc_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			 struct ieee80211_key_conf *key)
+{
+	struct ieee80211_key_seq seq;
+	int i;
+
+	wcid->rx_check_pn = false;
+
+	if (!key)
+		return;
+
+	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
+		return;
+
+	wcid->rx_check_pn = true;
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+		ieee80211_get_key_rx_seq(key, i, &seq);
+		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+	}
+}
+EXPORT_SYMBOL(mt76_wcid_key_setup);
+
+static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt76_rx_status mstat;
+
+	mstat = *((struct mt76_rx_status *)skb->cb);
+	memset(status, 0, sizeof(*status));
+
+	status->flag = mstat.flag;
+	status->freq = mstat.freq;
+	status->enc_flags = mstat.enc_flags;
+	status->encoding = mstat.encoding;
+	status->bw = mstat.bw;
+	status->rate_idx = mstat.rate_idx;
+	status->nss = mstat.nss;
+	status->band = mstat.band;
+	status->signal = mstat.signal;
+	status->chains = mstat.chains;
+
+	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
+	BUILD_BUG_ON(sizeof(status->chain_signal) !=
+		     sizeof(mstat.chain_signal));
+	memcpy(status->chain_signal, mstat.chain_signal,
+	       sizeof(mstat.chain_signal));
+
+	return wcid_to_sta(mstat.wcid);
+}
+
+static int
+mt76_check_ccmp_pn(struct sk_buff *skb)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct mt76_wcid *wcid = status->wcid;
+	struct ieee80211_hdr *hdr;
+	int ret;
+
+	if (!(status->flag & RX_FLAG_DECRYPTED))
+		return 0;
+
+	if (!wcid || !wcid->rx_check_pn)
+		return 0;
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		/*
+		 * Validate the first fragment both here and in mac80211.
+		 * All further fragments will be validated by mac80211 only.
+		 */
+		hdr = (struct ieee80211_hdr *)skb->data;
+		if (ieee80211_is_frag(hdr) &&
+		    !ieee80211_is_first_frag(hdr->frame_control))
+			return 0;
+	}
+
+	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
+	ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+		     sizeof(status->iv));
+	if (ret <= 0)
+		return -EINVAL; /* replay */
+
+	memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+
+	if (status->flag & RX_FLAG_IV_STRIPPED)
+		status->flag |= RX_FLAG_PN_VALIDATED;
+
+	return 0;
+}
+
+static void
+mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_sta *sta;
+	struct mt76_wcid *wcid = status->wcid;
+	bool ps;
+	int i;
+
+	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
+		sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+		if (sta)
+			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
+	}
+
+	if (!wcid || !wcid->sta)
+		return;
+
+	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+
+	if (status->signal <= 0)
+		ewma_signal_add(&wcid->rssi, -status->signal);
+
+	wcid->inactive_count = 0;
+
+	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
+		return;
+
+	if (ieee80211_is_pspoll(hdr->frame_control)) {
+		ieee80211_sta_pspoll(sta);
+		return;
+	}
+
+	if (ieee80211_has_morefrags(hdr->frame_control) ||
+	    !(ieee80211_is_mgmt(hdr->frame_control) ||
+	      ieee80211_is_data(hdr->frame_control)))
+		return;
+
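+	/* Track powersave state from the PM bit of received frames and notify
+	 * the driver and mac80211 on transitions.
+	 */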
+	ps = ieee80211_has_pm(hdr->frame_control);
+
+	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
+		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
+		ieee80211_sta_uapsd_trigger(sta, status->tid);
+
+	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
+		return;
+
+	if (ps)
+		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
+	else
+		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
+
+	dev->drv->sta_ps(dev, sta, ps);
+	ieee80211_sta_ps_transition(sta, ps);
+
+	if (ps)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct mt76_txq *mtxq;
+
+		if (!sta->txq[i])
+			continue;
+
+		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+		if (!skb_queue_empty(&mtxq->retry_q))
+			ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+	}
+}
+
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+		      struct napi_struct *napi)
+{
+	struct ieee80211_sta *sta;
+	struct sk_buff *skb;
+
+	spin_lock(&dev->rx_lock);
+	while ((skb = __skb_dequeue(frames)) != NULL) {
+		if (mt76_check_ccmp_pn(skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		sta = mt76_rx_convert(skb);
+		ieee80211_rx_napi(dev->hw, sta, skb, napi);
+	}
+	spin_unlock(&dev->rx_lock);
+}
+
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+			   struct napi_struct *napi)
+{
+	struct sk_buff_head frames;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&frames);
+
+	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
+		mt76_check_sta(dev, skb);
+		mt76_rx_aggr_reorder(skb, &frames);
+	}
+
+	mt76_rx_complete(dev, &frames, napi);
+}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
+
+static int
+mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
+	     struct ieee80211_sta *sta)
+{
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+	int ret;
+	int i;
+
+	mutex_lock(&dev->mutex);
+
+	ret = dev->drv->sta_add(dev, vif, sta);
+	if (ret)
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct mt76_txq *mtxq;
+
+		if (!sta->txq[i])
+			continue;
+
+		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+		mtxq->wcid = wcid;
+
+		mt76_txq_init(dev, sta->txq[i]);
+	}
+
+	ewma_signal_init(&wcid->rssi);
+	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta)
+{
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+	int i, idx = wcid->idx;
+
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	synchronize_rcu();
+
+	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
+		mt76_rx_aggr_stop(dev, wcid, i);
+
+	if (dev->drv->sta_remove)
+		dev->drv->sta_remove(dev, vif, sta);
+
+	mt76_tx_status_check(dev, wcid, true);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76_txq_remove(dev, sta->txq[i]);
+	mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
+
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta)
+{
+	mutex_lock(&dev->mutex);
+	__mt76_sta_remove(dev, vif, sta);
+	mutex_unlock(&dev->mutex);
+}
+
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta,
+		   enum ieee80211_sta_state old_state,
+		   enum ieee80211_sta_state new_state)
+{
+	struct mt76_dev *dev = hw->priv;
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE)
+		return mt76_sta_add(dev, vif, sta);
+
+	if (old_state == IEEE80211_STA_AUTH &&
+	    new_state == IEEE80211_STA_ASSOC &&
+	    dev->drv->sta_assoc)
+		dev->drv->sta_assoc(dev, vif, sta);
+
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST)
+		mt76_sta_remove(dev, vif, sta);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_sta_state);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		     int *dbm)
+{
+	struct mt76_dev *dev = hw->priv;
+	int n_chains = hweight8(dev->antenna_mask);
+
+	*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
+
+	/* convert from per-chain power to combined
+	 * output power
+	 */
+	switch (n_chains) {
+	case 4:
+		*dbm += 6;
+		break;
+	case 3:
+		*dbm += 4;
+		break;
+	case 2:
+		*dbm += 3;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_txpower);
+
+static void
+__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	if (vif->csa_active && ieee80211_csa_is_complete(vif))
+		ieee80211_csa_finish(vif);
+}
+
+void mt76_csa_finish(struct mt76_dev *dev)
+{
+	if (!dev->csa_complete)
+		return;
+
+	ieee80211_iterate_active_interfaces_atomic(dev->hw,
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		__mt76_csa_finish, dev);
+
+	dev->csa_complete = 0;
+}
+EXPORT_SYMBOL_GPL(mt76_csa_finish);
+
+static void
+__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mt76_dev *dev = priv;
+
+	if (!vif->csa_active)
+		return;
+
+	dev->csa_complete |= ieee80211_csa_is_complete(vif);
+}
+
+void mt76_csa_check(struct mt76_dev *dev)
+{
+	ieee80211_iterate_active_interfaces_atomic(dev->hw,
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		__mt76_csa_check, dev);
+}
+EXPORT_SYMBOL_GPL(mt76_csa_check);
+
+int
+mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_set_tim);
+
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	u8 *hdr, *pn = status->iv;
+
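+	/* Reinsert the 8-byte CCMP header that was stripped on rx, rebuilding
+	 * it from the PN stored in the rx status.
+	 */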
+	__skb_push(skb, 8);
+	memmove(skb->data, skb->data + 8, hdr_len);
+	hdr = skb->data + hdr_len;
+
+	hdr[0] = pn[5];
+	hdr[1] = pn[4];
+	hdr[2] = 0;
+	hdr[3] = 0x20 | (key_id << 6);
+	hdr[4] = pn[3];
+	hdr[5] = pn[2];
+	hdr[6] = pn[1];
+	hdr[7] = pn[0];
+
+	status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
+EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
+
+int mt76_get_rate(struct mt76_dev *dev,
+		  struct ieee80211_supported_band *sband,
+		  int idx, bool cck)
+{
+	int i, offset = 0, len = sband->n_bitrates;
+
+	if (cck) {
+		if (sband == &dev->sband_5g.sband)
+			return 0;
+
+		idx &= ~BIT(2); /* short preamble */
+	} else if (sband == &dev->sband_2g.sband) {
+		offset = 4;
+	}
+
+	for (i = offset; i < len; i++) {
+		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+			return i;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_rate);
+
+void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  const u8 *mac)
+{
+	struct mt76_dev *dev = hw->priv;
+
+	set_bit(MT76_SCANNING, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76_sw_scan);
+
+void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76_dev *dev = hw->priv;
+
+	clear_bit(MT76_SCANNING, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mcu.c
new file mode 100644
index 0000000..2a97668
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76.h"
+
+struct sk_buff *
+mt76_mcu_msg_alloc(const void *data, int head_len,
+		   int data_len, int tail_len)
+{
+	struct sk_buff *skb;
+
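+	/* Allocate space for the message body plus driver-specific head and
+	 * tail room, and reserve the headroom.
+	 */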
+	skb = alloc_skb(head_len + data_len + tail_len,
+			GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, head_len);
+	if (data && data_len)
+		skb_put_data(skb, data, data_len);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
+
+/* mmio */
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+				      unsigned long expires)
+{
+	unsigned long timeout;
+
+	if (!time_is_after_jiffies(expires))
+		return NULL;
+
+	timeout = expires - jiffies;
+	wait_event_timeout(dev->mmio.mcu.wait,
+			   !skb_queue_empty(&dev->mmio.mcu.res_q),
+			   timeout);
+	return skb_dequeue(&dev->mmio.mcu.res_q);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
+
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
+{
+	skb_queue_tail(&dev->mmio.mcu.res_q, skb);
+	wake_up(&dev->mmio.mcu.wait);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mmio.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 0000000..1c974df
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+	u32 val;
+
+	val = readl(dev->mmio.regs + offset);
+	trace_reg_rr(dev, offset, val);
+
+	return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+	trace_reg_wr(dev, offset, val);
+	writel(val, dev->mmio.regs + offset);
+}
+
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	val |= mt76_mmio_rr(dev, offset) & ~mask;
+	mt76_mmio_wr(dev, offset, val);
+	return val;
+}
+
+static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
+				 const void *data, int len)
+{
+	__iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
+}
+
+static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
+				void *data, int len)
+{
+	__ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
+}
+
+static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
+			   const struct mt76_reg_pair *data, int len)
+{
+	while (len > 0) {
+		mt76_mmio_wr(dev, data->reg, data->value);
+		data++;
+		len--;
+	}
+
+	return 0;
+}
+
+static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
+			   struct mt76_reg_pair *data, int len)
+{
+	while (len > 0) {
+		data->value = mt76_mmio_rr(dev, data->reg);
+		data++;
+		len--;
+	}
+
+	return 0;
+}
+
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
+		       u32 clear, u32 set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->mmio.irq_lock, flags);
+	dev->mmio.irqmask &= ~clear;
+	dev->mmio.irqmask |= set;
+	mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+	spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+	static const struct mt76_bus_ops mt76_mmio_ops = {
+		.rr = mt76_mmio_rr,
+		.rmw = mt76_mmio_rmw,
+		.wr = mt76_mmio_wr,
+		.write_copy = mt76_mmio_write_copy,
+		.read_copy = mt76_mmio_read_copy,
+		.wr_rp = mt76_mmio_wr_rp,
+		.rd_rp = mt76_mmio_rd_rp,
+		.type = MT76_BUS_MMIO,
+	};
+
+	dev->bus = &mt76_mmio_ops;
+	dev->mmio.regs = regs;
+
+	skb_queue_head_init(&dev->mmio.mcu.res_q);
+	init_waitqueue_head(&dev->mmio.mcu.wait);
+	spin_lock_init(&dev->mmio.irq_lock);
+	mutex_init(&dev->mmio.mcu.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
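
Illustration only (not part of the patch): a bus front end typically calls mt76_mmio_init() right after mapping its register BAR, at which point all accessor macros route through the mt76_mmio_ops defined above and interrupt masking goes through mt76_set_irq_mask(). The register offsets and mask bits below are placeholders, not real MT76xx registers.

static void foo_attach_mmio(struct mt76_dev *dev, void __iomem *regs)
{
	mt76_mmio_init(dev, regs);

	/* raw write through dev->bus->wr() (placeholder offset) */
	__mt76_wr(dev, 0x0200, 0);

	/* clear the low byte and set bit 8 in the cached irqmask, then
	 * write the result back under mmio.irq_lock (placeholder offset)
	 */
	mt76_set_irq_mask(dev, 0x0204, GENMASK(7, 0), BIT(8));
}
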
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 0000000..52a16b4
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,836 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <linux/usb.h>
+#include <linux/average.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE     256
+#define MT_MCU_RING_SIZE    32
+#define MT_RX_BUF_SIZE      2048
+#define MT_SKB_HEAD_LEN     128
+
+struct mt76_dev;
+struct mt76_wcid;
+
+struct mt76_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+enum mt76_bus_type {
+	MT76_BUS_MMIO,
+	MT76_BUS_USB,
+};
+
+struct mt76_bus_ops {
+	u32 (*rr)(struct mt76_dev *dev, u32 offset);
+	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
+			   int len);
+	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
+			  int len);
+	int (*wr_rp)(struct mt76_dev *dev, u32 base,
+		     const struct mt76_reg_pair *rp, int len);
+	int (*rd_rp)(struct mt76_dev *dev, u32 base,
+		     struct mt76_reg_pair *rp, int len);
+	enum mt76_bus_type type;
+};
+
+#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+
+enum mt76_txq_id {
+	MT_TXQ_VO = IEEE80211_AC_VO,
+	MT_TXQ_VI = IEEE80211_AC_VI,
+	MT_TXQ_BE = IEEE80211_AC_BE,
+	MT_TXQ_BK = IEEE80211_AC_BK,
+	MT_TXQ_PSD,
+	MT_TXQ_MCU,
+	MT_TXQ_BEACON,
+	MT_TXQ_CAB,
+	MT_TXQ_FWDL,
+	__MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+	MT_RXQ_MAIN,
+	MT_RXQ_MCU,
+	__MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+	dma_addr_t addr;
+	int len;
+};
+
+struct mt76_tx_info {
+	struct mt76_queue_buf buf[32];
+	struct sk_buff *skb;
+	int nbuf;
+	u32 info;
+};
+
+struct mt76_queue_entry {
+	union {
+		void *buf;
+		struct sk_buff *skb;
+	};
+	union {
+		struct mt76_txwi_cache *txwi;
+		struct urb *urb;
+	};
+	enum mt76_txq_id qid;
+	bool skip_buf0:1;
+	bool schedule:1;
+	bool done:1;
+};
+
+struct mt76_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+	struct mt76_queue_regs __iomem *regs;
+
+	spinlock_t lock;
+	struct mt76_queue_entry *entry;
+	struct mt76_desc *desc;
+
+	u16 first;
+	u16 head;
+	u16 tail;
+	int ndesc;
+	int queued;
+	int buf_size;
+	bool stopped;
+
+	u8 buf_offset;
+	u8 hw_idx;
+
+	dma_addr_t desc_dma;
+	struct sk_buff *rx_head;
+	struct page_frag_cache rx_page;
+};
+
+struct mt76_sw_queue {
+	struct mt76_queue *q;
+
+	struct list_head swq;
+	int swq_queued;
+};
+
+struct mt76_mcu_ops {
+	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
+			    int len, bool wait_resp);
+	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
+			 const struct mt76_reg_pair *rp, int len);
+	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
+			 struct mt76_reg_pair *rp, int len);
+	int (*mcu_restart)(struct mt76_dev *dev);
+};
+
+struct mt76_queue_ops {
+	int (*init)(struct mt76_dev *dev);
+
+	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize,
+		     u32 ring_base);
+
+	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+		       struct mt76_queue_buf *buf, int nbufs, u32 info,
+		       struct sk_buff *skb, void *txwi);
+
+	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+			    struct sk_buff *skb, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta);
+
+	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
+				struct sk_buff *skb, u32 tx_info);
+
+	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+			 int *len, u32 *info, bool *more);
+
+	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+			   bool flush);
+
+	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+enum mt76_wcid_flags {
+	MT_WCID_FLAG_CHECK_PS,
+	MT_WCID_FLAG_PS,
+};
+
+#define MT76_N_WCIDS 128
+
+DECLARE_EWMA(signal, 10, 8);
+
+#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
+#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
+#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
+#define MT_WCID_TX_INFO_SET		BIT(31)
+
+struct mt76_wcid {
+	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+
+	struct work_struct aggr_work;
+
+	unsigned long flags;
+
+	struct ewma_signal rssi;
+	int inactive_count;
+
+	u8 idx;
+	u8 hw_key_idx;
+
+	u8 sta:1;
+
+	u8 rx_check_pn;
+	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+	u16 cipher;
+
+	u32 tx_info;
+	bool sw_iv;
+
+	u8 packet_id;
+};
+
+struct mt76_txq {
+	struct mt76_sw_queue *swq;
+	struct mt76_wcid *wcid;
+
+	struct sk_buff_head retry_q;
+
+	u16 agg_ssn;
+	bool send_bar;
+	bool aggr;
+};
+
+struct mt76_txwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	struct sk_buff *skb;
+};
+
+struct mt76_rx_tid {
+	struct rcu_head rcu_head;
+
+	struct mt76_dev *dev;
+
+	spinlock_t lock;
+	struct delayed_work reorder_work;
+
+	u16 head;
+	u16 size;
+	u16 nframes;
+
+	u8 started:1, stopped:1, timer_pending:1;
+
+	struct sk_buff *reorder_buf[];
+};
+
+#define MT_TX_CB_DMA_DONE		BIT(0)
+#define MT_TX_CB_TXS_DONE		BIT(1)
+#define MT_TX_CB_TXS_FAILED		BIT(2)
+
+#define MT_PACKET_ID_MASK		GENMASK(6, 0)
+#define MT_PACKET_ID_NO_ACK		0
+#define MT_PACKET_ID_NO_SKB		1
+#define MT_PACKET_ID_FIRST		2
+#define MT_PACKET_ID_HAS_RATE		BIT(7)
+
+#define MT_TX_STATUS_SKB_TIMEOUT	HZ
+
+struct mt76_tx_cb {
+	unsigned long jiffies;
+	u8 wcid;
+	u8 pktid;
+	u8 flags;
+};
+
+enum {
+	MT76_STATE_INITIALIZED,
+	MT76_STATE_RUNNING,
+	MT76_STATE_MCU_RUNNING,
+	MT76_SCANNING,
+	MT76_RESET,
+	MT76_REMOVED,
+	MT76_READING_STATS,
+};
+
+struct mt76_hw_cap {
+	bool has_2ghz;
+	bool has_5ghz;
+};
+
+#define MT_TXWI_NO_FREE			BIT(0)
+
+struct mt76_driver_ops {
+	bool tx_aligned4_skbs;
+	u32 txwi_flags;
+	u16 txwi_size;
+
+	void (*update_survey)(struct mt76_dev *dev);
+
+	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			      struct ieee80211_sta *sta,
+			      struct mt76_tx_info *tx_info);
+
+	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+				struct mt76_queue_entry *e);
+
+	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
+	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+		       struct sk_buff *skb);
+
+	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
+		       bool ps);
+
+	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+
+	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+			  struct ieee80211_sta *sta);
+
+	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta);
+};
+
+struct mt76_channel_state {
+	u64 cc_active;
+	u64 cc_busy;
+};
+
+struct mt76_sband {
+	struct ieee80211_supported_band sband;
+	struct mt76_channel_state *chan;
+};
+
+struct mt76_rate_power {
+	union {
+		struct {
+			s8 cck[4];
+			s8 ofdm[8];
+			s8 stbc[10];
+			s8 ht[16];
+			s8 vht[10];
+		};
+		s8 all[48];
+	};
+};
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM	BIT(31)
+#define MT_VEND_TYPE_CFG	BIT(30)
+#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+	MT_VEND_DEV_MODE =	0x1,
+	MT_VEND_WRITE =		0x2,
+	MT_VEND_MULTI_WRITE =	0x6,
+	MT_VEND_MULTI_READ =	0x7,
+	MT_VEND_READ_EEPROM =	0x9,
+	MT_VEND_WRITE_FCE =	0x42,
+	MT_VEND_WRITE_CFG =	0x46,
+	MT_VEND_READ_CFG =	0x47,
+};
+
+enum mt76u_in_ep {
+	MT_EP_IN_PKT_RX,
+	MT_EP_IN_CMD_RESP,
+	__MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+	MT_EP_OUT_INBAND_CMD,
+	MT_EP_OUT_AC_BE,
+	MT_EP_OUT_AC_BK,
+	MT_EP_OUT_AC_VI,
+	MT_EP_OUT_AC_VO,
+	MT_EP_OUT_HCCA,
+	__MT_EP_OUT_MAX,
+};
+
+#define MT_TX_SG_MAX_SIZE	8
+#define MT_RX_SG_MAX_SIZE	1
+#define MT_NUM_TX_ENTRIES	256
+#define MT_NUM_RX_ENTRIES	128
+#define MCU_RESP_URB_SIZE	1024
+struct mt76_usb {
+	struct mutex usb_ctrl_mtx;
+	union {
+		u8 data[32];
+		__le32 reg_val;
+	};
+
+	struct tasklet_struct rx_tasklet;
+	struct delayed_work stat_work;
+
+	u8 out_ep[__MT_EP_OUT_MAX];
+	u8 in_ep[__MT_EP_IN_MAX];
+	bool sg_en;
+
+	struct mt76u_mcu {
+		struct mutex mutex;
+		u8 *data;
+		u32 msg_seq;
+
+		/* multiple reads */
+		struct mt76_reg_pair *rp;
+		int rp_len;
+		u32 base;
+		bool burst;
+	} mcu;
+};
+
+struct mt76_mmio {
+	struct mt76e_mcu {
+		struct mutex mutex;
+
+		wait_queue_head_t wait;
+		struct sk_buff_head res_q;
+
+		u32 msg_seq;
+	} mcu;
+	void __iomem *regs;
+	spinlock_t irq_lock;
+	u32 irqmask;
+};
+
+struct mt76_dev {
+	struct ieee80211_hw *hw;
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_channel *main_chan;
+
+	spinlock_t lock;
+	spinlock_t cc_lock;
+
+	struct mutex mutex;
+
+	const struct mt76_bus_ops *bus;
+	const struct mt76_driver_ops *drv;
+	const struct mt76_mcu_ops *mcu_ops;
+	struct device *dev;
+
+	struct net_device napi_dev;
+	spinlock_t rx_lock;
+	struct napi_struct napi[__MT_RXQ_MAX];
+	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+	struct list_head txwi_cache;
+	struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
+	struct mt76_queue q_rx[__MT_RXQ_MAX];
+	const struct mt76_queue_ops *queue_ops;
+	int tx_dma_idx[4];
+
+	struct tasklet_struct tx_tasklet;
+	struct napi_struct tx_napi;
+	struct delayed_work mac_work;
+
+	wait_queue_head_t tx_wait;
+	struct sk_buff_head status_list;
+
+	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+
+	struct mt76_wcid global_wcid;
+	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
+
+	u8 macaddr[ETH_ALEN];
+	u32 rev;
+	unsigned long state;
+
+	u8 antenna_mask;
+	u16 chainmask;
+
+	struct tasklet_struct pre_tbtt_tasklet;
+	int beacon_int;
+	u8 beacon_mask;
+
+	struct mt76_sband sband_2g;
+	struct mt76_sband sband_5g;
+	struct debugfs_blob_wrapper eeprom;
+	struct debugfs_blob_wrapper otp;
+	struct mt76_hw_cap cap;
+
+	struct mt76_rate_power rate_power;
+	int txpower_conf;
+	int txpower_cur;
+
+	enum nl80211_dfs_regions region;
+
+	u32 debugfs_reg;
+
+	struct led_classdev led_cdev;
+	char led_name[32];
+	bool led_al;
+	u8 led_pin;
+
+	u8 csa_complete;
+
+	ktime_t survey_time;
+
+	u32 rxfilter;
+
+	union {
+		struct mt76_mmio mmio;
+		struct mt76_usb usb;
+	};
+};
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+	MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rx_status {
+	struct mt76_wcid *wcid;
+
+	unsigned long reorder_time;
+
+	u8 iv[6];
+
+	u8 aggr:1;
+	u8 tid;
+	u16 seqno;
+
+	u16 freq;
+	u32 flag;
+	u8 enc_flags;
+	u8 encoding:2, bw:3;
+	u8 rate_idx;
+	u8 nss;
+	u8 band;
+	s8 signal;
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
+#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
+#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
+#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
+#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
+#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)
+
+#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
+#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)
+
+#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
+#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))
+
+#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field)		\
+	FIELD_GET(_field, mt76_rr(dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
+	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+	return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+	return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+	struct mt76_sband *msband;
+	int idx;
+
+	if (c->band == NL80211_BAND_2GHZ)
+		msband = &dev->sband_2g;
+	else
+		msband = &dev->sband_5g;
+
+	idx = c - &msband->sband.channels[0];
+	return &msband->chan[idx];
+}
+
+struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
+				   const struct ieee80211_ops *ops,
+				   const struct mt76_driver_ops *drv_ops);
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+void mt76_free_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+			 s8 *val, int len);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+static inline u8 *
+mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	return (u8 *)t - dev->drv->txwi_size;
+}
+
+/* increment with wrap-around */
+static inline int mt76_incr(int val, int size)
+{
+	return (val + 1) & (size - 1);
+}
+
+/* decrement with wrap-around */
+static inline int mt76_decr(int val, int size)
+{
+	return (val - 1) & (size - 1);
+}
+
+u8 mt76_ac_to_hwq(u8 ac);
+
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+	void *ptr = mtxq;
+
+	return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+static inline struct ieee80211_sta *
+wcid_to_sta(struct mt76_wcid *wcid)
+{
+	void *ptr = wcid;
+
+	if (!wcid || !wcid->sta)
+		return NULL;
+
+	return container_of(ptr, struct ieee80211_sta, drv_priv);
+}
+
+static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
+		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
+	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
+}
+
+static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	if (len % 4 == 0)
+		return;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+}
+
+static inline bool mt76_is_skb_pktid(u8 pktid)
+{
+	if (pktid & MT_PACKET_ID_HAS_RATE)
+		return false;
+
+	return pktid >= MT_PACKET_ID_FIRST;
+}
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	     struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_tx_tasklet(unsigned long data);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta,
+				  u16 tids, int nframes,
+				  enum ieee80211_frame_release_type reason,
+				  bool more_data);
+bool mt76_has_tx_pending(struct mt76_dev *dev);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey);
+void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
+		       u16 ssn, u16 size);
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			 struct ieee80211_key_conf *key);
+
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+			 __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+			   __releases(&dev->status_list.lock);
+
+int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			   struct sk_buff *skb);
+struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
+				       struct mt76_wcid *wcid, int pktid,
+				       struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			     struct sk_buff_head *list);
+void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+			  bool flush);
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta,
+		   enum ieee80211_sta_state old_state,
+		   enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+
+int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		     int *dbm);
+
+void mt76_csa_check(struct mt76_dev *dev);
+void mt76_csa_finish(struct mt76_dev *dev);
+
+int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
+int mt76_get_rate(struct mt76_dev *dev,
+		  struct ieee80211_supported_band *sband,
+		  int idx, bool cck);
+void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  const u8 *mac);
+void mt76_sw_scan_complete(struct ieee80211_hw *hw,
+			   struct ieee80211_vif *vif);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+		      struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+			   struct napi_struct *napi);
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+	return urb->status &&
+	       urb->status != -ECONNRESET &&
+	       urb->status != -ESHUTDOWN &&
+	       urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+	/* TODO: route management packets to queue 5 */
+	return qid + 1;
+}
+
+static inline int
+mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
+	       int timeout)
+{
+	struct usb_interface *uintf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(uintf);
+	struct mt76_usb *usb = &dev->usb;
+	unsigned int pipe;
+
+	if (actual_len)
+		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
+	else
+		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+
+	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+			 u8 req_type, u16 val, u16 offset,
+			 void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+		     const u16 offset, const u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_tx(struct mt76_dev *dev);
+void mt76u_stop_rx(struct mt76_dev *dev);
+int mt76u_resume_rx(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+
+struct sk_buff *
+mt76_mcu_msg_alloc(const void *data, int head_len,
+		   int data_len, int tail_len);
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+				      unsigned long expires);
+
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
+
+#endif
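
One point worth spelling out (sketch, not part of the patch): the mt76_rr()/mt76_wr()/mt76_rmw_field() wrappers above dereference (dev)->mt76, so they only work when the per-chip device structure embeds struct mt76_dev as a member named mt76. The structure name and register offset below are invented for illustration.

struct foo_dev {
	struct mt76_dev mt76;	/* embedded mt76 core device; the name matters */
	/* chip-specific state follows */
};

static u32 foo_read_chip_rev(struct foo_dev *dev)
{
	/* expands to dev->mt76.bus->rr(&dev->mt76, 0x1008) */
	return mt76_rr(dev, 0x1008);
}
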
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
new file mode 100644
index 0000000..6a0080f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT7603E
+	tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support"
+	select MT76_CORE
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7603E wireless PCIe devices and the WLAN core
+	  on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2,
+	  with PHY rates of up to 300 Mbps.
+
+	  To compile this driver as a module, choose M here.
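
For reference, a .config fragment along these lines is enough to get the driver built as a module on a PCIe platform (assuming the usual cfg80211/mac80211 dependencies are otherwise satisfied); MT76_CORE is pulled in automatically via the select statement above:

CONFIG_PCI=y
CONFIG_MAC80211=m
CONFIG_WLAN_VENDOR_MEDIATEK=y
CONFIG_MT7603E=m
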
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
new file mode 100644
index 0000000..6878e30
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7603E) += mt7603e.o
+
+mt7603e-y := \
+	pci.o soc.o main.o init.o mcu.o \
+	core.o dma.o mac.o eeprom.o \
+	beacon.o debugfs.o
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
new file mode 100644
index 0000000..7a41cdf
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+
+struct beacon_bc_data {
+	struct mt7603_dev *dev;
+	struct sk_buff_head q;
+	struct sk_buff *tail[MT7603_MAX_INTERFACES];
+	int count[MT7603_MAX_INTERFACES];
+};
+
+static void
+mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mt7603_dev *dev = (struct mt7603_dev *)priv;
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	struct sk_buff *skb = NULL;
+
+	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
+
+	spin_lock_bh(&dev->ps_lock);
+	mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+		FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+			   dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
+		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
+		dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
+
+	spin_unlock_bh(&dev->ps_lock);
+}
+
+static void
+mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct beacon_bc_data *data = priv;
+	struct mt7603_dev *dev = data->dev;
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+
+	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	info->control.vif = vif;
+	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+	mt76_skb_set_moredata(skb, true);
+	__skb_queue_tail(&data->q, skb);
+	data->tail[mvif->idx] = skb;
+	data->count[mvif->idx]++;
+}
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg)
+{
+	struct mt7603_dev *dev = (struct mt7603_dev *)arg;
+	struct mt76_queue *q;
+	struct beacon_bc_data data = {};
+	struct sk_buff *skb;
+	int i, nframes;
+
+	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+		return;
+
+	data.dev = dev;
+	__skb_queue_head_init(&data.q);
+
+	q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
+	spin_lock_bh(&q->lock);
+	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		mt7603_update_beacon_iter, dev);
+	mt76_queue_kick(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	/* Flush all previous CAB queue packets */
+	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+
+	mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false);
+
+	mt76_csa_check(&dev->mt76);
+	if (dev->mt76.csa_complete)
+		goto out;
+
+	q = dev->mt76.q_tx[MT_TXQ_CAB].q;
+	do {
+		nframes = skb_queue_len(&data.q);
+		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+			IEEE80211_IFACE_ITER_RESUME_ALL,
+			mt7603_add_buffered_bc, &data);
+	} while (nframes != skb_queue_len(&data.q) &&
+		 skb_queue_len(&data.q) < 8);
+
+	if (skb_queue_empty(&data.q))
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+		if (!data.tail[i])
+			continue;
+
+		mt76_skb_set_moredata(data.tail[i], false);
+	}
+
+	spin_lock_bh(&q->lock);
+	while ((skb = __skb_dequeue(&data.q)) != NULL) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_vif *vif = info->control.vif;
+		struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+		mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
+	}
+	mt76_queue_kick(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	for (i = 0; i < ARRAY_SIZE(data.count); i++)
+		mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
+			data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i));
+
+	mt76_wr(dev, MT_WF_ARB_CAB_START,
+		MT_WF_ARB_CAB_START_BSSn(0) |
+		(MT_WF_ARB_CAB_START_BSS0n(1) *
+		 ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+
+out:
+	mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
+	if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+	    hweight8(dev->mt76.beacon_mask))
+		dev->beacon_check++;
+}
+
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+{
+	u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64;
+
+	if (idx >= 0) {
+		if (intval)
+			dev->mt76.beacon_mask |= BIT(idx);
+		else
+			dev->mt76.beacon_mask &= ~BIT(idx);
+	}
+
+	if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
+		mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
+		mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
+		mt76_wr(dev, MT_HW_INT_MASK(3), 0);
+		return;
+	}
+
+	dev->mt76.beacon_int = intval;
+	mt76_wr(dev, MT_TBTT,
+		FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
+
+	mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */
+
+	mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK,
+		       MT_BCNQ_OPMODE_AP);
+	mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO);
+	mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO);
+
+	mt76_wr(dev, MT_PRE_TBTT, pre_tbtt);
+
+	mt76_set(dev, MT_HW_INT_MASK(3),
+		 MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0);
+
+	mt76_set(dev, MT_WF_ARB_BCN_START,
+		 MT_WF_ARB_BCN_START_BSSn(0) |
+		 ((dev->mt76.beacon_mask >> 1) *
+		  MT_WF_ARB_BCN_START_BSS0n(1)));
+	mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
+
+	if (dev->mt76.beacon_mask & ~BIT(0))
+		mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+	else
+		mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/core.c
new file mode 100644
index 0000000..47e3693
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+	mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+{
+	struct mt7603_dev *dev = dev_instance;
+	u32 intr;
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return IRQ_NONE;
+
+	intr &= dev->mt76.mmio.irqmask;
+
+	if (intr & MT_INT_MAC_IRQ3) {
+		u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3));
+
+		mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
+		if (hwintr & MT_HW_INT3_PRE_TBTT0)
+			tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
+
+		if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
+			mt76_csa_finish(&dev->mt76);
+	}
+
+	if (intr & MT_INT_TX_DONE_ALL) {
+		mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		napi_schedule(&dev->mt76.tx_napi);
+	}
+
+	if (intr & MT_INT_RX_DONE(0)) {
+		dev->rx_pse_check = 0;
+		mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+		napi_schedule(&dev->mt76.napi[0]);
+	}
+
+	if (intr & MT_INT_RX_DONE(1)) {
+		dev->rx_pse_check = 0;
+		mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+		napi_schedule(&dev->mt76.napi[1]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
+{
+	u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+	u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
+	dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
+
+	return MT_PCIE_REMAP_BASE_2 + offset;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
new file mode 100644
index 0000000..5942fe7
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+
+static int
+mt7603_reset_read(struct seq_file *s, void *data)
+{
+	struct mt7603_dev *dev = dev_get_drvdata(s->private);
+	static const char * const reset_cause_str[] = {
+		[RESET_CAUSE_TX_HANG] = "TX hang",
+		[RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck",
+		[RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck",
+		[RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck",
+		[RESET_CAUSE_BEACON_STUCK] = "Beacon stuck",
+		[RESET_CAUSE_MCU_HANG] = "MCU hang",
+		[RESET_CAUSE_RESET_FAILED] = "PSE reset failed",
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) {
+		if (!reset_cause_str[i])
+			continue;
+
+		seq_printf(s, "%20s: %u\n", reset_cause_str[i],
+			   dev->reset_cause[i]);
+	}
+
+	return 0;
+}
+
+static int
+mt7603_radio_read(struct seq_file *s, void *data)
+{
+	struct mt7603_dev *dev = dev_get_drvdata(s->private);
+
+	seq_printf(s, "Sensitivity: %d\n", dev->sensitivity);
+	seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
+		   dev->false_cca_ofdm, dev->false_cca_cck);
+
+	return 0;
+}
+
+static int
+mt7603_edcca_set(void *data, u64 val)
+{
+	struct mt7603_dev *dev = data;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	dev->ed_monitor_enabled = !!val;
+	dev->ed_monitor = dev->ed_monitor_enabled &&
+			  dev->mt76.region == NL80211_DFS_ETSI;
+	mt7603_init_edcca(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+static int
+mt7603_edcca_get(void *data, u64 *val)
+{
+	struct mt7603_dev *dev = data;
+
+	*val = dev->ed_monitor_enabled;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
+			 mt7603_edcca_set, "%lld\n");
+
+void mt7603_init_debugfs(struct mt7603_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = mt76_register_debugfs(&dev->mt76);
+	if (!dir)
+		return;
+
+	debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
+	debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
+				    mt7603_reset_read);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
+				    mt7603_radio_read);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
new file mode 100644
index 0000000..24d82a2
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+#include "mac.h"
+#include "../dma.h"
+
+static int
+mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
+		     int idx, int n_desc)
+{
+	struct mt76_queue *hwq;
+	int err;
+
+	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+	if (!hwq)
+		return -ENOMEM;
+
+	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+	if (err < 0)
+		return err;
+
+	INIT_LIST_HEAD(&q->swq);
+	q->q = hwq;
+
+	mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+	return 0;
+}
+
+static void
+mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+	__le32 *txd = (__le32 *)skb->data;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_sta *sta;
+	struct mt7603_sta *msta;
+	struct mt76_wcid *wcid;
+	void *priv;
+	int idx;
+	u32 val;
+	u8 tid;
+
+	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
+		goto free;
+
+	val = le32_to_cpu(txd[1]);
+	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+	skb->priority = FIELD_GET(MT_TXD1_TID, val);
+
+	if (idx >= MT7603_WTBL_STA - 1)
+		goto free;
+
+	wcid = rcu_dereference(dev->mt76.wcid[idx]);
+	if (!wcid)
+		goto free;
+
+	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
+	val = le32_to_cpu(txd[0]);
+	skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
+
+	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+	txd[0] = cpu_to_le32(val);
+
+	sta = container_of(priv, struct ieee80211_sta, drv_priv);
+	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+	ieee80211_sta_set_buffered(sta, tid, true);
+
+	spin_lock_bh(&dev->ps_lock);
+	__skb_queue_tail(&msta->psq, skb);
+	if (skb_queue_len(&msta->psq) >= 64) {
+		skb = __skb_dequeue(&msta->psq);
+		dev_kfree_skb(skb);
+	}
+	spin_unlock_bh(&dev->ps_lock);
+	return;
+
+free:
+	dev_kfree_skb(skb);
+}
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	__le32 *rxd = (__le32 *)skb->data;
+	__le32 *end = (__le32 *)&skb->data[skb->len];
+	enum rx_pkt_type type;
+
+	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+	if (q == MT_RXQ_MCU) {
+		if (type == PKT_TYPE_RX_EVENT)
+			mt76_mcu_rx_event(&dev->mt76, skb);
+		else
+			mt7603_rx_loopback_skb(dev, skb);
+		return;
+	}
+
+	switch (type) {
+	case PKT_TYPE_TXS:
+		for (rxd++; rxd + 5 <= end; rxd += 5)
+			mt7603_mac_add_txs(dev, rxd);
+		dev_kfree_skb(skb);
+		break;
+	case PKT_TYPE_RX_EVENT:
+		mt76_mcu_rx_event(&dev->mt76, skb);
+		return;
+	case PKT_TYPE_NORMAL:
+		if (mt7603_mac_fill_rx(dev, skb) == 0) {
+			mt76_rx(&dev->mt76, q, skb);
+			return;
+		}
+		/* fall through */
+	default:
+		dev_kfree_skb(skb);
+		break;
+	}
+}
+
+static int
+mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize)
+{
+	int err;
+
+	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+			       MT_RX_RING_BASE);
+	if (err < 0)
+		return err;
+
+	mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+	return 0;
+}
+
+static int mt7603_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct mt7603_dev *dev;
+	int i;
+
+	dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
+	dev->tx_dma_check = 0;
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	if (napi_complete_done(napi, 0))
+		mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	return 0;
+}
+
+int mt7603_dma_init(struct mt7603_dev *dev)
+{
+	static const u8 wmm_queue_map[] = {
+		[IEEE80211_AC_BK] = 0,
+		[IEEE80211_AC_BE] = 1,
+		[IEEE80211_AC_VI] = 2,
+		[IEEE80211_AC_VO] = 3,
+	};
+	int ret;
+	int i;
+
+	mt76_dma_attach(&dev->mt76);
+
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+	mt7603_pse_client_reset(dev);
+
+	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+		ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
+					   wmm_queue_map[i],
+					   MT_TX_RING_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
+				   MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
+				   MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+				   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+	ret = mt76_init_queues(dev);
+	if (ret)
+		return ret;
+
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+			  mt7603_poll_tx, NAPI_POLL_WEIGHT);
+	napi_enable(&dev->mt76.tx_napi);
+
+	return 0;
+}
+
+void mt7603_dma_cleanup(struct mt7603_dev *dev)
+{
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+	tasklet_kill(&dev->mt76.tx_tasklet);
+	mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
new file mode 100644
index 0000000..2b6a4d8
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+#include "eeprom.h"
+
+static int
+mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+	val |= MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	udelay(2);
+
+	val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+	    WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt7603_efuse_init(struct mt7603_dev *dev)
+{
+	u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE);
+	int len = MT7603_EEPROM_SIZE;
+	void *buf;
+	int ret, i;
+
+	if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
+		return 0;
+
+	dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+	dev->mt76.otp.size = len;
+	if (!dev->mt76.otp.data)
+		return -ENOMEM;
+
+	buf = dev->mt76.otp.data;
+	for (i = 0; i + 16 <= len; i += 16) {
+		ret = mt7603_efuse_read(dev, base, i, buf + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool
+mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+	if (!efuse[MT_EE_TEMP_SENSOR_CAL])
+		return false;
+
+	if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0)
+		return false;
+
+	if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0)
+		return false;
+
+	if (!efuse[MT_EE_CP_FT_VERSION])
+		return false;
+
+	if (!efuse[MT_EE_XTAL_FREQ_OFFSET])
+		return false;
+
+	if (!efuse[MT_EE_XTAL_WF_RFCAL])
+		return false;
+
+	return true;
+}
+
+static void
+mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+	static const u8 cal_free_bytes[] = {
+		MT_EE_TEMP_SENSOR_CAL,
+		MT_EE_CP_FT_VERSION,
+		MT_EE_XTAL_FREQ_OFFSET,
+		MT_EE_XTAL_WF_RFCAL,
+		/* Skip for MT7628 */
+		MT_EE_TX_POWER_0_START_2G,
+		MT_EE_TX_POWER_0_START_2G + 1,
+		MT_EE_TX_POWER_1_START_2G,
+		MT_EE_TX_POWER_1_START_2G + 1,
+	};
+	u8 *eeprom = dev->mt76.eeprom.data;
+	int n = ARRAY_SIZE(cal_free_bytes);
+	int i;
+
+	if (!mt7603_has_cal_free_data(dev, efuse))
+		return;
+
+	if (is_mt7628(dev))
+		n -= 4;
+
+	for (i = 0; i < n; i++) {
+		int offset = cal_free_bytes[i];
+
+		eeprom[offset] = efuse[offset];
+	}
+}
+
+static int
+mt7603_eeprom_load(struct mt7603_dev *dev)
+{
+	int ret;
+
+	ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE);
+	if (ret < 0)
+		return ret;
+
+	return mt7603_efuse_init(dev);
+}
+
+static int mt7603_check_eeprom(struct mt76_dev *dev)
+{
+	u16 val = get_unaligned_le16(dev->eeprom.data);
+
+	switch (val) {
+	case 0x7628:
+	case 0x7603:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+int mt7603_eeprom_init(struct mt7603_dev *dev)
+{
+	int ret;
+
+	ret = mt7603_eeprom_load(dev);
+	if (ret < 0)
+		return ret;
+
+	if (dev->mt76.otp.data) {
+		if (mt7603_check_eeprom(&dev->mt76) == 0)
+			mt7603_apply_cal_free_data(dev, dev->mt76.otp.data);
+		else
+			memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+			       MT7603_EEPROM_SIZE);
+	}
+
+	dev->mt76.cap.has_2ghz = true;
+	memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+	       ETH_ALEN);
+
+	mt76_eeprom_override(&dev->mt76);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
new file mode 100644
index 0000000..b893fac
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_EEPROM_H
+#define __MT7603_EEPROM_H
+
+#include "mt7603.h"
+
+enum mt7603_eeprom_field {
+	MT_EE_CHIP_ID =				0x000,
+	MT_EE_VERSION =				0x002,
+	MT_EE_MAC_ADDR =			0x004,
+	MT_EE_NIC_CONF_0 =			0x034,
+	MT_EE_NIC_CONF_1 =			0x036,
+	MT_EE_NIC_CONF_2 =			0x042,
+
+	MT_EE_XTAL_TRIM_1 =			0x03a,
+
+	MT_EE_RSSI_OFFSET_2G =			0x046,
+	MT_EE_WIFI_RF_SETTING =			0x048,
+	MT_EE_RSSI_OFFSET_5G =			0x04a,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x050,
+	MT_EE_TX_POWER_DELTA_BW80 =		0x052,
+
+	MT_EE_TX_POWER_EXT_PA_5G =		0x054,
+
+	MT_EE_TEMP_SENSOR_CAL =			0x055,
+
+	MT_EE_TX_POWER_0_START_2G =		0x056,
+	MT_EE_TX_POWER_1_START_2G =		0x05c,
+
+	/* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G		5
+#define MT_TX_POWER_GROUPS_5G			6
+	MT_EE_TX_POWER_0_START_5G =		0x062,
+
+	MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =	0x074,
+	MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =	0x076,
+
+	MT_EE_TX_POWER_1_START_5G =		0x080,
+
+	MT_EE_TX_POWER_CCK =			0x0a0,
+	MT_EE_TX_POWER_OFDM_2G_6M =		0x0a2,
+	MT_EE_TX_POWER_OFDM_2G_24M =		0x0a4,
+	MT_EE_TX_POWER_OFDM_2G_54M =		0x0a6,
+	MT_EE_TX_POWER_HT_BPSK_QPSK =		0x0a8,
+	MT_EE_TX_POWER_HT_16_64_QAM =		0x0aa,
+	MT_EE_TX_POWER_HT_64_QAM =		0x0ac,
+
+	MT_EE_ELAN_RX_MODE_GAIN =		0x0c0,
+	MT_EE_ELAN_RX_MODE_NF =			0x0c1,
+	MT_EE_ELAN_RX_MODE_P1DB =		0x0c2,
+
+	MT_EE_ELAN_BYPASS_MODE_GAIN =		0x0c3,
+	MT_EE_ELAN_BYPASS_MODE_NF =		0x0c4,
+	MT_EE_ELAN_BYPASS_MODE_P1DB =		0x0c5,
+
+	MT_EE_STEP_NUM_NEG_6_7 =		0x0c6,
+	MT_EE_STEP_NUM_NEG_4_5 =		0x0c8,
+	MT_EE_STEP_NUM_NEG_2_3 =		0x0ca,
+	MT_EE_STEP_NUM_NEG_0_1 =		0x0cc,
+
+	MT_EE_REF_STEP_24G =			0x0ce,
+
+	MT_EE_STEP_NUM_PLUS_1_2 =		0x0d0,
+	MT_EE_STEP_NUM_PLUS_3_4 =		0x0d2,
+	MT_EE_STEP_NUM_PLUS_5_6 =		0x0d4,
+	MT_EE_STEP_NUM_PLUS_7 =			0x0d6,
+
+	MT_EE_CP_FT_VERSION =			0x0f0,
+
+	MT_EE_TX_POWER_TSSI_OFF =		0x0f2,
+
+	MT_EE_XTAL_FREQ_OFFSET =		0x0f4,
+	MT_EE_XTAL_TRIM_2_COMP =		0x0f5,
+	MT_EE_XTAL_TRIM_3_COMP =		0x0f6,
+	MT_EE_XTAL_WF_RFCAL =			0x0f7,
+
+	__MT_EE_MAX
+};
+
+enum mt7603_eeprom_source {
+	MT_EE_SRC_PROM,
+	MT_EE_SRC_EFUSE,
+	MT_EE_SRC_FLASH,
+};
+
+#endif
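
The offsets above index into the EEPROM/efuse blobs loaded in eeprom.c; for illustration (the helper name is invented), a 16-bit field can be read the same way mt7603_check_eeprom() reads the chip ID:

static u16 foo_eeprom_read_u16(struct mt76_dev *dev,
			       enum mt7603_eeprom_field field)
{
	u8 *eeprom = dev->eeprom.data;

	return get_unaligned_le16(eeprom + field);
}

For example, foo_eeprom_read_u16(&dev->mt76, MT_EE_CHIP_ID) is expected to return 0x7603 or 0x7628 on the chips this driver supports.
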
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/init.c
new file mode 100644
index 0000000..ad2ccdb
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "eeprom.h"
+
+const struct mt76_driver_ops mt7603_drv_ops = {
+	.txwi_size = MT_TXD_SIZE,
+	.tx_prepare_skb = mt7603_tx_prepare_skb,
+	.tx_complete_skb = mt7603_tx_complete_skb,
+	.rx_skb = mt7603_queue_rx_skb,
+	.rx_poll_complete = mt7603_rx_poll_complete,
+	.sta_ps = mt7603_sta_ps,
+	.sta_add = mt7603_sta_add,
+	.sta_assoc = mt7603_sta_assoc,
+	.sta_remove = mt7603_sta_remove,
+	.update_survey = mt7603_update_channel,
+};
+
+static void
+mt7603_set_tmac_template(struct mt7603_dev *dev)
+{
+	u32 desc[5] = {
+		[1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf),
+		[3] = MT_TXD5_SW_POWER_MGMT
+	};
+	u32 addr;
+	int i;
+
+	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+	addr += MT_CLIENT_TMAC_INFO_TEMPLATE;
+	for (i = 0; i < ARRAY_SIZE(desc); i++)
+		mt76_wr(dev, addr + 4 * i, desc[i]);
+}
+
+static void
+mt7603_dma_sched_init(struct mt7603_dev *dev)
+{
+	int page_size = 128;
+	int page_count;
+	int max_len = 1792;
+	int max_amsdu_pages = 4096 / page_size;
+	int max_mcu_len = 4096;
+	int max_beacon_len = 512 * 4 + max_len;
+	int max_mcast_pages = 4 * max_len / page_size;
+	int reserved_count = 0;
+	int beacon_pages;
+	int mcu_pages;
+	int i;
+
+	page_count = mt76_get_field(dev, MT_PSE_FC_P0,
+				    MT_PSE_FC_P0_MAX_QUOTA);
+	beacon_pages = 4 * (max_beacon_len / page_size);
+	mcu_pages = max_mcu_len / page_size;
+
+	mt76_wr(dev, MT_PSE_FRP,
+		FIELD_PREP(MT_PSE_FRP_P0, 7) |
+		FIELD_PREP(MT_PSE_FRP_P1, 6) |
+		FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4));
+
+	mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553);
+	mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555);
+
+	mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e);
+	mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c);
+
+	mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff);
+
+	mt76_wr(dev, MT_SCH_1, page_count | (2 << 28));
+	mt76_wr(dev, MT_SCH_2, max_amsdu_pages);
+
+	for (i = 0; i <= 4; i++)
+		mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages);
+	reserved_count += 5 * max_amsdu_pages;
+
+	mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages);
+	reserved_count += mcu_pages;
+
+	mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages);
+	reserved_count += beacon_pages;
+
+	mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages);
+	reserved_count += max_mcast_pages;
+
+	if (is_mt7603(dev))
+		reserved_count = 0;
+
+	mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count);
+
+	if (is_mt7603(dev) && mt76xx_rev(dev) >= MT7603_REV_E2) {
+		mt76_wr(dev, MT_GROUP_THRESH(0),
+			page_count - beacon_pages - mcu_pages);
+		mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages);
+		mt76_wr(dev, MT_BMAP_0, 0x0080ff5f);
+		mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages);
+		mt76_wr(dev, MT_BMAP_1, 0x00000020);
+	} else {
+		mt76_wr(dev, MT_GROUP_THRESH(0), page_count);
+		mt76_wr(dev, MT_BMAP_0, 0xffff);
+	}
+
+	mt76_wr(dev, MT_SCH_4, 0);
+
+	for (i = 0; i <= 15; i++)
+		mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff);
+
+	mt76_set(dev, MT_SCH_4, BIT(6));
+}
+
+static void
+mt7603_phy_init(struct mt7603_dev *dev)
+{
+	int rx_chains = dev->mt76.antenna_mask;
+	int tx_chains = hweight8(rx_chains) - 1;
+
+	mt76_rmw(dev, MT_WF_RMAC_RMCR,
+		 (MT_WF_RMAC_RMCR_SMPS_MODE |
+		  MT_WF_RMAC_RMCR_RX_STREAMS),
+		 (FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) |
+		  FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains)));
+
+	mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS,
+		       tx_chains);
+
+	dev->agc0 = mt76_rr(dev, MT_AGC(0));
+	dev->agc3 = mt76_rr(dev, MT_AGC(3));
+}
+
+static void
+mt7603_mac_init(struct mt7603_dev *dev)
+{
+	u8 bc_addr[ETH_ALEN];
+	u32 addr;
+	int i;
+
+	mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0,
+		(MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+	mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1,
+		(MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+		(MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+	mt76_wr(dev, MT_AGG_LIMIT,
+		FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+	mt76_wr(dev, MT_AGG_LIMIT_1,
+		FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+		FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+	mt76_wr(dev, MT_AGG_CONTROL,
+		FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) |
+		FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) |
+		MT_AGG_CONTROL_NO_BA_AR_RULE);
+
+	mt76_wr(dev, MT_AGG_RETRY_CONTROL,
+		FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
+		FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
+
+	mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+		FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
+
+	mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
+	mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
+
+	mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31));
+
+	mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT);
+	mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000);
+
+	mt76_wr(dev, MT_WF_RFCR1, 0);
+
+	mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE);
+
+	mt7603_set_tmac_template(dev);
+
+	/* Enable RX group to HIF */
+	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+	mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS);
+
+	/* Enable RX group to MCU */
+	mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11));
+
+	mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3);
+	mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN);
+
+	/* include preamble detection in CCA trigger signal */
+	mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2);
+
+	mt76_wr(dev, MT_RXREQ, 4);
+
+	/* Configure all rx packets to HIF */
+	mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000);
+
+	/* Configure MCU txs selection with aggregation */
+	mt76_wr(dev, MT_DMA_TCFR0,
+		FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+		MT_DMA_TCFR_TXS_AGGR_COUNT);
+
+	/* Configure HIF txs selection with aggregation */
+	mt76_wr(dev, MT_DMA_TCFR1,
+		FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+		MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */
+		MT_DMA_TCFR_TXS_BIT_MAP);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR);
+
+	for (i = 0; i < MT7603_WTBL_SIZE; i++)
+		mt7603_wtbl_clear(dev, i);
+
+	eth_broadcast_addr(bc_addr);
+	mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr);
+	dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED;
+	rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED],
+			   &dev->global_sta.wcid);
+
+	mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
+	mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
+
+	mt76_wr(dev, MT_AGG_ARUCR,
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
+	mt76_wr(dev, MT_AGG_ARDCR,
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1));
+
+	mt76_wr(dev, MT_AGG_ARCR,
+		(FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+		 MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+		 FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+		 FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+	mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE);
+
+	mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER);
+	mt76_clear(dev, MT_SEC_SCR, BIT(18));
+
+	/* Set secondary beacon time offsets */
+	for (i = 0; i <= 4; i++)
+		mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET,
+			       (i + 1) * (20 + 4096));
+}
+
+static int
+mt7603_init_hardware(struct mt7603_dev *dev)
+{
+	int i, ret;
+
+	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+	ret = mt7603_eeprom_init(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = mt7603_dma_init(dev);
+	if (ret)
+		return ret;
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
+	mt7603_mac_dma_start(dev);
+	dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	for (i = 0; i < MT7603_WTBL_SIZE; i++) {
+		mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
+			FIELD_PREP(MT_PSE_RTA_TAG_ID, i));
+		mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+	}
+
+	ret = mt7603_mcu_init(dev);
+	if (ret)
+		return ret;
+
+	mt7603_dma_sched_init(dev);
+	mt7603_mcu_set_eeprom(dev);
+	mt7603_phy_init(dev);
+	mt7603_mac_init(dev);
+
+	return 0;
+}
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
+}
+
+static struct ieee80211_rate mt7603_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(11, 60),
+	OFDM_RATE(15, 90),
+	OFDM_RATE(10, 120),
+	OFDM_RATE(14, 180),
+	OFDM_RATE(9,  240),
+	OFDM_RATE(13, 360),
+	OFDM_RATE(8,  480),
+	OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC)
+	}, {
+		.max = MT7603_MAX_INTERFACES,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_AP)
+	 },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+	{
+		.limits = if_limits,
+		.n_limits = ARRAY_SIZE(if_limits),
+		.max_interfaces = 4,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	}
+};
+
+static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+				  u8 delay_off)
+{
+	struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
+					      mt76);
+	u32 val, addr;
+
+	val = MT_LED_STATUS_DURATION(0xffff) |
+	      MT_LED_STATUS_OFF(delay_off) |
+	      MT_LED_STATUS_ON(delay_on);
+
+	addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+	mt76_wr(dev, addr, val);
+	addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+	mt76_wr(dev, addr, val);
+
+	val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+	      MT_LED_CTRL_KICK(mt76->led_pin);
+	if (mt76->led_al)
+		val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+	addr = mt7603_reg_map(dev, MT_LED_CTRL);
+	mt76_wr(dev, addr, val);
+}
+
+static int mt7603_led_set_blink(struct led_classdev *led_cdev,
+				unsigned long *delay_on,
+				unsigned long *delay_off)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+	u8 delta_on, delta_off;
+
+	delta_off = max_t(u8, *delay_off / 10, 1);
+	delta_on = max_t(u8, *delay_on / 10, 1);
+
+	mt7603_led_set_config(mt76, delta_on, delta_off);
+	return 0;
+}
+
+static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
+				      enum led_brightness brightness)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+
+	if (!brightness)
+		mt7603_led_set_config(mt76, 0, 0xff);
+	else
+		mt7603_led_set_config(mt76, 0xff, 0);
+}
+
+static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
+{
+	if (addr < 0x100000)
+		return addr;
+
+	return mt7603_reg_map(dev, addr);
+}
+
+static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	u32 addr = __mt7603_reg_addr(dev, offset);
+
+	return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	u32 addr = __mt7603_reg_addr(dev, offset);
+
+	dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	u32 addr = __mt7603_reg_addr(dev, offset);
+
+	return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+static void
+mt7603_regd_notifier(struct wiphy *wiphy,
+		     struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct mt7603_dev *dev = hw->priv;
+
+	dev->mt76.region = request->dfs_region;
+	dev->ed_monitor = dev->ed_monitor_enabled &&
+			  dev->mt76.region == NL80211_DFS_ETSI;
+}
+
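+/*
+ * Decode a sign-magnitude per-rate power offset from the EEPROM: BIT(7)
+ * marks the field as valid, BIT(6) selects a positive offset and the low
+ * six bits hold the magnitude.
+ */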
+static int
+mt7603_txpower_signed(int val)
+{
+	bool sign = val & BIT(6);
+
+	if (!(val & BIT(7)))
+		return 0;
+
+	val &= GENMASK(5, 0);
+	if (!sign)
+		val = -val;
+
+	return val;
+}
+
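+/*
+ * Derive the per-channel power limit from the EEPROM target power plus the
+ * largest per-rate offset. The values appear to be stored in 0.5 dB steps,
+ * hence the DIV_ROUND_UP by two before applying the result to the channel
+ * list.
+ */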
+static void
+mt7603_init_txpower(struct mt7603_dev *dev,
+		    struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_channel *chan;
+	u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
+	int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
+	u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
+	bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+	int max_offset, cur_offset;
+	int i;
+
+	if (ext_pa && is_mt7603(dev))
+		target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+
+	if (target_power & BIT(6))
+		target_power = -(target_power & GENMASK(5, 0));
+
+	max_offset = 0;
+	for (i = 0; i < 14; i++) {
+		cur_offset = mt7603_txpower_signed(rate_power[i]);
+		max_offset = max(max_offset, cur_offset);
+	}
+
+	target_power += max_offset;
+
+	dev->tx_power_limit = target_power;
+	dev->mt76.txpower_cur = target_power;
+
+	target_power = DIV_ROUND_UP(target_power, 2);
+
+	/* add 3 dBm for 2SS devices (combined output) */
+	if (dev->mt76.antenna_mask & BIT(1))
+		target_power += 3;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		chan = &sband->channels[i];
+		chan->max_power = min_t(int, chan->max_reg_power, target_power);
+		chan->orig_mpwr = target_power;
+	}
+}
+
+int mt7603_register_device(struct mt7603_dev *dev)
+{
+	struct mt76_bus_ops *bus_ops;
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	dev->bus_ops = dev->mt76.bus;
+	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+			       GFP_KERNEL);
+	if (!bus_ops)
+		return -ENOMEM;
+
+	bus_ops->rr = mt7603_rr;
+	bus_ops->wr = mt7603_wr;
+	bus_ops->rmw = mt7603_rmw;
+	dev->mt76.bus = bus_ops;
+
+	spin_lock_init(&dev->ps_lock);
+
+	INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
+	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
+		     (unsigned long)dev);
+
+	/* Check for MT7688, which only has 1SS */
+	dev->mt76.antenna_mask = 3;
+	if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
+		dev->mt76.antenna_mask = 1;
+
+	dev->slottime = 9;
+
+	ret = mt7603_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	hw->queues = 4;
+	hw->max_rates = 3;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 11;
+
+	hw->sta_data_size = sizeof(struct mt7603_sta);
+	hw->vif_data_size = sizeof(struct mt7603_vif);
+
+	wiphy->iface_combinations = if_comb;
+	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+	ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+
+	/* init led callbacks */
+	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+		dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
+		dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
+	}
+
+	wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+	wiphy->reg_notifier = mt7603_regd_notifier;
+
+	ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
+				   ARRAY_SIZE(mt7603_rates));
+	if (ret)
+		return ret;
+
+	mt7603_init_debugfs(dev);
+	mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
+
+	return 0;
+}
+
+void mt7603_unregister_device(struct mt7603_dev *dev)
+{
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	mt76_unregister_device(&dev->mt76);
+	mt7603_mcu_exit(dev);
+	mt7603_dma_cleanup(dev);
+	mt76_free_device(&dev->mt76);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
new file mode 100644
index 0000000..5cfea0d
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -0,0 +1,1744 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7603.h"
+#include "mac.h"
+
+#define MT_PSE_PAGE_SIZE	128
+
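+/*
+ * Expand a per-AC bitmask into the per-queue bit groups expected by the
+ * MT_WF_ARB_TX_START_0 / MT_WF_ARB_TX_STOP_0 registers.
+ */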
+static u32
+mt7603_ac_queue_mask0(u32 mask)
+{
+	u32 ret = 0;
+
+	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
+	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
+	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
+	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
+	return ret;
+}
+
+static void
+mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
+}
+
+static void
+mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
+}
+
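+/*
+ * Program SIFS/slot/EIFS and the CCK/OFDM CCA timeouts, adding the coverage
+ * class as an extra offset. TX/RX arbitration is held disabled while the
+ * values are updated, and the CF-End rate falls back to 11b for long slot
+ * times.
+ */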
+void mt7603_mac_set_timing(struct mt7603_dev *dev)
+{
+	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+	int offset = 3 * dev->coverage_class;
+	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+	int sifs;
+	u32 val;
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+		sifs = 16;
+	else
+		sifs = 10;
+
+	mt76_set(dev, MT_ARB_SCR,
+		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+	udelay(1);
+
+	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
+	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
+	mt76_wr(dev, MT_IFS,
+		FIELD_PREP(MT_IFS_EIFS, 360) |
+		FIELD_PREP(MT_IFS_RIFS, 2) |
+		FIELD_PREP(MT_IFS_SIFS, sifs) |
+		FIELD_PREP(MT_IFS_SLOT, dev->slottime));
+
+	if (dev->slottime < 20)
+		val = MT7603_CFEND_RATE_DEFAULT;
+	else
+		val = MT7603_CFEND_RATE_11B;
+
+	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);
+
+	mt76_clear(dev, MT_ARB_SCR,
+		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+static void
+mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
+{
+	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+}
+
+static u32
+mt7603_wtbl1_addr(int idx)
+{
+	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+}
+
+static u32
+mt7603_wtbl2_addr(int idx)
+{
+	/* WTBL2 is accessed through the remap window at MT_PCIE_REMAP_BASE_1 */
+	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
+}
+
+static u32
+mt7603_wtbl3_addr(int idx)
+{
+	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);
+
+	return base + idx * MT_WTBL3_SIZE;
+}
+
+static u32
+mt7603_wtbl4_addr(int idx)
+{
+	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);
+
+	return base + idx * MT_WTBL4_SIZE;
+}
+
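+/*
+ * Initialize a WTBL entry: program the MAC address and MUAR index into
+ * WTBL1 and zero out the WTBL2/3/4 blocks for this index; TX on all ACs is
+ * paused while WTBL2 is rewritten.
+ */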
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+		      const u8 *mac_addr)
+{
+	const void *_mac = mac_addr;
+	u32 addr = mt7603_wtbl1_addr(idx);
+	u32 w0 = 0, w1 = 0;
+	int i;
+
+	if (_mac) {
+		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
+				get_unaligned_le16(_mac + 4));
+		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
+				get_unaligned_le32(_mac));
+	}
+
+	if (vif < 0)
+		vif = 0;
+	else
+		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
+	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);
+
+	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+	mt76_set(dev, addr + 0 * 4, w0);
+	mt76_set(dev, addr + 1 * 4, w1);
+	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);
+
+	mt76_stop_tx_ac(dev, GENMASK(3, 0));
+	addr = mt7603_wtbl2_addr(idx);
+	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
+		mt76_wr(dev, addr + i, 0);
+	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+	mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+	addr = mt7603_wtbl3_addr(idx);
+	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
+		mt76_wr(dev, addr + i, 0);
+
+	addr = mt7603_wtbl4_addr(idx);
+	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
+		mt76_wr(dev, addr + i, 0);
+}
+
+static void
+mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
+{
+	u32 addr = mt7603_wtbl1_addr(idx);
+	u32 val = mt76_rr(dev, addr + 3 * 4);
+
+	val &= ~MT_WTBL1_W3_SKIP_TX;
+	val |= enabled * MT_WTBL1_W3_SKIP_TX;
+
+	mt76_wr(dev, addr + 3 * 4, val);
+}
+
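+/*
+ * Flush frames queued in hardware for the given wcid by redirecting each AC
+ * queue through MT_DMA_FQCR0, either to the PSE free queue (abort) or to
+ * the HIF MCU queue. The skip_tx flag is held for the duration so the entry
+ * does not transmit while it is being flushed.
+ */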
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
+{
+	int i, port, queue;
+
+	if (abort) {
+		port = 3; /* PSE */
+		queue = 8; /* free queue */
+	} else {
+		port = 0; /* HIF */
+		queue = 1; /* MCU queue */
+	}
+
+	mt7603_wtbl_set_skip_tx(dev, idx, true);
+
+	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
+			FIELD_PREP(MT_TX_ABORT_WCID, idx));
+
+	for (i = 0; i < 4; i++) {
+		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
+			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
+			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
+			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
+
+		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
+					0, 5000));
+	}
+
+	mt76_wr(dev, MT_TX_ABORT, 0);
+
+	mt7603_wtbl_set_skip_tx(dev, idx, false);
+}
+
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			  bool enabled)
+{
+	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);
+
+	if (sta->smps == enabled)
+		return;
+
+	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
+	sta->smps = enabled;
+}
+
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			bool enabled)
+{
+	int idx = sta->wcid.idx;
+	u32 addr;
+
+	spin_lock_bh(&dev->ps_lock);
+
+	if (sta->ps == enabled)
+		goto out;
+
+	mt76_wr(dev, MT_PSE_RTA,
+		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
+		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
+		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
+		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
+		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);
+
+	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+
+	if (enabled)
+		mt7603_filter_tx(dev, idx, false);
+
+	addr = mt7603_wtbl1_addr(idx);
+	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
+		 enabled * MT_WTBL1_W3_POWER_SAVE);
+	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+	sta->ps = enabled;
+
+out:
+	spin_unlock_bh(&dev->ps_lock);
+}
+
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
+{
+	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
+	int wtbl2_frame = idx / wtbl2_frame_size;
+	int wtbl2_entry = idx % wtbl2_frame_size;
+
+	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
+	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
+	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
+	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;
+
+	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
+	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
+	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
+	int wtbl4_entry = idx % wtbl4_frame_size;
+
+	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+	int i;
+
+	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+	mt76_wr(dev, addr + 0 * 4,
+		MT_WTBL1_W0_RX_CHECK_A1 |
+		MT_WTBL1_W0_RX_CHECK_A2 |
+		MT_WTBL1_W0_RX_VALID);
+	mt76_wr(dev, addr + 1 * 4, 0);
+	mt76_wr(dev, addr + 2 * 4, 0);
+
+	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+	mt76_wr(dev, addr + 3 * 4,
+		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
+		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
+		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
+		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
+	mt76_wr(dev, addr + 4 * 4,
+		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
+		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
+		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));
+
+	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+	addr = mt7603_wtbl2_addr(idx);
+
+	/* Clear BA information */
+	mt76_wr(dev, addr + (15 * 4), 0);
+
+	mt76_stop_tx_ac(dev, GENMASK(3, 0));
+	for (i = 2; i <= 4; i++)
+		mt76_wr(dev, addr + (i * 4), 0);
+	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+	mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
+	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+}
+
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
+{
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	int idx = msta->wcid.idx;
+	u32 addr;
+	u32 val;
+
+	addr = mt7603_wtbl1_addr(idx);
+
+	val = mt76_rr(dev, addr + 2 * 4);
+	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
+	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
+	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
+	       MT_WTBL1_W2_TXS_BAF_REPORT;
+
+	if (sta->ht_cap.cap)
+		val |= MT_WTBL1_W2_HT;
+	if (sta->vht_cap.cap)
+		val |= MT_WTBL1_W2_VHT;
+
+	mt76_wr(dev, addr + 2 * 4, val);
+
+	addr = mt7603_wtbl2_addr(idx);
+	val = mt76_rr(dev, addr + 9 * 4);
+	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+		 MT_WTBL2_W9_SHORT_GI_80);
+	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+		val |= MT_WTBL2_W9_SHORT_GI_20;
+	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+		val |= MT_WTBL2_W9_SHORT_GI_40;
+	mt76_wr(dev, addr + 9 * 4, val);
+}
+
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
+{
+	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
+	mt76_wr(dev, MT_BA_CONTROL_1,
+		(get_unaligned_le16(addr + 4) |
+		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
+		 MT_BA_CONTROL_1_RESET));
+}
+
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
+			    int ba_size)
+{
+	u32 addr = mt7603_wtbl2_addr(wcid);
+	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+		       (MT_WTBL2_W15_BA_WIN_SIZE <<
+			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
+	u32 tid_val;
+	int i;
+
+	if (ba_size < 0) {
+		/* disable BA for this TID */
+		mt76_clear(dev, addr + (15 * 4), tid_mask);
+		return;
+	}
+
+	for (i = 7; i > 0; i--) {
+		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
+			break;
+	}
+
+	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);
+
+	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
+}
+
+static struct mt76_wcid *
+mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
+{
+	struct mt7603_sta *sta;
+	struct mt76_wcid *wcid;
+
+	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+		return NULL;
+
+	wcid = rcu_dereference(dev->mt76.wcid[idx]);
+	if (unicast || !wcid)
+		return wcid;
+
+	if (!wcid->sta)
+		return NULL;
+
+	sta = container_of(wcid, struct mt7603_sta, wcid);
+	if (!sta->vif)
+		return NULL;
+
+	return &sta->vif->sta.wcid;
+}
+
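+/*
+ * Parse the RX descriptor (and its optional group 1-4 extensions) into the
+ * mt76_rx_status embedded in skb->cb, then strip the descriptor and header
+ * padding from the skb. Returns -EINVAL for truncated or malformed frames.
+ */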
+int
+mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_hdr *hdr;
+	__le32 *rxd = (__le32 *)skb->data;
+	u32 rxd0 = le32_to_cpu(rxd[0]);
+	u32 rxd1 = le32_to_cpu(rxd[1]);
+	u32 rxd2 = le32_to_cpu(rxd[2]);
+	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
+	bool insert_ccmp_hdr = false;
+	bool remove_pad;
+	int idx;
+	int i;
+
+	memset(status, 0, sizeof(*status));
+
+	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+	sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
+	i >>= 1;
+
+	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);
+
+	status->band = sband->band;
+	if (i < sband->n_channels)
+		status->freq = sband->channels[i].center_freq;
+
+	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+		status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+		status->flag |= RX_FLAG_MMIC_ERROR;
+
+	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+	}
+
+	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+		return -EINVAL;
+
+	if (!sband->channels)
+		return -EINVAL;
+
+	rxd += 4;
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+		rxd += 4;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+		u8 *data = (u8 *)rxd;
+
+		if (status->flag & RX_FLAG_DECRYPTED) {
+			status->iv[0] = data[5];
+			status->iv[1] = data[4];
+			status->iv[2] = data[3];
+			status->iv[3] = data[2];
+			status->iv[4] = data[1];
+			status->iv[5] = data[0];
+
+			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+		}
+
+		rxd += 4;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+		rxd += 2;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+		u32 rxdg0 = le32_to_cpu(rxd[0]);
+		u32 rxdg3 = le32_to_cpu(rxd[3]);
+		bool cck = false;
+
+		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+		case MT_PHY_TYPE_CCK:
+			cck = true;
+			/* fall through */
+		case MT_PHY_TYPE_OFDM:
+			i = mt76_get_rate(&dev->mt76, sband, i, cck);
+			break;
+		case MT_PHY_TYPE_HT_GF:
+		case MT_PHY_TYPE_HT:
+			status->encoding = RX_ENC_HT;
+			if (i > 15)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		if (rxdg0 & MT_RXV1_HT_AD_CODE)
+			status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
+				    FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+
+		status->rate_idx = i;
+
+		status->chains = dev->mt76.antenna_mask;
+		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
+					  dev->rssi_offset[0];
+		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
+					  dev->rssi_offset[1];
+
+		status->signal = status->chain_signal[0];
+		if (status->chains & BIT(1))
+			status->signal = max(status->signal,
+					     status->chain_signal[1]);
+
+		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
+			status->bw = RATE_INFO_BW_40;
+
+		rxd += 6;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+	if (insert_ccmp_hdr) {
+		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+		mt76_insert_ccmp_hdr(skb, key_id);
+	}
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+		return 0;
+
+	status->aggr = unicast &&
+		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
+	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+	return 0;
+}
+
+static u16
+mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
+		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
+{
+	u8 phy, nss, rate_idx;
+	u16 rateval;
+
+	*bw = 0;
+	if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			*bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		nss = 1;
+		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+	}
+
+	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+		   FIELD_PREP(MT_TX_RATE_MODE, phy));
+
+	if (stbc && nss == 1)
+		rateval |= MT_TX_RATE_STBC;
+
+	return rateval;
+}
+
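+/*
+ * Write a new rate set for a station into the WTBL rate update (RIUCR)
+ * registers. The two entries in sta->rateset[] are written alternately,
+ * with bit 0 of rate_set_tsf recording the active one; the TSF snapshot
+ * stored alongside lets the tx status path work out which set a reported
+ * frame was sent with.
+ */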
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			   struct ieee80211_tx_rate *probe_rate,
+			   struct ieee80211_tx_rate *rates)
+{
+	struct ieee80211_tx_rate *ref;
+	int wcid = sta->wcid.idx;
+	u32 addr = mt7603_wtbl2_addr(wcid);
+	bool stbc = false;
+	int n_rates = sta->n_rates;
+	u8 bw, bw_prev, bw_idx = 0;
+	u16 val[4];
+	u16 probe_val;
+	u32 w9 = mt76_rr(dev, addr + 9 * 4);
+	bool rateset;
+	int i, k;
+
+	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+		return;
+
+	for (i = n_rates; i < 4; i++)
+		rates[i] = rates[n_rates - 1];
+
+	rateset = !(sta->rate_set_tsf & BIT(0));
+	memcpy(sta->rateset[rateset].rates, rates,
+	       sizeof(sta->rateset[rateset].rates));
+	if (probe_rate) {
+		sta->rateset[rateset].probe_rate = *probe_rate;
+		ref = &sta->rateset[rateset].probe_rate;
+	} else {
+		sta->rateset[rateset].probe_rate.idx = -1;
+		ref = &sta->rateset[rateset].rates[0];
+	}
+
+	rates = sta->rateset[rateset].rates;
+	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
+		/*
+		 * We don't support switching between short and long GI
+		 * within the rate set. For accurate tx status reporting, we
+		 * need to make sure that flags match.
+		 * For improved performance, avoid duplicate entries by
+		 * decrementing the MCS index if necessary
+		 */
+		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
+			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
+
+		for (k = 0; k < i; k++) {
+			if (rates[i].idx != rates[k].idx)
+				continue;
+			if ((rates[i].flags ^ rates[k].flags) &
+			    IEEE80211_TX_RC_40_MHZ_WIDTH)
+				continue;
+
+			if (!rates[i].idx)
+				continue;
+
+			rates[i].idx--;
+		}
+	}
+
+	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+	      MT_WTBL2_W9_SHORT_GI_80;
+
+	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+	bw_prev = bw;
+
+	if (probe_rate) {
+		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+		if (bw)
+			bw_idx = 1;
+		else
+			bw_prev = 0;
+	} else {
+		probe_val = val[0];
+	}
+
+	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
+	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);
+
+	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+	if (bw_prev) {
+		bw_idx = 3;
+		bw_prev = bw;
+	}
+
+	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+	if (bw_prev) {
+		bw_idx = 5;
+		bw_prev = bw;
+	}
+
+	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+	if (bw_prev)
+		bw_idx = 7;
+
+	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
+		       bw_idx ? bw_idx - 1 : 7);
+
+	mt76_wr(dev, MT_WTBL_RIUCR0, w9);
+
+	mt76_wr(dev, MT_WTBL_RIUCR1,
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
+
+	mt76_wr(dev, MT_WTBL_RIUCR2,
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+	mt76_wr(dev, MT_WTBL_RIUCR3,
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
+
+	mt76_wr(dev, MT_WTBL_UPDATE,
+		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+		MT_WTBL_UPDATE_RATE_UPDATE |
+		MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
+	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+}
+
+static enum mt7603_cipher_type
+mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* Rx/Tx MIC keys are swapped */
+		memcpy(key_data + 16, key->key + 24, 8);
+		memcpy(key_data + 24, key->key + 16, 8);
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+			struct ieee80211_key_conf *key)
+{
+	enum mt7603_cipher_type cipher;
+	u32 addr = mt7603_wtbl3_addr(wcid);
+	u8 key_data[32];
+	int key_len = sizeof(key_data);
+
+	cipher = mt7603_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
+		addr += key->keyidx * 16;
+		key_len = 16;
+	}
+
+	mt76_wr_copy(dev, addr, key_data, key_len);
+
+	addr = mt7603_wtbl1_addr(wcid);
+	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
+	if (key)
+		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
+	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);
+
+	return 0;
+}
+
+static int
+mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
+		      struct sk_buff *skb, enum mt76_txq_id qid,
+		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+		      int pid, struct ieee80211_key_conf *key)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_queue *q = dev->mt76.q_tx[qid].q;
+	struct mt7603_vif *mvif;
+	int wlan_idx;
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	int tx_count = 8;
+	u8 frame_type, frame_subtype;
+	u16 fc = le16_to_cpu(hdr->frame_control);
+	u16 seqno = 0;
+	u8 vif_idx = 0;
+	u32 val;
+	u8 bw;
+
+	if (vif) {
+		mvif = (struct mt7603_vif *)vif->drv_priv;
+		vif_idx = mvif->idx;
+		if (vif_idx && qid >= MT_TXQ_BEACON)
+			vif_idx += 0x10;
+	}
+
+	if (sta) {
+		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+
+		tx_count = msta->rate_count;
+	}
+
+	if (wcid)
+		wlan_idx = wcid->idx;
+	else
+		wlan_idx = MT7603_WTBL_RESERVED;
+
+	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
+	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;
+
+	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
+	txwi[0] = cpu_to_le32(val);
+
+	val = MT_TXD1_LONG_FORMAT |
+	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
+	      FIELD_PREP(MT_TXD1_TID,
+			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
+	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
+	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
+	txwi[1] = cpu_to_le32(val);
+
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);
+
+	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
+	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
+	      FIELD_PREP(MT_TXD2_MULTICAST,
+			 is_multicast_ether_addr(hdr->addr1));
+	txwi[2] = cpu_to_le32(val);
+
+	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+	txwi[4] = 0;
+
+	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+	      FIELD_PREP(MT_TXD5_PID, pid);
+	txwi[5] = cpu_to_le32(val);
+
+	txwi[6] = 0;
+
+	if (rate->idx >= 0 && rate->count &&
+	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+		val = MT_TXD6_FIXED_BW |
+		      FIELD_PREP(MT_TXD6_BW, bw) |
+		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+		txwi[6] |= cpu_to_le32(val);
+
+		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+		if (!(rate->flags & IEEE80211_TX_RC_MCS))
+			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+		tx_count = rate->count;
+	}
+
+	/* use maximum tx count for beacons and buffered multicast */
+	if (qid >= MT_TXQ_BEACON)
+		tx_count = 0x1f;
+
+	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
+		  MT_TXD3_SN_VALID;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		seqno = le16_to_cpu(hdr->seq_ctrl);
+	else if (ieee80211_is_back_req(hdr->frame_control))
+		seqno = le16_to_cpu(bar->start_seq_num);
+	else
+		val &= ~MT_TXD3_SN_VALID;
+
+	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
+	txwi[3] = cpu_to_le32(val);
+
+	if (key) {
+		u64 pn = atomic64_inc_return(&key->tx_pn);
+
+		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
+		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
+		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
+	}
+
+	txwi[7] = 0;
+
+	return 0;
+}
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
+			  struct mt76_tx_info *tx_info)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	int pid;
+
+	if (!wcid)
+		wcid = &dev->global_sta.wcid;
+
+	if (sta) {
+		msta = (struct mt7603_sta *)sta->drv_priv;
+
+		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
+				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
+		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+			mt7603_wtbl_set_ps(dev, msta, false);
+	}
+
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+		spin_lock_bh(&dev->mt76.lock);
+		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
+				      msta->rates);
+		msta->rate_probe = true;
+		spin_unlock_bh(&dev->mt76.lock);
+	}
+
+	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
+			      sta, pid, key);
+
+	return 0;
+}
+
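+/*
+ * Translate a TX status report into ieee80211_tx_info rate entries. The
+ * rate set in effect is selected by comparing the TXS timestamp against the
+ * stored rate_set_tsf (within a 1000000 tick window), and the retry count
+ * is distributed over the reported first..last rate indices.
+ */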
+static bool
+mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
+		struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+	struct ieee80211_supported_band *sband;
+	struct mt7603_rate_set *rs;
+	int first_idx = 0, last_idx;
+	u32 rate_set_tsf;
+	u32 final_rate;
+	u32 final_rate_flags;
+	bool rs_idx;
+	bool ack_timeout;
+	bool fixed_rate;
+	bool probe;
+	bool ampdu;
+	bool cck = false;
+	int count;
+	u32 txs;
+	int idx;
+	int i;
+
+	fixed_rate = info->status.rates[0].count;
+	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+	txs = le32_to_cpu(txs_data[4]);
+	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
+	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
+	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);
+
+	txs = le32_to_cpu(txs_data[0]);
+	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+		return false;
+
+	if (txs & MT_TXS0_QUEUE_TIMEOUT)
+		return false;
+
+	if (!ack_timeout)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	info->status.ampdu_len = 1;
+	info->status.ampdu_ack_len = !!(info->flags &
+					IEEE80211_TX_STAT_ACK);
+
+	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);
+
+	if (fixed_rate && !probe) {
+		info->status.rates[0].count = count;
+		i = 0;
+		goto out;
+	}
+
+	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
+	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+			 rate_set_tsf) < 1000000);
+	rs_idx ^= rate_set_tsf & BIT(0);
+	rs = &sta->rateset[rs_idx];
+
+	if (!first_idx && rs->probe_rate.idx >= 0) {
+		info->status.rates[0] = rs->probe_rate;
+
+		spin_lock_bh(&dev->mt76.lock);
+		if (sta->rate_probe) {
+			mt7603_wtbl_set_rates(dev, sta, NULL,
+					      sta->rates);
+			sta->rate_probe = false;
+		}
+		spin_unlock_bh(&dev->mt76.lock);
+	} else {
+		info->status.rates[0] = rs->rates[first_idx / 2];
+	}
+	info->status.rates[0].count = 0;
+
+	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
+		struct ieee80211_tx_rate *cur_rate;
+		int cur_count;
+
+		cur_rate = &rs->rates[idx / 2];
+		cur_count = min_t(int, MT7603_RATE_RETRY, count);
+		count -= cur_count;
+
+		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+			    cur_rate->flags != info->status.rates[i].flags)) {
+			i++;
+			if (i == ARRAY_SIZE(info->status.rates)) {
+				i--;
+				break;
+			}
+
+			info->status.rates[i] = *cur_rate;
+			info->status.rates[i].count = 0;
+		}
+
+		info->status.rates[i].count += cur_count;
+	}
+
+out:
+	final_rate_flags = info->status.rates[i].flags;
+
+	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+	case MT_PHY_TYPE_CCK:
+		cck = true;
+		/* fall through */
+	case MT_PHY_TYPE_OFDM:
+		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+			sband = &dev->mt76.sband_5g.sband;
+		else
+			sband = &dev->mt76.sband_2g.sband;
+		final_rate &= GENMASK(5, 0);
+		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+					   cck);
+		final_rate_flags = 0;
+		break;
+	case MT_PHY_TYPE_HT_GF:
+	case MT_PHY_TYPE_HT:
+		final_rate_flags |= IEEE80211_TX_RC_MCS;
+		final_rate &= GENMASK(5, 0);
+		if (final_rate > 15)
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	info->status.rates[i].idx = final_rate;
+	info->status.rates[i].flags = final_rate_flags;
+
+	return true;
+}
+
+static bool
+mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
+		       __le32 *txs_data)
+{
+	struct mt76_dev *mdev = &dev->mt76;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	if (pid < MT_PACKET_ID_FIRST)
+		return false;
+
+	mt76_tx_status_lock(mdev, &list);
+	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+	if (skb) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
+			ieee80211_tx_info_clear_status(info);
+			info->status.rates[0].idx = -1;
+		}
+
+		mt76_tx_status_skb_done(mdev, skb, &list);
+	}
+	mt76_tx_status_unlock(mdev, &list);
+
+	return !!skb;
+}
+
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt7603_sta *msta = NULL;
+	struct mt76_wcid *wcid;
+	__le32 *txs_data = data;
+	u32 txs;
+	u8 wcidx;
+	u8 pid;
+
+	txs = le32_to_cpu(txs_data[4]);
+	pid = FIELD_GET(MT_TXS4_PID, txs);
+	txs = le32_to_cpu(txs_data[3]);
+	wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+
+	if (pid == MT_PACKET_ID_NO_ACK)
+		return;
+
+	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+		return;
+
+	rcu_read_lock();
+
+	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+	if (!wcid)
+		goto out;
+
+	msta = container_of(wcid, struct mt7603_sta, wcid);
+	sta = wcid_to_sta(wcid);
+
+	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
+		goto out;
+
+	if (wcidx >= MT7603_WTBL_STA || !sta)
+		goto out;
+
+	if (mt7603_fill_txs(dev, msta, &info, txs_data))
+		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+	rcu_read_unlock();
+}
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct sk_buff *skb = e->skb;
+
+	if (!e->txwi) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (qid < 4)
+		dev->tx_hang_check = 0;
+
+	mt76_tx_complete_skb(mdev, skb);
+}
+
+static bool
+wait_for_wpdma(struct mt7603_dev *dev)
+{
+	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+			 0, 1000);
+}
+
+static void mt7603_pse_reset(struct mt7603_dev *dev)
+{
+	/* Clear previous reset result */
+	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);
+
+	/* Reset PSE */
+	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+
+	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
+			    MT_MCU_DEBUG_RESET_PSE_S,
+			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
+		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
+		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+	} else {
+		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
+	}
+
+	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
+		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+}
+
+void mt7603_mac_dma_start(struct mt7603_dev *dev)
+{
+	mt7603_mac_start(dev);
+
+	wait_for_wpdma(dev);
+	usleep_range(50, 100);
+
+	mt76_set(dev, MT_WPDMA_GLO_CFG,
+		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));
+
+	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+}
+
+void mt7603_mac_start(struct mt7603_dev *dev)
+{
+	mt76_clear(dev, MT_ARB_SCR,
+		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
+	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_mac_stop(struct mt7603_dev *dev)
+{
+	mt76_set(dev, MT_ARB_SCR,
+		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
+	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev)
+{
+	u32 addr;
+
+	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
+				   MT_CLIENT_RESET_TX);
+
+	/* Clear previous reset state */
+	mt76_clear(dev, addr,
+		   MT_CLIENT_RESET_TX_R_E_1 |
+		   MT_CLIENT_RESET_TX_R_E_2 |
+		   MT_CLIENT_RESET_TX_R_E_1_S |
+		   MT_CLIENT_RESET_TX_R_E_2_S);
+
+	/* Start PSE client TX abort */
+	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+		       MT_CLIENT_RESET_TX_R_E_1_S, 500);
+
+	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
+	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+	/* Wait for PSE client to clear TX FIFO */
+	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
+		       MT_CLIENT_RESET_TX_R_E_2_S, 500);
+
+	/* Clear PSE client TX abort state */
+	mt76_clear(dev, addr,
+		   MT_CLIENT_RESET_TX_R_E_1 |
+		   MT_CLIENT_RESET_TX_R_E_2);
+}
+
+static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
+{
+	if (!is_mt7628(dev))
+		return;
+
+	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
+	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
+}
+
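+/*
+ * Full watchdog recovery: stop mac80211 queues, tasklets and NAPI, reset
+ * the PSE and DMA scheduler where needed, clean up all TX/RX queues and
+ * restart DMA before re-enabling interrupts and rearming the beacon timer.
+ */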
+static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+{
+	int beacon_int = dev->mt76.beacon_int;
+	u32 mask = dev->mt76.mmio.irqmask;
+	int i;
+
+	ieee80211_stop_queues(dev->mt76.hw);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	/* lock/unlock all queues to ensure that no tx is pending */
+	mt76_txq_schedule_all(&dev->mt76);
+
+	tasklet_disable(&dev->mt76.tx_tasklet);
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	napi_disable(&dev->mt76.napi[0]);
+	napi_disable(&dev->mt76.napi[1]);
+	napi_disable(&dev->mt76.tx_napi);
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt7603_beacon_set_timer(dev, -1, 0);
+
+	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+		mt7603_pse_reset(dev);
+
+	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+		goto skip_dma_reset;
+
+	mt7603_mac_stop(dev);
+
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+	usleep_range(1000, 2000);
+
+	mt7603_irq_disable(dev, mask);
+
+	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+
+	mt7603_pse_client_reset(dev);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+		mt76_queue_tx_cleanup(dev, i, true);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+		mt76_queue_rx_reset(dev, i);
+
+	mt7603_dma_sched_reset(dev);
+
+	mt7603_mac_dma_start(dev);
+
+	mt7603_irq_enable(dev, mask);
+
+skip_dma_reset:
+	clear_bit(MT76_RESET, &dev->mt76.state);
+	mutex_unlock(&dev->mt76.mutex);
+
+	tasklet_enable(&dev->mt76.tx_tasklet);
+	napi_enable(&dev->mt76.tx_napi);
+	napi_schedule(&dev->mt76.tx_napi);
+
+	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+	mt7603_beacon_set_timer(dev, -1, beacon_int);
+
+	napi_enable(&dev->mt76.napi[0]);
+	napi_schedule(&dev->mt76.napi[0]);
+
+	napi_enable(&dev->mt76.napi[1]);
+	napi_schedule(&dev->mt76.napi[1]);
+
+	ieee80211_wake_queues(dev->mt76.hw);
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
+{
+	u32 val;
+
+	mt76_wr(dev, MT_WPDMA_DEBUG,
+		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
+		MT_WPDMA_DEBUG_SEL);
+
+	val = mt76_rr(dev, MT_WPDMA_DEBUG);
+	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
+}
+
+static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
+{
+	if (is_mt7628(dev))
+		return mt7603_dma_debug(dev, 9) & BIT(9);
+
+	return mt7603_dma_debug(dev, 2) & BIT(8);
+}
+
+static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
+{
+	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
+		return false;
+
+	return mt7603_rx_fifo_busy(dev);
+}
+
+static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
+{
+	u32 val;
+
+	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
+		return false;
+
+	val = mt7603_dma_debug(dev, 9);
+	return (val & BIT(8)) && (val & 0xf) != 0xf;
+}
+
+static bool mt7603_tx_hang(struct mt7603_dev *dev)
+{
+	struct mt76_queue *q;
+	u32 dma_idx, prev_dma_idx;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		q = dev->mt76.q_tx[i].q;
+
+		if (!q->queued)
+			continue;
+
+		prev_dma_idx = dev->tx_dma_idx[i];
+		dma_idx = readl(&q->regs->dma_idx);
+		dev->tx_dma_idx[i] = dma_idx;
+
+		if (dma_idx == prev_dma_idx &&
+		    dma_idx != readl(&q->regs->cpu_idx))
+			break;
+	}
+
+	return i < 4;
+}
+
+static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+{
+	u32 addr, val;
+
+	if (mt7603_rx_fifo_busy(dev))
+		goto out;
+
+	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+	mt76_wr(dev, addr, 3);
+	val = mt76_rr(dev, addr) >> 16;
+
+	if (!(val & BIT(0)))
+		return false;
+
+	if (is_mt7628(dev))
+		val &= 0xa000;
+	else
+		val &= 0x8000;
+	if (!val)
+		return false;
+
+out:
+	if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
+	    (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
+		return false;
+
+	return true;
+}
+
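+/*
+ * Common watchdog helper: while the check callback keeps reporting busy,
+ * the per-cause counter counts up to MT7603_WATCHDOG_TIMEOUT, at which
+ * point a reset with the given cause is triggered. dev->reset_test can
+ * force a specific cause for testing.
+ */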
+static bool
+mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
+		      enum mt7603_reset_cause cause,
+		      bool (*check)(struct mt7603_dev *dev))
+{
+	if (dev->reset_test == cause + 1) {
+		dev->reset_test = 0;
+		goto trigger;
+	}
+
+	if (check) {
+		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
+			*counter = 0;
+			return false;
+		}
+
+		(*counter)++;
+	}
+
+	if (*counter < MT7603_WATCHDOG_TIMEOUT)
+		return false;
+trigger:
+	dev->cur_reset_cause = cause;
+	dev->reset_cause[cause]++;
+	return true;
+}
+
+void mt7603_update_channel(struct mt76_dev *mdev)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct mt76_channel_state *state;
+	ktime_t cur_time;
+	u32 busy;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return;
+
+	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+	busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);
+
+	spin_lock_bh(&dev->mt76.cc_lock);
+	cur_time = ktime_get_boottime();
+	state->cc_busy += busy;
+	state->cc_active += ktime_to_us(ktime_sub(cur_time,
+						  dev->mt76.survey_time));
+	dev->mt76.survey_time = cur_time;
+	spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void
+mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
+{
+	u32 rxtd_6 = 0xd7c80000;
+
+	if (val == dev->ed_strict_mode)
+		return;
+
+	dev->ed_strict_mode = val;
+
+	/* Ensure that ED/CCA does not trigger if disabled */
+	if (!dev->ed_monitor)
+		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
+	else
+		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);
+
+	if (dev->ed_monitor && !dev->ed_strict_mode)
+		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
+	else
+		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);
+
+	mt76_wr(dev, MT_RXTD(6), rxtd_6);
+
+	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
+		       dev->ed_monitor && !dev->ed_strict_mode);
+}
+
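+/*
+ * Periodic ED/CCA monitoring: track the energy-detect busy ratio and the
+ * presence of strong signals, and switch strict ED/CCA mode on or off with
+ * hysteresis via the ed_trigger counter.
+ */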
+static void
+mt7603_edcca_check(struct mt7603_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_AGC(41));
+	ktime_t cur_time;
+	int rssi0, rssi1;
+	u32 active;
+	u32 ed_busy;
+
+	if (!dev->ed_monitor)
+		return;
+
+	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
+	if (rssi0 > 128)
+		rssi0 -= 256;
+
+	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
+	if (rssi1 > 128)
+		rssi1 -= 256;
+
+	if (max(rssi0, rssi1) >= -40 &&
+	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
+		dev->ed_strong_signal++;
+	else if (dev->ed_strong_signal > 0)
+		dev->ed_strong_signal--;
+
+	cur_time = ktime_get_boottime();
+	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;
+
+	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+	dev->ed_time = cur_time;
+
+	if (!active)
+		return;
+
+	if (100 * ed_busy / active > 90) {
+		if (dev->ed_trigger < 0)
+			dev->ed_trigger = 0;
+		dev->ed_trigger++;
+	} else {
+		if (dev->ed_trigger > 0)
+			dev->ed_trigger = 0;
+		dev->ed_trigger--;
+	}
+
+	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
+	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
+		mt7603_edcca_set_strict(dev, true);
+	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
+		mt7603_edcca_set_strict(dev, false);
+	}
+
+	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
+		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
+	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
+		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
+}
+
+void mt7603_cca_stats_reset(struct mt7603_dev *dev)
+{
+	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
+}
+
+static void
+mt7603_adjust_sensitivity(struct mt7603_dev *dev)
+{
+	u32 agc0 = dev->agc0, agc3 = dev->agc3;
+	u32 adj;
+
+	if (!dev->sensitivity || dev->sensitivity < -100) {
+		dev->sensitivity = 0;
+	} else if (dev->sensitivity <= -84) {
+		adj = 7 + (dev->sensitivity + 92) / 2;
+
+		agc0 = 0x56f0076f;
+		agc0 |= adj << 12;
+		agc0 |= adj << 16;
+		agc3 = 0x81d0d5e3;
+	} else if (dev->sensitivity <= -72) {
+		adj = 7 + (dev->sensitivity + 80) / 2;
+
+		agc0 = 0x6af0006f;
+		agc0 |= adj << 8;
+		agc0 |= adj << 12;
+		agc0 |= adj << 16;
+
+		agc3 = 0x8181d5e3;
+	} else {
+		if (dev->sensitivity > -54)
+			dev->sensitivity = -54;
+
+		adj = 7 + (dev->sensitivity + 80) / 2;
+
+		agc0 = 0x7ff0000f;
+		agc0 |= adj << 4;
+		agc0 |= adj << 8;
+		agc0 |= adj << 12;
+		agc0 |= adj << 16;
+
+		agc3 = 0x818181e3;
+	}
+
+	mt76_wr(dev, MT_AGC(0), agc0);
+	mt76_wr(dev, MT_AGC1(0), agc0);
+
+	mt76_wr(dev, MT_AGC(3), agc3);
+	mt76_wr(dev, MT_AGC1(3), agc3);
+}
+
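+/*
+ * Estimate the false CCA rate from the PD vs MDRDY counters and adjust the
+ * RX sensitivity accordingly, bounded by the minimum average RSSI of the
+ * connected stations; the result is applied through the AGC registers.
+ */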
+static void
+mt7603_false_cca_check(struct mt7603_dev *dev)
+{
+	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
+	int false_cca;
+	int min_signal;
+	u32 val;
+
+	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
+	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
+	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
+
+	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
+	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
+	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);
+
+	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+	dev->false_cca_cck = pd_cck - mdrdy_cck;
+
+	mt7603_cca_stats_reset(dev);
+
+	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
+	if (!min_signal) {
+		dev->sensitivity = 0;
+		dev->last_cca_adj = jiffies;
+		goto out;
+	}
+
+	min_signal -= 15;
+
+	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
+	if (false_cca > 600) {
+		if (!dev->sensitivity)
+			dev->sensitivity = -92;
+		else
+			dev->sensitivity += 2;
+		dev->last_cca_adj = jiffies;
+	} else if (false_cca < 100 ||
+		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
+		dev->last_cca_adj = jiffies;
+		if (!dev->sensitivity)
+			goto out;
+
+		dev->sensitivity -= 2;
+	}
+
+	if (dev->sensitivity && dev->sensitivity > min_signal) {
+		dev->sensitivity = min_signal;
+		dev->last_cca_adj = jiffies;
+	}
+
+out:
+	mt7603_adjust_sensitivity(dev);
+}
+
+void mt7603_mac_work(struct work_struct *work)
+{
+	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
+					      mt76.mac_work.work);
+	bool reset = false;
+
+	mt76_tx_status_check(&dev->mt76, NULL, false);
+
+	mutex_lock(&dev->mt76.mutex);
+
+	dev->mac_work_count++;
+	mt7603_update_channel(&dev->mt76);
+	mt7603_edcca_check(dev);
+
+	if (dev->mac_work_count == 10)
+		mt7603_false_cca_check(dev);
+
+	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
+				  RESET_CAUSE_RX_PSE_BUSY,
+				  mt7603_rx_pse_busy) ||
+	    mt7603_watchdog_check(dev, &dev->beacon_check,
+				  RESET_CAUSE_BEACON_STUCK,
+				  NULL) ||
+	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
+				  RESET_CAUSE_TX_HANG,
+				  mt7603_tx_hang) ||
+	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
+				  RESET_CAUSE_TX_BUSY,
+				  mt7603_tx_dma_busy) ||
+	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
+				  RESET_CAUSE_RX_BUSY,
+				  mt7603_rx_dma_busy) ||
+	    mt7603_watchdog_check(dev, &dev->mcu_hang,
+				  RESET_CAUSE_MCU_HANG,
+				  NULL) ||
+	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
+		dev->beacon_check = 0;
+		dev->tx_dma_check = 0;
+		dev->tx_hang_check = 0;
+		dev->rx_dma_check = 0;
+		dev->rx_pse_check = 0;
+		dev->mcu_hang = 0;
+		dev->rx_dma_idx = ~0;
+		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
+		reset = true;
+		dev->mac_work_count = 0;
+	}
+
+	if (dev->mac_work_count >= 10)
+		dev->mac_work_count = 0;
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	if (reset)
+		mt7603_mac_watchdog_reset(dev);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
new file mode 100644
index 0000000..17e34ec
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MAC_H
+#define __MT7603_MAC_H
+
+#define MT_RXD0_LENGTH			GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE		GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS	GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM		BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM	BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1		BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2		BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3		BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4		BIT(28)
+
+enum rx_pkt_type {
+	PKT_TYPE_TXS		= 0,
+	PKT_TYPE_TXRXV		= 1,
+	PKT_TYPE_NORMAL		= 2,
+	PKT_TYPE_RX_DUP_RFB	= 3,
+	PKT_TYPE_RX_TMR		= 4,
+	PKT_TYPE_RETRIEVE	= 5,
+	PKT_TYPE_RX_EVENT	= 7,
+};
+
+#define MT_RXD1_NORMAL_BSSID		GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT	GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS	BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET	BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN	GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ		GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID		GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC	BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC	BIT(4)
+#define MT_RXD1_NORMAL_BCAST		BIT(3)
+#define MT_RXD1_NORMAL_MCAST		BIT(2)
+#define MT_RXD1_NORMAL_U2M		BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD		BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU	BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB	BIT(30)
+#define MT_RXD2_NORMAL_NDATA		BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME	BIT(28)
+#define MT_RXD2_NORMAL_FRAG		BIT(27)
+#define MT_RXD2_NORMAL_UDF_VALID	BIT(26)
+#define MT_RXD2_NORMAL_LLC_MIS		BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR	BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR	BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH	BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR	BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR		BIT(20)
+#define MT_RXD2_NORMAL_CLM		BIT(19)
+#define MT_RXD2_NORMAL_CM		BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR		BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT		BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE		GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID		GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX		GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS		GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE		BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP	GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL		GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT	BIT(13)
+#define MT_RXD3_NORMAL_OFLD		GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS		BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP	BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS	BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ		GENMASK(7, 0)
+
+#define MT_RXV1_VHTA1_B5_B4		GENMASK(31, 30)
+#define MT_RXV1_VHTA2_B8_B1		GENMASK(29, 22)
+#define MT_RXV1_HT_NO_SOUND		BIT(21)
+#define MT_RXV1_HT_SMOOTH		BIT(20)
+#define MT_RXV1_HT_SHORT_GI		BIT(19)
+#define MT_RXV1_HT_AGGR			BIT(18)
+#define MT_RXV1_VHTA1_B22		BIT(17)
+#define MT_RXV1_FRAME_MODE		GENMASK(16, 15)
+#define MT_RXV1_TX_MODE			GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF		GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE		BIT(9)
+#define MT_RXV1_HT_STBC			GENMASK(8, 7)
+#define MT_RXV1_TX_RATE			GENMASK(6, 0)
+
+#define MT_RXV2_VHTA1_B16_B6		GENMASK(31, 21)
+#define MT_RXV2_LENGTH			GENMASK(20, 0)
+
+#define MT_RXV3_F_AGC1_CAL_GAIN		GENMASK(31, 29)
+#define MT_RXV3_F_AGC1_EQ_CAL		BIT(28)
+#define MT_RXV3_RCPI1			GENMASK(27, 20)
+#define MT_RXV3_F_AGC0_CAL_GAIN		GENMASK(19, 17)
+#define MT_RXV3_F_AGC0_EQ_CAL		BIT(16)
+#define MT_RXV3_RCPI0			GENMASK(15, 8)
+#define MT_RXV3_SEL_ANT			BIT(7)
+#define MT_RXV3_ACI_DET_X		BIT(6)
+#define MT_RXV3_OFDM_FREQ_TRANS_DETECT	BIT(5)
+#define MT_RXV3_VHTA1_B21_B17		GENMASK(4, 0)
+
+#define MT_RXV4_F_AGC_CAL_GAIN		GENMASK(31, 29)
+#define MT_RXV4_F_AGC2_EQ_CAL		BIT(28)
+#define MT_RXV4_IB_RSSI1		GENMASK(27, 20)
+#define MT_RXV4_F_AGC_LPF_GAIN_X	GENMASK(19, 16)
+#define MT_RXV4_WB_RSSI_X		GENMASK(15, 8)
+#define MT_RXV4_IB_RSSI0		GENMASK(7, 0)
+
+#define MT_RXV5_LTF_SNR0		GENMASK(31, 26)
+#define MT_RXV5_LTF_PROC_TIME		GENMASK(25, 19)
+#define MT_RXV5_FOE			GENMASK(18, 7)
+#define MT_RXV5_C_AGC_SATE		GENMASK(6, 4)
+#define MT_RXV5_F_AGC_LNA_GAIN_0	GENMASK(3, 2)
+#define MT_RXV5_F_AGC_LNA_GAIN_1	GENMASK(1, 0)
+
+#define MT_RXV6_C_AGC_STATE		GENMASK(30, 28)
+#define MT_RXV6_NS_TS_FIELD		GENMASK(27, 25)
+#define MT_RXV6_RX_VALID		BIT(24)
+#define MT_RXV6_NF2			GENMASK(23, 16)
+#define MT_RXV6_NF1			GENMASK(15, 8)
+#define MT_RXV6_NF0			GENMASK(7, 0)
+
+enum mt7603_tx_header_format {
+	MT_HDR_FORMAT_802_3,
+	MT_HDR_FORMAT_CMD,
+	MT_HDR_FORMAT_802_11,
+	MT_HDR_FORMAT_802_11_EXT,
+};
+
+#define MT_TXD_SIZE			(8 * 4)
+
+#define MT_TXD0_P_IDX			BIT(31)
+#define MT_TXD0_Q_IDX			GENMASK(30, 27)
+#define MT_TXD0_UTXB			BIT(26)
+#define MT_TXD0_UNXV			BIT(25)
+#define MT_TXD0_UDP_TCP_SUM		BIT(24)
+#define MT_TXD0_IP_SUM			BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET		GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES		GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC			GENMASK(31, 26)
+#define MT_TXD1_PROTECTED		BIT(23)
+#define MT_TXD1_TID			GENMASK(22, 20)
+#define MT_TXD1_NO_ACK			BIT(19)
+#define MT_TXD1_HDR_PAD			GENMASK(18, 16)
+#define MT_TXD1_LONG_FORMAT		BIT(15)
+#define MT_TXD1_HDR_FORMAT		GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO		GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX		GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE		BIT(31)
+#define MT_TXD2_TIMING_MEASURE		BIT(30)
+#define MT_TXD2_BA_DISABLE		BIT(29)
+#define MT_TXD2_POWER_OFFSET		GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME		GENMASK(23, 16)
+#define MT_TXD2_FRAG			GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD			BIT(13)
+#define MT_TXD2_DURATION		BIT(12)
+#define MT_TXD2_BIP			BIT(11)
+#define MT_TXD2_MULTICAST		BIT(10)
+#define MT_TXD2_RTS			BIT(9)
+#define MT_TXD2_SOUNDING		BIT(8)
+#define MT_TXD2_NDPA			BIT(7)
+#define MT_TXD2_NDP			BIT(6)
+#define MT_TXD2_FRAME_TYPE		GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE		GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID		BIT(31)
+#define MT_TXD3_PN_VALID		BIT(30)
+#define MT_TXD3_SEQ			GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT		GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT		GENMASK(10, 6)
+
+#define MT_TXD4_PN_LOW			GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH			GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT		BIT(13)
+#define MT_TXD5_BA_SEQ_CTRL		BIT(12)
+#define MT_TXD5_DA_SELECT		BIT(11)
+#define MT_TXD5_TX_STATUS_HOST		BIT(10)
+#define MT_TXD5_TX_STATUS_MCU		BIT(9)
+#define MT_TXD5_TX_STATUS_FMT		BIT(8)
+#define MT_TXD5_PID			GENMASK(7, 0)
+
+#define MT_TXD6_SGI			BIT(31)
+#define MT_TXD6_LDPC			BIT(30)
+#define MT_TXD6_TX_RATE			GENMASK(29, 18)
+#define MT_TXD6_I_TXBF			BIT(17)
+#define MT_TXD6_E_TXBF			BIT(16)
+#define MT_TXD6_DYN_BW			BIT(15)
+#define MT_TXD6_ANT_PRI			GENMASK(14, 12)
+#define MT_TXD6_SPE_EN			BIT(11)
+#define MT_TXD6_FIXED_BW		BIT(10)
+#define MT_TXD6_BW			GENMASK(9, 8)
+#define MT_TXD6_ANT_ID			GENMASK(7, 2)
+#define MT_TXD6_FIXED_RATE		BIT(0)
+
+#define MT_TX_RATE_STBC			BIT(11)
+#define MT_TX_RATE_NSS			GENMASK(10, 9)
+#define MT_TX_RATE_MODE			GENMASK(8, 6)
+#define MT_TX_RATE_IDX			GENMASK(5, 0)
+
+#define MT_TXS0_ANTENNA			GENMASK(31, 26)
+#define MT_TXS0_TID			GENMASK(25, 22)
+#define MT_TXS0_BA_ERROR		BIT(22)
+#define MT_TXS0_PS_FLAG			BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT		BIT(20)
+#define MT_TXS0_BIP_ERROR		BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT		BIT(18)
+#define MT_TXS0_RTS_TIMEOUT		BIT(17)
+#define MT_TXS0_ACK_TIMEOUT		BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK		GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST		BIT(15)
+#define MT_TXS0_TX_STATUS_MCU		BIT(14)
+#define MT_TXS0_TXS_FORMAT		BIT(13)
+#define MT_TXS0_FIXED_RATE		BIT(12)
+#define MT_TXS0_TX_RATE			GENMASK(11, 0)
+
+#define MT_TXS1_F0_TIMESTAMP		GENMASK(31, 0)
+#define MT_TXS1_F1_NOISE_2		GENMASK(23, 16)
+#define MT_TXS1_F1_NOISE_1		GENMASK(15, 8)
+#define MT_TXS1_F1_NOISE_0		GENMASK(7, 0)
+
+#define MT_TXS2_F0_FRONT_TIME		GENMASK(24, 0)
+#define MT_TXS2_F1_RCPI_2		GENMASK(23, 16)
+#define MT_TXS2_F1_RCPI_1		GENMASK(15, 8)
+#define MT_TXS2_F1_RCPI_0		GENMASK(7, 0)
+
+#define MT_TXS3_WCID			GENMASK(31, 24)
+#define MT_TXS3_RXV_SEQNO		GENMASK(23, 16)
+#define MT_TXS3_TX_DELAY		GENMASK(15, 0)
+
+#define MT_TXS4_LAST_TX_RATE		GENMASK(31, 29)
+#define MT_TXS4_TX_COUNT		GENMASK(28, 24)
+#define MT_TXS4_AMPDU			BIT(23)
+#define MT_TXS4_ACKED_MPDU		BIT(22)
+#define MT_TXS4_PID			GENMASK(21, 14)
+#define MT_TXS4_BW			GENMASK(13, 12)
+#define MT_TXS4_F0_SEQNO		GENMASK(11, 0)
+#define MT_TXS4_F1_TSSI			GENMASK(11, 0)
+
+#endif
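These RXD/RXV/TXD/TXS words are normally consumed with the bitfield helpers from <linux/bitfield.h>. As a short illustration (not part of the patch; the real parsing lives in mac.c, which is not shown here), the RXD2 masks above would typically be read like this:

#include <linux/bitfield.h>

/* Illustrative only: pull a few fields out of RXD word 2 after le32_to_cpu(). */
static void mt7603_rxd2_example(u32 rxd2, u8 *wcid, u8 *tid, bool *fcs_err)
{
	*wcid = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	*tid = FIELD_GET(MT_RXD2_NORMAL_TID, rxd2);
	*fcs_err = !!(rxd2 & MT_RXD2_NORMAL_FCS_ERR);
}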
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/main.c
new file mode 100644
index 0000000..6254922
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static int
+mt7603_start(struct ieee80211_hw *hw)
+{
+	struct mt7603_dev *dev = hw->priv;
+
+	mt7603_mac_start(dev);
+	dev->mt76.survey_time = ktime_get_boottime();
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt7603_mac_work(&dev->mt76.mac_work.work);
+
+	return 0;
+}
+
+static void
+mt7603_stop(struct ieee80211_hw *hw)
+{
+	struct mt7603_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+	mt7603_mac_stop(dev);
+}
+
+static int
+mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	struct mt7603_dev *dev = hw->priv;
+	struct mt76_txq *mtxq;
+	u8 bc_addr[ETH_ALEN];
+	int idx;
+	int ret = 0;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mvif->idx = ffs(~dev->vif_mask) - 1;
+	if (mvif->idx >= MT7603_MAX_INTERFACES) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
+		get_unaligned_le32(vif->addr));
+	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
+		(get_unaligned_le16(vif->addr + 4) |
+		 MT_MAC_ADDR1_VALID));
+
+	if (vif->type == NL80211_IFTYPE_AP) {
+		mt76_wr(dev, MT_BSSID0(mvif->idx),
+			get_unaligned_le32(vif->addr));
+		mt76_wr(dev, MT_BSSID1(mvif->idx),
+			(get_unaligned_le16(vif->addr + 4) |
+			 MT_BSSID1_VALID));
+	}
+
+	idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
+	dev->vif_mask |= BIT(mvif->idx);
+	mvif->sta.wcid.idx = idx;
+	mvif->sta.wcid.hw_key_idx = -1;
+
+	eth_broadcast_addr(bc_addr);
+	mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
+
+	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+	mtxq->wcid = &mvif->sta.wcid;
+	mt76_txq_init(&dev->mt76, vif->txq);
+	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
+out:
+	mutex_unlock(&dev->mt76.mutex);
+
+	return ret;
+}
+
+static void
+mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	struct mt7603_dev *dev = hw->priv;
+	int idx = mvif->sta.wcid.idx;
+
+	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
+	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
+	mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+	mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+	mt7603_beacon_set_timer(dev, mvif->idx, 0);
+
+	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+	mt76_txq_remove(&dev->mt76, vif->txq);
+
+	mutex_lock(&dev->mt76.mutex);
+	dev->vif_mask &= ~BIT(mvif->idx);
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+void mt7603_init_edcca(struct mt7603_dev *dev)
+{
+	/* Set lower signal level to -65dBm */
+	mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
+
+	/* clear previous energy detect monitor results */
+	mt76_rr(dev, MT_MIB_STAT_ED);
+
+	if (dev->ed_monitor)
+		mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+	else
+		mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+
+	dev->ed_strict_mode = 0xff;
+	dev->ed_strong_signal = 0;
+	dev->ed_time = ktime_get_boottime();
+
+	mt7603_edcca_set_strict(dev, false);
+}
+
+static int
+mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
+{
+	u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
+	int idx, ret;
+	u8 bw = MT_BW_20;
+	bool failed = false;
+
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+
+	mutex_lock(&dev->mt76.mutex);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	mt7603_beacon_set_timer(dev, -1, 0);
+	mt76_set_channel(&dev->mt76);
+	mt7603_mac_stop(dev);
+
+	if (def->width == NL80211_CHAN_WIDTH_40)
+		bw = MT_BW_40;
+
+	dev->mt76.chandef = *def;
+	mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
+	ret = mt7603_mcu_set_channel(dev);
+	if (ret) {
+		failed = true;
+		goto out;
+	}
+
+	if (def->chan->band == NL80211_BAND_5GHZ) {
+		idx = 1;
+		rssi_data += MT_EE_RSSI_OFFSET_5G;
+	} else {
+		idx = 0;
+		rssi_data += MT_EE_RSSI_OFFSET_2G;
+	}
+
+	memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));
+
+	idx |= (def->chan -
+		mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
+	mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
+	mt7603_mac_set_timing(dev);
+	mt7603_mac_start(dev);
+
+	clear_bit(MT76_RESET, &dev->mt76.state);
+
+	mt76_txq_schedule_all(&dev->mt76);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
+
+	/* reset channel stats */
+	mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
+	mt76_set(dev, MT_MIB_CTL,
+		 MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
+	mt76_rr(dev, MT_MIB_STAT_PSCCA);
+	mt7603_cca_stats_reset(dev);
+
+	dev->mt76.survey_time = ktime_get_boottime();
+
+	mt7603_init_edcca(dev);
+
+out:
+	if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+		mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
+	mutex_unlock(&dev->mt76.mutex);
+
+	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+	if (failed)
+		mt7603_mac_work(&dev->mt76.mac_work.work);
+
+	return ret;
+}
+
+static int
+mt7603_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt7603_dev *dev = hw->priv;
+	int ret = 0;
+
+	if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+		       IEEE80211_CONF_CHANGE_POWER)) {
+		ieee80211_stop_queues(hw);
+		ret = mt7603_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		mutex_lock(&dev->mt76.mutex);
+
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+		else
+			dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+		mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+
+		mutex_unlock(&dev->mt76.mutex);
+	}
+
+	return ret;
+}
+
+static void
+mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct mt7603_dev *dev = hw->priv;
+	u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
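+	/*
+	 * MT76_FILTER() keeps a FIF_* flag in 'flags' when mac80211 requested
+	 * it and programs the complementary hardware drop bits: the
+	 * "!(flags & FIF_##_flag) * (_hw)" term sets the drop bits only when
+	 * the corresponding filter flag is absent.
+	 */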
+
+	dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+			   MT_WF_RFCR_DROP_OTHER_BEACON |
+			   MT_WF_RFCR_DROP_FRAME_REPORT |
+			   MT_WF_RFCR_DROP_PROBEREQ |
+			   MT_WF_RFCR_DROP_MCAST_FILTERED |
+			   MT_WF_RFCR_DROP_MCAST |
+			   MT_WF_RFCR_DROP_BCAST |
+			   MT_WF_RFCR_DROP_DUPLICATE |
+			   MT_WF_RFCR_DROP_A2_BSSID |
+			   MT_WF_RFCR_DROP_UNWANTED_CTL |
+			   MT_WF_RFCR_DROP_STBC_MULTI);
+
+	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+			       MT_WF_RFCR_DROP_A3_MAC |
+			       MT_WF_RFCR_DROP_A3_BSSID);
+
+	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+			     MT_WF_RFCR_DROP_RTS |
+			     MT_WF_RFCR_DROP_CTL_RSV |
+			     MT_WF_RFCR_DROP_NDPA);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+}
+
+static void
+mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt7603_dev *dev = hw->priv;
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
+		if (info->assoc || info->ibss_joined) {
+			mt76_wr(dev, MT_BSSID0(mvif->idx),
+				get_unaligned_le32(info->bssid));
+			mt76_wr(dev, MT_BSSID1(mvif->idx),
+				(get_unaligned_le16(info->bssid + 4) |
+				 MT_BSSID1_VALID));
+		} else {
+			mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+			mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+		}
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		if (slottime != dev->slottime) {
+			dev->slottime = slottime;
+			mt7603_mac_set_timing(dev);
+		}
+	}
+
+	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
+		int beacon_int = !!info->enable_beacon * info->beacon_int;
+
+		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+		mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
+		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+int
+mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+	       struct ieee80211_sta *sta)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	int idx;
+	int ret = 0;
+
+	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
+	if (idx < 0)
+		return -ENOSPC;
+
+	__skb_queue_head_init(&msta->psq);
+	msta->ps = ~0;
+	msta->smps = ~0;
+	msta->wcid.sta = 1;
+	msta->wcid.idx = idx;
+	mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
+	mt7603_wtbl_set_ps(dev, msta, false);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+	return ret;
+}
+
+void
+mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		 struct ieee80211_sta *sta)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+	mt7603_wtbl_update_cap(dev, sta);
+}
+
+void
+mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		  struct ieee80211_sta *sta)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+	spin_lock_bh(&dev->ps_lock);
+	__skb_queue_purge(&msta->psq);
+	mt7603_filter_tx(dev, wcid->idx, true);
+	spin_unlock_bh(&dev->ps_lock);
+
+	mt7603_wtbl_clear(dev, wcid->idx);
+}
+
+static void
+mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(list)) != NULL)
+		mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
+				      skb, 0);
+}
+
+void
+mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	struct sk_buff_head list;
+
+	mt76_stop_tx_queues(&dev->mt76, sta, true);
+	mt7603_wtbl_set_ps(dev, msta, ps);
+	if (ps)
+		return;
+
+	__skb_queue_head_init(&list);
+
+	spin_lock_bh(&dev->ps_lock);
+	skb_queue_splice_tail_init(&msta->psq, &list);
+	spin_unlock_bh(&dev->ps_lock);
+
+	mt7603_ps_tx_list(dev, &list);
+}
+
+static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr;
+
+	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
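+/*
+ * Called by mac80211 to release frames buffered in the per-station PS queue
+ * (PS-Poll and U-APSD service periods). Buffered skbs already carry the
+ * hardware TXD, which is why mt7603_ps_set_more_data() locates the 802.11
+ * header at offset MT_TXD_SIZE; whatever cannot be served from the driver
+ * queue is handed back to mac80211 via mt76_release_buffered_frames().
+ */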
+static void
+mt7603_release_buffered_frames(struct ieee80211_hw *hw,
+			       struct ieee80211_sta *sta,
+			       u16 tids, int nframes,
+			       enum ieee80211_frame_release_type reason,
+			       bool more_data)
+{
+	struct mt7603_dev *dev = hw->priv;
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	struct sk_buff_head list;
+	struct sk_buff *skb, *tmp;
+
+	__skb_queue_head_init(&list);
+
+	mt7603_wtbl_set_ps(dev, msta, false);
+
+	spin_lock_bh(&dev->ps_lock);
+	skb_queue_walk_safe(&msta->psq, skb, tmp) {
+		if (!nframes)
+			break;
+
+		if (!(tids & BIT(skb->priority)))
+			continue;
+
+		skb_set_queue_mapping(skb, MT_TXQ_PSD);
+		__skb_unlink(skb, &msta->psq);
+		mt7603_ps_set_more_data(skb);
+		__skb_queue_tail(&list, skb);
+		nframes--;
+	}
+	spin_unlock_bh(&dev->ps_lock);
+
+	if (!skb_queue_empty(&list))
+		ieee80211_sta_eosp(sta);
+
+	mt7603_ps_tx_list(dev, &list);
+
+	if (nframes)
+		mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
+					     more_data);
+}
+
+static int
+mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+	       struct ieee80211_key_conf *key)
+{
+	struct mt7603_dev *dev = hw->priv;
+	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+	struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
+				  &mvif->sta;
+	struct mt76_wcid *wcid = &msta->wcid;
+	int idx = key->keyidx;
+
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * The hardware does not support per-STA RX GTK; fall back to
+	 * software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else {
+		if (idx == wcid->hw_key_idx)
+			wcid->hw_key_idx = -1;
+
+		key = NULL;
+	}
+	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+	return mt7603_wtbl_set_key(dev, wcid->idx, key);
+}
+
+static int
+mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct mt7603_dev *dev = hw->priv;
+	u16 cw_min = (1 << 5) - 1;
+	u16 cw_max = (1 << 10) - 1;
+	u32 val;
+
+	queue = dev->mt76.q_tx[queue].q->hw_idx;
+
+	if (params->cw_min)
+		cw_min = params->cw_min;
+	if (params->cw_max)
+		cw_max = params->cw_max;
+
+	mutex_lock(&dev->mt76.mutex);
+	mt7603_mac_stop(dev);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(queue));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_TXOP(queue), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX(queue));
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_CWMAX(queue), val);
+
+	mt7603_mac_start(dev);
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+static void
+mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	     u32 queues, bool drop)
+{
+}
+
+static int
+mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct mt7603_dev *dev = hw->priv;
+	struct ieee80211_sta *sta = params->sta;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	u16 tid = params->tid;
+	u16 ssn = params->ssn;
+	u8 ba_size = params->buf_size;
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+				   params->buf_size);
+		mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+static void
+mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta)
+{
+	struct mt7603_dev *dev = hw->priv;
+	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+	struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+	int i;
+
+	if (!sta_rates)
+		return;
+
+	spin_lock_bh(&dev->mt76.lock);
+	for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+		msta->rates[i].idx = sta_rates->rate[i].idx;
+		msta->rates[i].count = sta_rates->rate[i].count;
+		msta->rates[i].flags = sta_rates->rate[i].flags;
+
+		if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+			break;
+	}
+	msta->n_rates = i;
+	mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
+	msta->rate_probe = false;
+	mt7603_wtbl_set_smps(dev, msta,
+			     sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+	spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void
+mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+	struct mt7603_dev *dev = hw->priv;
+
+	dev->coverage_class = coverage_class;
+	mt7603_mac_set_timing(dev);
+}
+
+static void mt7603_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt7603_dev *dev = hw->priv;
+	struct mt76_wcid *wcid = &dev->global_sta.wcid;
+
+	if (control->sta) {
+		struct mt7603_sta *msta;
+
+		msta = (struct mt7603_sta *)control->sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif) {
+		struct mt7603_vif *mvif;
+
+		mvif = (struct mt7603_vif *)vif->drv_priv;
+		wcid = &mvif->sta.wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+const struct ieee80211_ops mt7603_ops = {
+	.tx = mt7603_tx,
+	.start = mt7603_start,
+	.stop = mt7603_stop,
+	.add_interface = mt7603_add_interface,
+	.remove_interface = mt7603_remove_interface,
+	.config = mt7603_config,
+	.configure_filter = mt7603_configure_filter,
+	.bss_info_changed = mt7603_bss_info_changed,
+	.sta_state = mt76_sta_state,
+	.set_key = mt7603_set_key,
+	.conf_tx = mt7603_conf_tx,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76_sw_scan_complete,
+	.flush = mt7603_flush,
+	.ampdu_action = mt7603_ampdu_action,
+	.get_txpower = mt76_get_txpower,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
+	.release_buffered_frames = mt7603_release_buffered_frames,
+	.set_coverage_class = mt7603_set_coverage_class,
+	.set_tim = mt76_set_tim,
+	.get_survey = mt76_get_survey,
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int __init mt7603_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mt76_wmac_driver);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_PCI
+	ret = pci_register_driver(&mt7603_pci_driver);
+	if (ret)
+		platform_driver_unregister(&mt76_wmac_driver);
+#endif
+	return ret;
+}
+
+static void __exit mt7603_exit(void)
+{
+#ifdef CONFIG_PCI
+	pci_unregister_driver(&mt7603_pci_driver);
+#endif
+	platform_driver_unregister(&mt76_wmac_driver);
+}
+
+module_init(mt7603_init);
+module_exit(mt7603_exit);
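Note on mt7603_conf_tx() above: the WMM registers pack one field per access category, selected with the MT_WMM_*_SHIFT() macros from regs.h, so each update is a read-modify-write of that AC's slice. A minimal sketch of that step, using a hypothetical helper name:

/* Illustrative only: generic per-AC field update, as done for
 * TXOP/AIFSN/CWMIN/CWMAX in mt7603_conf_tx().
 */
static u32 mt7603_wmm_field_set(u32 reg_val, u32 mask, int shift, u32 new_val)
{
	reg_val &= ~(mask << shift);
	reg_val |= (new_val & mask) << shift;
	return reg_val;
}

The AIFSN update, for instance, amounts to writing back
mt7603_wmm_field_set(mt76_rr(dev, MT_WMM_AIFSN), MT_WMM_AIFSN_MASK, MT_WMM_AIFSN_SHIFT(queue), params->aifs) with mt76_wr().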
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
new file mode 100644
index 0000000..02b2bd6
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/firmware.h>
+#include "mt7603.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+#define MCU_SKB_RESERVE	8
+
+struct mt7603_fw_trailer {
+	char fw_ver[10];
+	char build_date[15];
+	__le32 dl_len;
+} __packed;
+
+static int
+__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
+		      int cmd, int *wait_seq)
+{
+	int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
+	struct mt76_dev *mdev = &dev->mt76;
+	struct mt7603_mcu_txd *txd;
+	u8 seq;
+
+	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+	if (!seq)
+		seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+
+	txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
+	memset(txd, 0, hdrlen);
+
+	txd->len = cpu_to_le16(skb->len);
+	if (cmd == -MCU_CMD_FW_SCATTER)
+		txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW);
+	else
+		txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE);
+	txd->pkt_type = MCU_PKT_ID;
+	txd->seq = seq;
+
+	if (cmd < 0) {
+		txd->cid = -cmd;
+		txd->set_query = MCU_Q_NA;
+	} else {
+		txd->cid = MCU_CMD_EXT_CID;
+		txd->ext_cid = cmd;
+		txd->set_query = MCU_Q_SET;
+		txd->ext_cid_ack = 1;
+	}
+
+	if (wait_seq)
+		*wait_seq = seq;
+
+	return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
+}
+
+static int
+mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+		    int len, bool wait_resp)
+{
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+	unsigned long expires = jiffies + 3 * HZ;
+	struct mt7603_mcu_rxd *rxd;
+	struct sk_buff *skb;
+	int ret, seq;
+
+	skb = mt7603_mcu_msg_alloc(data, len);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&mdev->mmio.mcu.mutex);
+
+	ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
+	if (ret)
+		goto out;
+
+	while (wait_resp) {
+		bool check_seq = false;
+
+		skb = mt76_mcu_get_response(&dev->mt76, expires);
+		if (!skb) {
+			dev_err(mdev->dev,
+				"MCU message %d (seq %d) timed out\n",
+				cmd, seq);
+			dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		rxd = (struct mt7603_mcu_rxd *)skb->data;
+		if (seq == rxd->seq)
+			check_seq = true;
+
+		dev_kfree_skb(skb);
+
+		if (check_seq)
+			break;
+	}
+
+out:
+	mutex_unlock(&mdev->mmio.mcu.mutex);
+
+	return ret;
+}
+
+static int
+mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
+{
+	struct {
+		__le32 addr;
+		__le32 len;
+		__le32 mode;
+	} req = {
+		.addr = cpu_to_le32(addr),
+		.len = cpu_to_le32(len),
+		.mode = cpu_to_le32(BIT(31)),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int
+mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
+{
+	int cur_len, ret = 0;
+
+	while (len > 0) {
+		cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
+				len);
+
+		ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+					  data, cur_len, false);
+		if (ret)
+			break;
+
+		data += cur_len;
+		len -= cur_len;
+	}
+
+	return ret;
+}
+
+static int
+mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
+{
+	struct {
+		__le32 override;
+		__le32 addr;
+	} req = {
+		.override = cpu_to_le32(addr ? 1 : 0),
+		.addr = cpu_to_le32(addr),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int
+mt7603_mcu_restart(struct mt76_dev *dev)
+{
+	return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
+				   NULL, 0, true);
+}
+
+static int mt7603_load_firmware(struct mt7603_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt7603_fw_trailer *hdr;
+	const char *firmware;
+	int dl_len;
+	u32 addr, val;
+	int ret;
+
+	if (is_mt7628(dev)) {
+		if (mt76xx_rev(dev) == MT7628_REV_E1)
+			firmware = MT7628_FIRMWARE_E1;
+		else
+			firmware = MT7628_FIRMWARE_E2;
+	} else {
+		if (mt76xx_rev(dev) < MT7603_REV_E2)
+			firmware = MT7603_FIRMWARE_E1;
+		else
+			firmware = MT7603_FIRMWARE_E2;
+	}
+
+	ret = request_firmware(&fw, firmware, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "Invalid firmware\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size -
+						 sizeof(*hdr));
+
+	dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date);
+
+	addr = mt7603_reg_map(dev, 0x50012498);
+	mt76_wr(dev, addr, 0x5);
+	mt76_wr(dev, addr, 0x5);
+	udelay(1);
+
+	/* switch to bypass mode */
+	mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID,
+		 MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5));
+
+	val = mt76_rr(dev, MT_TOP_MISC2);
+	if (val & BIT(1)) {
+		dev_info(dev->mt76.dev, "Firmware already running...\n");
+		goto running;
+	}
+
+	if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) {
+		dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	dl_len = le32_to_cpu(hdr->dl_len) + 4;
+	ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Download request failed\n");
+		goto out;
+	}
+
+	ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+		goto out;
+	}
+
+	ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Failed to start firmware\n");
+		goto out;
+	}
+
+	if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) {
+		dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n");
+		ret = -EIO;
+		goto out;
+	}
+
+running:
+	mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS);
+
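+	/* pulse the scheduler reset bit (MT_SCH_4_RESET) */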
+	mt76_set(dev, MT_SCH_4, BIT(8));
+	mt76_clear(dev, MT_SCH_4, BIT(8));
+
+	dev->mcu_running = true;
+	snprintf(dev->mt76.hw->wiphy->fw_version,
+		 sizeof(dev->mt76.hw->wiphy->fw_version),
+		 "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+	dev_info(dev->mt76.dev, "firmware init done\n");
+
+out:
+	release_firmware(fw);
+
+	return ret;
+}
+
+int mt7603_mcu_init(struct mt7603_dev *dev)
+{
+	static const struct mt76_mcu_ops mt7603_mcu_ops = {
+		.mcu_send_msg = mt7603_mcu_msg_send,
+		.mcu_restart = mt7603_mcu_restart,
+	};
+
+	dev->mt76.mcu_ops = &mt7603_mcu_ops;
+	return mt7603_load_firmware(dev);
+}
+
+void mt7603_mcu_exit(struct mt7603_dev *dev)
+{
+	__mt76_mcu_restart(&dev->mt76);
+	skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+}
+
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
+{
+	static const u16 req_fields[] = {
+#define WORD(_start)			\
+		_start,			\
+		_start + 1
+#define GROUP_2G(_start)		\
+		WORD(_start),		\
+		WORD(_start + 2),	\
+		WORD(_start + 4)
+
+		MT_EE_NIC_CONF_0 + 1,
+		WORD(MT_EE_NIC_CONF_1),
+		MT_EE_WIFI_RF_SETTING,
+		MT_EE_TX_POWER_DELTA_BW40,
+		MT_EE_TX_POWER_DELTA_BW80 + 1,
+		MT_EE_TX_POWER_EXT_PA_5G,
+		MT_EE_TEMP_SENSOR_CAL,
+		GROUP_2G(MT_EE_TX_POWER_0_START_2G),
+		GROUP_2G(MT_EE_TX_POWER_1_START_2G),
+		WORD(MT_EE_TX_POWER_CCK),
+		WORD(MT_EE_TX_POWER_OFDM_2G_6M),
+		WORD(MT_EE_TX_POWER_OFDM_2G_24M),
+		WORD(MT_EE_TX_POWER_OFDM_2G_54M),
+		WORD(MT_EE_TX_POWER_HT_BPSK_QPSK),
+		WORD(MT_EE_TX_POWER_HT_16_64_QAM),
+		WORD(MT_EE_TX_POWER_HT_64_QAM),
+		MT_EE_ELAN_RX_MODE_GAIN,
+		MT_EE_ELAN_RX_MODE_NF,
+		MT_EE_ELAN_RX_MODE_P1DB,
+		MT_EE_ELAN_BYPASS_MODE_GAIN,
+		MT_EE_ELAN_BYPASS_MODE_NF,
+		MT_EE_ELAN_BYPASS_MODE_P1DB,
+		WORD(MT_EE_STEP_NUM_NEG_6_7),
+		WORD(MT_EE_STEP_NUM_NEG_4_5),
+		WORD(MT_EE_STEP_NUM_NEG_2_3),
+		WORD(MT_EE_STEP_NUM_NEG_0_1),
+		WORD(MT_EE_REF_STEP_24G),
+		WORD(MT_EE_STEP_NUM_PLUS_1_2),
+		WORD(MT_EE_STEP_NUM_PLUS_3_4),
+		WORD(MT_EE_STEP_NUM_PLUS_5_6),
+		MT_EE_STEP_NUM_PLUS_7,
+		MT_EE_XTAL_FREQ_OFFSET,
+		MT_EE_XTAL_TRIM_2_COMP,
+		MT_EE_XTAL_TRIM_3_COMP,
+		MT_EE_XTAL_WF_RFCAL,
+
+		/* unknown fields below */
+		WORD(0x24),
+		0x34,
+		0x39,
+		0x3b,
+		WORD(0x42),
+		WORD(0x9e),
+		0xf2,
+		WORD(0xf8),
+		0xfa,
+		0x12e,
+		WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136),
+		WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e),
+
+#undef GROUP_2G
+#undef WORD
+
+	};
+	struct req_data {
+		__le16 addr;
+		u8 val;
+		u8 pad;
+	} __packed;
+	struct {
+		u8 buffer_mode;
+		u8 len;
+		u8 pad[2];
+	} req_hdr = {
+		.buffer_mode = 1,
+		.len = ARRAY_SIZE(req_fields) - 1,
+	};
+	const int size = 0xff * sizeof(struct req_data);
+	u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+	int i, ret, len = sizeof(req_hdr) + size;
+	struct req_data *data;
+
+	BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
+
+	req = kmalloc(len, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memcpy(req, &req_hdr, sizeof(req_hdr));
+	data = (struct req_data *)(req + sizeof(req_hdr));
+	memset(data, 0, size);
+	for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
+		data[i].addr = cpu_to_le16(req_fields[i]);
+		data[i].val = eep[req_fields[i]];
+	}
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+				  req, len, true);
+	kfree(req);
+
+	return ret;
+}
+
+static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
+{
+	struct {
+		u8 center_channel;
+		u8 tssi;
+		u8 temp_comp;
+		u8 target_power[2];
+		u8 rate_power_delta[14];
+		u8 bw_power_delta;
+		u8 ch_power_delta[6];
+		u8 temp_comp_power[17];
+		u8 reserved;
+	} req = {
+		.center_channel = dev->mt76.chandef.chan->hw_value,
+#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
+		.tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
+		.temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
+		.target_power = {
+			EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2),
+			EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2)
+		},
+		.bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40),
+		.ch_power_delta = {
+			EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3),
+			EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4),
+			EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5),
+			EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3),
+			EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4),
+			EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5)
+		},
+#undef EEP_VAL
+	};
+	u8 *eep = (u8 *)dev->mt76.eeprom.data;
+
+	memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
+	       sizeof(req.rate_power_delta));
+
+	memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
+	       sizeof(req.temp_comp_power));
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	int n_chains = hweight8(dev->mt76.antenna_mask);
+	struct {
+		u8 control_chan;
+		u8 center_chan;
+		u8 bw;
+		u8 tx_streams;
+		u8 rx_streams;
+		u8 _res0[7];
+		u8 txpower[21];
+		u8 _res1[3];
+	} req = {
+		.control_chan = chandef->chan->hw_value,
+		.center_chan = chandef->chan->hw_value,
+		.bw = MT_BW_20,
+		.tx_streams = n_chains,
+		.rx_streams = n_chains,
+	};
+	s8 tx_power;
+	int i, ret;
+
+	if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
+		req.bw = MT_BW_40;
+		if (chandef->center_freq1 > chandef->chan->center_freq)
+			req.center_chan += 2;
+		else
+			req.center_chan -= 2;
+	}
+
+	tx_power = hw->conf.power_level * 2;
+	if (dev->mt76.antenna_mask == 3)
+		tx_power -= 6;
+	tx_power = min(tx_power, dev->tx_power_limit);
+
+	dev->mt76.txpower_cur = tx_power;
+
+	for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
+		req.txpower[i] = tx_power;
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+				  &req, sizeof(req), true);
+	if (ret)
+		return ret;
+
+	return mt7603_mcu_set_tx_power(dev);
+}
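The HT40 handling in mt7603_mcu_set_channel() above applies the usual secondary-channel rule: the center channel number sits two above or two below the control channel depending on where center_freq1 lies. The same computation as a standalone sketch, with a hypothetical helper name:

/* Illustrative only: derive the MCU 'center_chan' field from a chandef. */
static u8 mt7603_center_chan(const struct cfg80211_chan_def *chandef)
{
	u8 center = chandef->chan->hw_value;

	if (chandef->width == NL80211_CHAN_WIDTH_40) {
		if (chandef->center_freq1 > chandef->chan->center_freq)
			center += 2;	/* secondary channel above */
		else
			center -= 2;	/* secondary channel below */
	}

	return center;
}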
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
new file mode 100644
index 0000000..1bba369
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MCU_H
+#define __MT7603_MCU_H
+
+struct mt7603_mcu_txd {
+	__le16 len;
+	__le16 pq_id;
+
+	u8 cid;
+	u8 pkt_type;
+	u8 set_query;
+	u8 seq;
+
+	u8 uc_d2b0_rev;
+	u8 ext_cid;
+	u8 uc_d2b2_rev;
+	u8 ext_cid_ack;
+
+	u32 au4_d3_to_d7_rev[5];
+} __packed __aligned(4);
+
+struct mt7603_mcu_rxd {
+	__le16 len;
+	__le16 pkt_type_id;
+
+	u8 eid;
+	u8 seq;
+	__le16 __rsv;
+
+	u8 ext_eid;
+	u8 __rsv1[3];
+};
+
+#define MCU_PKT_ID		0xa0
+#define MCU_PORT_QUEUE		0x8000
+#define MCU_PORT_QUEUE_FW	0xc000
+
+#define MCU_FIRMWARE_ADDRESS	0x100000
+
+enum {
+	MCU_Q_QUERY,
+	MCU_Q_SET,
+	MCU_Q_RESERVED,
+	MCU_Q_NA
+};
+
+enum {
+	MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+	MCU_CMD_FW_START_REQ = 0x02,
+	MCU_CMD_INIT_ACCESS_REG = 0x3,
+	MCU_CMD_PATCH_START_REQ = 0x05,
+	MCU_CMD_PATCH_FINISH_REQ = 0x07,
+	MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+	MCU_CMD_HIF_LOOPBACK = 0x20,
+	MCU_CMD_CH_PRIVILEGE = 0x20,
+	MCU_CMD_ACCESS_REG = 0xC2,
+	MCU_CMD_EXT_CID = 0xED,
+	MCU_CMD_FW_SCATTER = 0xEE,
+	MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+	MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+	MCU_EXT_CMD_RF_TEST = 0x04,
+	MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05,
+	MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06,
+	MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+	MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+	MCU_EXT_CMD_NIC_CAPABILITY = 0x09,
+	MCU_EXT_CMD_PWR_SAVING = 0x0A,
+	MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E,
+	MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF,
+	MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10,
+	MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+	MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+	MCU_EXT_CMD_PS_RETRIEVE_START = 0x14,
+	MCU_EXT_CMD_LED_CTRL = 0x17,
+	MCU_EXT_CMD_PACKET_FILTER = 0x18,
+	MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B,
+	MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+	MCU_EXT_CMD_THERMAL_PROTECT = 0x23,
+	MCU_EXT_CMD_EDCA_SET = 0x27,
+	MCU_EXT_CMD_SLOT_TIME_SET = 0x28,
+	MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29,
+	MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B,
+	MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C,
+	MCU_EXT_CMD_WAKEUP_OPTION = 0x2E,
+	MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31,
+	MCU_EXT_CMD_BCN_UPDATE = 0x33
+};
+
+enum {
+	MCU_EXT_EVENT_CMD_RESULT = 0x0,
+	MCU_EXT_EVENT_RF_REG_ACCESS = 0x2,
+	MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E,
+	MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+	MCU_EXT_EVENT_BEACON_LOSS = 0x1A,
+	MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+	MCU_EXT_EVENT_BCN_UPDATE = 0x31,
+};
+
+static inline struct sk_buff *
+mt7603_mcu_msg_alloc(const void *data, int len)
+{
+	return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
+				  len, 0);
+}
+
+#endif
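The command numbering here pairs with __mt7603_mcu_msg_send() in mcu.c: a negative command (e.g. -MCU_CMD_FW_SCATTER) selects a raw CID, while a non-negative value is an extended command wrapped under MCU_CMD_EXT_CID. A sketch of that mapping onto the TXD fields above, using a hypothetical helper name:

/* Illustrative only: mirrors the cid/ext_cid handling in __mt7603_mcu_msg_send(). */
static void mt7603_mcu_fill_cid(struct mt7603_mcu_txd *txd, int cmd)
{
	if (cmd < 0) {
		txd->cid = -cmd;		/* raw firmware command */
		txd->set_query = MCU_Q_NA;
	} else {
		txd->cid = MCU_CMD_EXT_CID;	/* extended command wrapper */
		txd->ext_cid = cmd;
		txd->set_query = MCU_Q_SET;
		txd->ext_cid_ack = 1;
	}
}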
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
new file mode 100644
index 0000000..257300f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_H
+#define __MT7603_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7603_MAX_INTERFACES	4
+#define MT7603_WTBL_SIZE	128
+#define MT7603_WTBL_RESERVED	(MT7603_WTBL_SIZE - 1)
+#define MT7603_WTBL_STA		(MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES)
+
+#define MT7603_RATE_RETRY	2
+
+#define MT7603_RX_RING_SIZE     128
+
+#define MT7603_FIRMWARE_E1	"mt7603_e1.bin"
+#define MT7603_FIRMWARE_E2	"mt7603_e2.bin"
+#define MT7628_FIRMWARE_E1	"mt7628_e1.bin"
+#define MT7628_FIRMWARE_E2	"mt7628_e2.bin"
+
+#define MT7603_EEPROM_SIZE	1024
+
+#define MT_AGG_SIZE_LIMIT(_n)	(((_n) + 1) * 4)
+
+#define MT7603_PRE_TBTT_TIME	5000 /* us */
+
+#define MT7603_WATCHDOG_TIME	100 /* ms */
+#define MT7603_WATCHDOG_TIMEOUT	10 /* number of checks */
+
+#define MT7603_EDCCA_BLOCK_TH	10
+
+#define MT7603_CFEND_RATE_DEFAULT	0x69 /* chip default (24M) */
+#define MT7603_CFEND_RATE_11B		0x03 /* 11B LP, 11M */
+
+struct mt7603_vif;
+struct mt7603_sta;
+
+enum {
+	MT7603_REV_E1 = 0x00,
+	MT7603_REV_E2 = 0x10,
+	MT7628_REV_E1 = 0x8a00,
+};
+
+enum mt7603_bw {
+	MT_BW_20,
+	MT_BW_40,
+	MT_BW_80,
+};
+
+struct mt7603_rate_set {
+	struct ieee80211_tx_rate probe_rate;
+	struct ieee80211_tx_rate rates[4];
+};
+
+struct mt7603_sta {
+	struct mt76_wcid wcid; /* must be first */
+
+	struct mt7603_vif *vif;
+
+	struct sk_buff_head psq;
+
+	struct ieee80211_tx_rate rates[4];
+
+	struct mt7603_rate_set rateset[2];
+	u32 rate_set_tsf;
+
+	u8 rate_count;
+	u8 n_rates;
+
+	u8 rate_probe;
+	u8 smps;
+
+	u8 ps;
+};
+
+struct mt7603_vif {
+	struct mt7603_sta sta; /* must be first */
+
+	u8 idx;
+};
+
+enum mt7603_reset_cause {
+	RESET_CAUSE_TX_HANG,
+	RESET_CAUSE_TX_BUSY,
+	RESET_CAUSE_RX_BUSY,
+	RESET_CAUSE_BEACON_STUCK,
+	RESET_CAUSE_RX_PSE_BUSY,
+	RESET_CAUSE_MCU_HANG,
+	RESET_CAUSE_RESET_FAILED,
+	__RESET_CAUSE_MAX
+};
+
+struct mt7603_dev {
+	struct mt76_dev mt76; /* must be first */
+
+	const struct mt76_bus_ops *bus_ops;
+
+	u32 rxfilter;
+
+	u8 vif_mask;
+
+	struct mt7603_sta global_sta;
+
+	u32 agc0, agc3;
+	u32 false_cca_ofdm, false_cca_cck;
+	unsigned long last_cca_adj;
+
+	u8 rssi_offset[3];
+
+	u8 slottime;
+	s16 coverage_class;
+
+	s8 tx_power_limit;
+
+	ktime_t ed_time;
+
+	struct mt76_queue q_rx;
+
+	spinlock_t ps_lock;
+
+	u8 mac_work_count;
+
+	u8 mcu_running;
+
+	u8 ed_monitor_enabled;
+	u8 ed_monitor;
+	s8 ed_trigger;
+	u8 ed_strict_mode;
+	u8 ed_strong_signal;
+
+	s8 sensitivity;
+
+	u8 beacon_check;
+	u8 tx_hang_check;
+	u8 tx_dma_check;
+	u8 rx_dma_check;
+	u8 rx_pse_check;
+	u8 mcu_hang;
+
+	enum mt7603_reset_cause cur_reset_cause;
+
+	u16 tx_dma_idx[4];
+	u16 rx_dma_idx;
+
+	u32 reset_test;
+
+	unsigned int reset_cause[__RESET_CAUSE_MAX];
+};
+
+extern const struct mt76_driver_ops mt7603_drv_ops;
+extern const struct ieee80211_ops mt7603_ops;
+extern struct pci_driver mt7603_pci_driver;
+extern struct platform_driver mt76_wmac_driver;
+
+static inline bool is_mt7603(struct mt7603_dev *dev)
+{
+	return mt76xx_chip(dev) == 0x7603;
+}
+
+static inline bool is_mt7628(struct mt7603_dev *dev)
+{
+	return mt76xx_chip(dev) == 0x7628;
+}
+
+/* need offset to prevent conflict with ampdu_ack_len */
+#define MT_RATE_DRIVER_DATA_OFFSET	4
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr);
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance);
+
+int mt7603_register_device(struct mt7603_dev *dev);
+void mt7603_unregister_device(struct mt7603_dev *dev);
+int mt7603_eeprom_init(struct mt7603_dev *dev);
+int mt7603_dma_init(struct mt7603_dev *dev);
+void mt7603_dma_cleanup(struct mt7603_dev *dev);
+int mt7603_mcu_init(struct mt7603_dev *dev);
+void mt7603_init_debugfs(struct mt7603_dev *dev);
+
+static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+void mt7603_mac_dma_start(struct mt7603_dev *dev);
+void mt7603_mac_start(struct mt7603_dev *dev);
+void mt7603_mac_stop(struct mt7603_dev *dev);
+void mt7603_mac_work(struct work_struct *work);
+void mt7603_mac_set_timing(struct mt7603_dev *dev);
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
+int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
+			    int ba_size);
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev);
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev);
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
+void mt7603_mcu_exit(struct mt7603_dev *dev);
+
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+		      const u8 *mac_addr);
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx);
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta);
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			   struct ieee80211_tx_rate *probe_rate,
+			   struct ieee80211_tx_rate *rates);
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+			struct ieee80211_key_conf *key);
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			bool enabled);
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+			  bool enabled);
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
+			  struct mt76_tx_info *tx_info);
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e);
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb);
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta);
+void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta);
+void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg);
+
+void mt7603_update_channel(struct mt76_dev *mdev);
+
+void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
+void mt7603_cca_stats_reset(struct mt7603_dev *dev);
+
+void mt7603_init_edcca(struct mt7603_dev *dev);
+#endif
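The WTBL constants above split the 128-entry table: interface entries sit directly below MT7603_WTBL_RESERVED, while station entries allocated in mt7603_sta_add() stay below MT7603_WTBL_STA. A small sketch of the interface mapping, using a hypothetical helper name:

/* Illustrative only: WTBL index used for interface 'vif_idx', mirroring the
 * assignment in mt7603_add_interface(); station indices allocated through
 * mt76_wcid_alloc() remain below MT7603_WTBL_STA and cannot collide.
 */
static inline int mt7603_vif_wtbl_idx(int vif_idx)
{
	return MT7603_WTBL_RESERVED - 1 - vif_idx;
}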
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
new file mode 100644
index 0000000..2f2f337
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7603.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7603) },
+	{ },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mt7603_dev *dev;
+	struct mt76_dev *mdev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+				 &mt7603_drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt7603_dev, mt76);
+	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+
+	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+		    (mt76_rr(dev, MT_HW_REV) & 0xff);
+	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+	ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt7603_register_device(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+	mt7603_unregister_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E2);
+
+struct pci_driver mt7603_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76pci_device_table,
+	.probe		= mt76pci_probe,
+	.remove		= mt76pci_remove,
+};
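mt76pci_probe() packs the chip id into the upper 16 bits of mdev->rev and the hardware revision into the low bits; the is_mt7603()/is_mt7628() and mt76xx_rev() checks in mt7603.h are expected to decode it the same way (mt76xx_chip()/mt76xx_rev() come from mt76.h, which is not shown here). A minimal sketch of that split, with hypothetical helper names:

/* Illustrative only: decoding the revision word built in mt76pci_probe(). */
static inline u16 mt7603_rev_chip(u32 rev)
{
	return rev >> 16;		/* 0x7603 for this PCI device */
}

static inline u16 mt7603_rev_step(u32 rev)
{
	return rev & 0xffff;		/* e.g. MT7603_REV_E1, MT7603_REV_E2 */
}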
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
new file mode 100644
index 0000000..eb9eefe
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -0,0 +1,784 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_REGS_H
+#define __MT7603_REGS_H
+
+#define MT_HW_REV			0x1000
+#define MT_HW_CHIPID			0x1008
+#define MT_TOP_MISC2			0x1134
+
+#define MT_MCU_BASE			0x2000
+#define MT_MCU(ofs)			(MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1		MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET	GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE	GENMASK(31, 18)
+
+#define MT_MCU_PCIE_REMAP_2		MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET	GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE	GENMASK(31, 19)
+
+#define MT_HIF_BASE			0x4000
+#define MT_HIF(ofs)			(MT_HIF_BASE + (ofs))
+
+#define MT_INT_SOURCE_CSR		MT_HIF(0x200)
+#define MT_INT_MASK_CSR			MT_HIF(0x204)
+#define MT_DELAY_INT_CFG		MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(19, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+
+#define MT_INT_RX_COHERENT		BIT(20)
+#define MT_INT_TX_COHERENT		BIT(21)
+#define MT_INT_MAC_IRQ3			BIT(27)
+
+#define MT_INT_MCU_CMD			BIT(30)
+
+#define MT_WPDMA_GLO_CFG		MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_SW_RESET	BIT(24)
+#define MT_WPDMA_GLO_CFG_FORCE_TX_EOF	BIT(25)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		MT_HIF(0x20c)
+
+#define MT_WPDMA_DEBUG			MT_HIF(0x244)
+#define MT_WPDMA_DEBUG_VALUE		GENMASK(17, 0)
+#define MT_WPDMA_DEBUG_SEL		BIT(27)
+#define MT_WPDMA_DEBUG_IDX		GENMASK(31, 28)
+
+#define MT_TX_RING_BASE			MT_HIF(0x300)
+#define MT_RX_RING_BASE			MT_HIF(0x400)
+
+#define MT_TXTIME_THRESH_BASE		MT_HIF(0x500)
+#define MT_TXTIME_THRESH(n)		(MT_TXTIME_THRESH_BASE + ((n) * 4))
+
+#define MT_PAGE_COUNT_BASE		MT_HIF(0x540)
+#define MT_PAGE_COUNT(n)		(MT_PAGE_COUNT_BASE + ((n) * 4))
+
+#define MT_SCH_1			MT_HIF(0x588)
+#define MT_SCH_2			MT_HIF(0x58c)
+#define MT_SCH_3			MT_HIF(0x590)
+
+#define MT_SCH_4			MT_HIF(0x594)
+#define MT_SCH_4_FORCE_QID		GENMASK(4, 0)
+#define MT_SCH_4_BYPASS			BIT(5)
+#define MT_SCH_4_RESET			BIT(8)
+
+#define MT_GROUP_THRESH_BASE		MT_HIF(0x598)
+#define MT_GROUP_THRESH(n)		(MT_GROUP_THRESH_BASE + ((n) * 4))
+
+#define MT_QUEUE_PRIORITY_1		MT_HIF(0x580)
+#define MT_QUEUE_PRIORITY_2		MT_HIF(0x584)
+
+#define MT_BMAP_0			MT_HIF(0x5b0)
+#define MT_BMAP_1			MT_HIF(0x5b4)
+#define MT_BMAP_2			MT_HIF(0x5b8)
+
+#define MT_HIGH_PRIORITY_1		MT_HIF(0x5bc)
+#define MT_HIGH_PRIORITY_2		MT_HIF(0x5c0)
+
+#define MT_PRIORITY_MASK		MT_HIF(0x5c4)
+
+#define MT_RSV_MAX_THRESH		MT_HIF(0x5c8)
+
+#define MT_PSE_BASE			0x8000
+#define MT_PSE(ofs)			(MT_PSE_BASE + (ofs))
+
+#define MT_MCU_DEBUG_RESET		MT_PSE(0x16c)
+#define MT_MCU_DEBUG_RESET_PSE		BIT(0)
+#define MT_MCU_DEBUG_RESET_PSE_S	BIT(1)
+#define MT_MCU_DEBUG_RESET_QUEUES	GENMASK(6, 2)
+
+#define MT_PSE_FC_P0			MT_PSE(0x120)
+#define MT_PSE_FC_P0_MIN_RESERVE	GENMASK(11, 0)
+#define MT_PSE_FC_P0_MAX_QUOTA		GENMASK(27, 16)
+
+#define MT_PSE_FRP			MT_PSE(0x138)
+#define MT_PSE_FRP_P0			GENMASK(2, 0)
+#define MT_PSE_FRP_P1			GENMASK(5, 3)
+#define MT_PSE_FRP_P2_RQ0		GENMASK(8, 6)
+#define MT_PSE_FRP_P2_RQ1		GENMASK(11, 9)
+#define MT_PSE_FRP_P2_RQ2		GENMASK(14, 12)
+
+#define MT_FC_RSV_COUNT_0		MT_PSE(0x13c)
+#define MT_FC_RSV_COUNT_0_P0		GENMASK(11, 0)
+#define MT_FC_RSV_COUNT_0_P1		GENMASK(27, 16)
+
+#define MT_FC_SP2_Q0Q1			MT_PSE(0x14c)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0	GENMASK(11, 0)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1	GENMASK(27, 16)
+
+#define MT_PSE_FW_SHARED		MT_PSE(0x17c)
+
+#define MT_PSE_RTA			MT_PSE(0x194)
+#define MT_PSE_RTA_QUEUE_ID		GENMASK(4, 0)
+#define MT_PSE_RTA_PORT_ID		GENMASK(6, 5)
+#define MT_PSE_RTA_REDIRECT_EN		BIT(7)
+#define MT_PSE_RTA_TAG_ID		GENMASK(15, 8)
+#define MT_PSE_RTA_WRITE		BIT(16)
+#define MT_PSE_RTA_BUSY			BIT(31)
+
+#define MT_WF_PHY_BASE			0x10000
+#define MT_WF_PHY_OFFSET		0x1000
+#define MT_WF_PHY(ofs)			(MT_WF_PHY_BASE + (ofs))
+
+#define MT_AGC_BASE			MT_WF_PHY(0x500)
+#define MT_AGC(n)			(MT_AGC_BASE + ((n) * 4))
+
+#define MT_AGC1_BASE			MT_WF_PHY(0x1500)
+#define MT_AGC1(n)			(MT_AGC1_BASE + ((n) * 4))
+
+#define MT_AGC_41_RSSI_0		GENMASK(23, 16)
+#define MT_AGC_41_RSSI_1		GENMASK(7, 0)
+
+#define MT_RXTD_BASE			MT_WF_PHY(0x600)
+#define MT_RXTD(n)			(MT_RXTD_BASE + ((n) * 4))
+
+#define MT_RXTD_6_ACI_TH		GENMASK(4, 0)
+#define MT_RXTD_6_CCAED_TH		GENMASK(14, 8)
+
+#define MT_RXTD_8_LOWER_SIGNAL		GENMASK(5, 0)
+
+#define MT_RXTD_13_ACI_TH_EN		BIT(0)
+
+#define MT_WF_PHY_CR_TSSI_BASE		MT_WF_PHY(0xd00)
+#define MT_WF_PHY_CR_TSSI(phy, n)	(MT_WF_PHY_CR_TSSI_BASE +	\
+					 ((phy) * MT_WF_PHY_OFFSET) +	\
+					 ((n) * 4))
+
+#define MT_PHYCTRL_BASE			MT_WF_PHY(0x4100)
+#define MT_PHYCTRL(n)			(MT_PHYCTRL_BASE + ((n) * 4))
+
+#define MT_PHYCTRL_2_STATUS_RESET	BIT(6)
+#define MT_PHYCTRL_2_STATUS_EN		BIT(7)
+
+#define MT_PHYCTRL_STAT_PD		MT_PHYCTRL(3)
+#define MT_PHYCTRL_STAT_PD_OFDM		GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_PD_CCK		GENMASK(15, 0)
+
+#define MT_PHYCTRL_STAT_MDRDY		MT_PHYCTRL(8)
+#define MT_PHYCTRL_STAT_MDRDY_OFDM	GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_MDRDY_CCK	GENMASK(15, 0)
+
+#define MT_WF_AGG_BASE			0x21200
+#define MT_WF_AGG(ofs)			(MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR			MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1		BIT(0)
+#define MT_AGG_ARCR_FB_SGI_DISABLE	BIT(1)
+#define MT_AGG_ARCR_RATE8_DOWN_WRAP	BIT(2)
+#define MT_AGG_ARCR_RTS_RATE_THR	GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO	GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN	BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH	GENMASK(22, 20)
+#define MT_AGG_ARCR_SPE_DIS_TH		GENMASK(27, 24)
+
+#define MT_AGG_ARUCR			MT_WF_AGG(0x014)
+#define MT_AGG_ARDCR			MT_WF_AGG(0x018)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n)	(4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n)		GENMASK(2 + \
+						MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+						MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_LIMIT			MT_WF_AGG(0x040)
+#define MT_AGG_LIMIT_1			MT_WF_AGG(0x044)
+#define MT_AGG_LIMIT_AC(_n)		GENMASK(((_n) + 1) * 8 - 1, (_n) * 8)
+
+#define MT_AGG_BA_SIZE_LIMIT_0		MT_WF_AGG(0x048)
+#define MT_AGG_BA_SIZE_LIMIT_1		MT_WF_AGG(0x04c)
+#define MT_AGG_BA_SIZE_LIMIT_SHIFT	8
+
+#define MT_AGG_PCR			MT_WF_AGG(0x050)
+#define MT_AGG_PCR_MM			BIT(16)
+#define MT_AGG_PCR_GF			BIT(17)
+#define MT_AGG_PCR_BW40			BIT(18)
+#define MT_AGG_PCR_RIFS			BIT(19)
+#define MT_AGG_PCR_BW80			BIT(20)
+#define MT_AGG_PCR_BW160		BIT(21)
+#define MT_AGG_PCR_ERP			BIT(22)
+
+#define MT_AGG_PCR_RTS			MT_WF_AGG(0x054)
+#define MT_AGG_PCR_RTS_THR		GENMASK(19, 0)
+#define MT_AGG_PCR_RTS_PKT_THR		GENMASK(31, 25)
+
+#define MT_AGG_CONTROL			MT_WF_AGG(0x070)
+#define MT_AGG_CONTROL_NO_BA_RULE	BIT(0)
+#define MT_AGG_CONTROL_NO_BA_AR_RULE	BIT(1)
+#define MT_AGG_CONTROL_CFEND_SPE_EN	BIT(3)
+#define MT_AGG_CONTROL_CFEND_RATE	GENMASK(15, 4)
+#define MT_AGG_CONTROL_BAR_SPE_EN	BIT(19)
+#define MT_AGG_CONTROL_BAR_RATE		GENMASK(31, 20)
+
+#define MT_AGG_TMP			MT_WF_AGG(0x0d8)
+
+#define MT_AGG_BWCR			MT_WF_AGG(0x0ec)
+#define MT_AGG_BWCR_BW			GENMASK(3, 2)
+
+#define MT_AGG_RETRY_CONTROL		MT_WF_AGG(0x0f4)
+#define MT_AGG_RETRY_CONTROL_RTS_LIMIT	GENMASK(11, 7)
+#define MT_AGG_RETRY_CONTROL_BAR_LIMIT	GENMASK(15, 12)
+
+#define MT_WF_DMA_BASE			0x21c00
+#define MT_WF_DMA(ofs)			(MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0			MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN		GENMASK(15, 0)
+#define MT_DMA_DCR0_DAMSDU		BIT(16)
+#define MT_DMA_DCR0_RX_VEC_DROP		BIT(17)
+
+#define MT_DMA_DCR1			MT_WF_DMA(0x004)
+
+#define MT_DMA_FQCR0			MT_WF_DMA(0x008)
+#define MT_DMA_FQCR0_TARGET_WCID	GENMASK(7, 0)
+#define MT_DMA_FQCR0_TARGET_BSS		GENMASK(13, 8)
+#define MT_DMA_FQCR0_TARGET_QID		GENMASK(20, 16)
+#define MT_DMA_FQCR0_DEST_PORT_ID	GENMASK(23, 22)
+#define MT_DMA_FQCR0_DEST_QUEUE_ID	GENMASK(28, 24)
+#define MT_DMA_FQCR0_MODE		BIT(29)
+#define MT_DMA_FQCR0_STATUS		BIT(30)
+#define MT_DMA_FQCR0_BUSY		BIT(31)
+
+#define MT_DMA_RCFR0			MT_WF_DMA(0x070)
+#define MT_DMA_VCFR0			MT_WF_DMA(0x07c)
+
+#define MT_DMA_TCFR0			MT_WF_DMA(0x080)
+#define MT_DMA_TCFR1			MT_WF_DMA(0x084)
+#define MT_DMA_TCFR_TXS_AGGR_TIMEOUT	GENMASK(27, 16)
+#define MT_DMA_TCFR_TXS_QUEUE		BIT(14)
+#define MT_DMA_TCFR_TXS_AGGR_COUNT	GENMASK(12, 8)
+#define MT_DMA_TCFR_TXS_BIT_MAP		GENMASK(6, 0)
+
+#define MT_DMA_TMCFR0			MT_WF_DMA(0x088)
+
+#define MT_WF_ARB_BASE			0x21400
+#define MT_WF_ARB(ofs)			(MT_WF_ARB_BASE + (ofs))
+
+#define MT_WMM_AIFSN			MT_WF_ARB(0x020)
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX_BASE		MT_WF_ARB(0x028)
+#define MT_WMM_CWMAX(_n)		(MT_WMM_CWMAX_BASE + (((_n) / 2) << 2))
+#define MT_WMM_CWMAX_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_CWMAX_MASK		GENMASK(15, 0)
+
+#define MT_WMM_CWMIN			MT_WF_ARB(0x040)
+#define MT_WMM_CWMIN_MASK		GENMASK(7, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 8)
+
+#define MT_WF_ARB_RQCR			MT_WF_ARB(0x070)
+#define MT_WF_ARB_RQCR_RX_START		BIT(0)
+#define MT_WF_ARB_RQCR_RXV_START	BIT(4)
+#define MT_WF_ARB_RQCR_RXV_R_EN		BIT(7)
+#define MT_WF_ARB_RQCR_RXV_T_EN		BIT(8)
+
+#define MT_ARB_SCR			MT_WF_ARB(0x080)
+#define MT_ARB_SCR_BCNQ_OPMODE_MASK	GENMASK(1, 0)
+#define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n)	((n) * 2)
+#define MT_ARB_SCR_TX_DISABLE		BIT(8)
+#define MT_ARB_SCR_RX_DISABLE		BIT(9)
+#define MT_ARB_SCR_BCNQ_EMPTY_SKIP	BIT(28)
+#define MT_ARB_SCR_TTTT_BTIM_PRIO	BIT(29)
+#define MT_ARB_SCR_TBTT_BCN_PRIO	BIT(30)
+#define MT_ARB_SCR_TBTT_BCAST_PRIO	BIT(31)
+
+enum {
+	MT_BCNQ_OPMODE_STA =	0,
+	MT_BCNQ_OPMODE_AP =	1,
+	MT_BCNQ_OPMODE_ADHOC =	2,
+};
+
+#define MT_WF_ARB_TX_START_0		MT_WF_ARB(0x100)
+#define MT_WF_ARB_TX_START_1		MT_WF_ARB(0x104)
+#define MT_WF_ARB_TX_FLUSH_0		MT_WF_ARB(0x108)
+#define MT_WF_ARB_TX_FLUSH_1		MT_WF_ARB(0x10c)
+#define MT_WF_ARB_TX_STOP_0		MT_WF_ARB(0x110)
+#define MT_WF_ARB_TX_STOP_1		MT_WF_ARB(0x114)
+
+#define MT_WF_ARB_BCN_START		MT_WF_ARB(0x118)
+#define MT_WF_ARB_BCN_START_BSSn(n)	BIT(0 + (n))
+#define MT_WF_ARB_BCN_START_T_PRE_TTTT	BIT(10)
+#define MT_WF_ARB_BCN_START_T_TTTT	BIT(11)
+#define MT_WF_ARB_BCN_START_T_PRE_TBTT	BIT(12)
+#define MT_WF_ARB_BCN_START_T_TBTT	BIT(13)
+#define MT_WF_ARB_BCN_START_T_SLOT_IDLE	BIT(14)
+#define MT_WF_ARB_BCN_START_T_TX_START	BIT(15)
+#define MT_WF_ARB_BCN_START_BSS0n(n)	BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_BCN_FLUSH		MT_WF_ARB(0x11c)
+#define MT_WF_ARB_BCN_FLUSH_BSSn(n)	BIT(0 + (n))
+#define MT_WF_ARB_BCN_FLUSH_BSS0n(n)	BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_START		MT_WF_ARB(0x120)
+#define MT_WF_ARB_CAB_START_BSSn(n)	BIT(0 + (n))
+#define MT_WF_ARB_CAB_START_BSS0n(n)	BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_FLUSH		MT_WF_ARB(0x124)
+#define MT_WF_ARB_CAB_FLUSH_BSSn(n)	BIT(0 + (n))
+#define MT_WF_ARB_CAB_FLUSH_BSS0n(n)	BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_COUNT(n)		MT_WF_ARB(0x128 + (n) * 4)
+#define MT_WF_ARB_CAB_COUNT_SHIFT	4
+#define MT_WF_ARB_CAB_COUNT_MASK	GENMASK(3, 0)
+#define MT_WF_ARB_CAB_COUNT_B0_REG(n)	MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \
+							     ((n) > 4 ? 1 : 0)))
+#define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n)	(((n) > 12 ? (n) - 12 : \
+					 ((n) > 4 ? (n) - 4 : \
+					  (n) ? (n) + 3 : 0)) * 4)
+
+#define MT_TX_ABORT			MT_WF_ARB(0x134)
+#define MT_TX_ABORT_EN			BIT(0)
+#define MT_TX_ABORT_WCID		GENMASK(15, 8)
+
+#define MT_WF_TMAC_BASE			0x21600
+#define MT_WF_TMAC(ofs)			(MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_TCR			MT_WF_TMAC(0x000)
+#define MT_TMAC_TCR_BLINK_SEL		GENMASK(7, 6)
+#define MT_TMAC_TCR_PRE_RTS_GUARD	GENMASK(11, 8)
+#define MT_TMAC_TCR_PRE_RTS_SEC_IDLE	GENMASK(13, 12)
+#define MT_TMAC_TCR_RTS_SIGTA		BIT(14)
+#define MT_TMAC_TCR_LDPC_OFS		BIT(15)
+#define MT_TMAC_TCR_TX_STREAMS		GENMASK(17, 16)
+#define MT_TMAC_TCR_SCH_IDLE_SEL	GENMASK(19, 18)
+#define MT_TMAC_TCR_SCH_DET_PER_IOD	BIT(20)
+#define MT_TMAC_TCR_DCH_DET_DISABLE	BIT(21)
+#define MT_TMAC_TCR_TX_RIFS		BIT(22)
+#define MT_TMAC_TCR_RX_RIFS_MODE	BIT(23)
+#define MT_TMAC_TCR_TXOP_TBTT_CTL	BIT(24)
+#define MT_TMAC_TCR_TBTT_TX_STOP_CTL	BIT(25)
+#define MT_TMAC_TCR_TXOP_BURST_STOP	BIT(26)
+#define MT_TMAC_TCR_RDG_RA_MODE		BIT(27)
+#define MT_TMAC_TCR_RDG_RESP		BIT(29)
+#define MT_TMAC_TCR_RDG_NO_PENDING	BIT(30)
+#define MT_TMAC_TCR_SMOOTHING		BIT(31)
+
+#define MT_WMM_TXOP_BASE		MT_WF_TMAC(0x010)
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + \
+					 ((((_n) / 2) ^ 0x1) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_TIMEOUT_CCK			MT_WF_TMAC(0x090)
+#define MT_TIMEOUT_OFDM			MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP		GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA		GENMASK(31, 16)
+
+#define MT_TXREQ			MT_WF_TMAC(0x09c)
+#define MT_TXREQ_CCA_SRC_SEL		GENMASK(31, 30)
+
+#define MT_RXREQ			MT_WF_TMAC(0x0a0)
+#define MT_RXREQ_DELAY			GENMASK(8, 0)
+
+#define MT_IFS				MT_WF_TMAC(0x0a4)
+#define MT_IFS_EIFS			GENMASK(8, 0)
+#define MT_IFS_RIFS			GENMASK(14, 10)
+#define MT_IFS_SIFS			GENMASK(22, 16)
+#define MT_IFS_SLOT			GENMASK(30, 24)
+
+#define MT_TMAC_PCR			MT_WF_TMAC(0x0b4)
+#define MT_TMAC_PCR_RATE		GENMASK(8, 0)
+#define MT_TMAC_PCR_RATE_FIXED		BIT(15)
+#define MT_TMAC_PCR_ANT_ID		GENMASK(21, 16)
+#define MT_TMAC_PCR_ANT_ID_SEL		BIT(22)
+#define MT_TMAC_PCR_SPE_EN		BIT(23)
+#define MT_TMAC_PCR_ANT_PRI		GENMASK(26, 24)
+#define MT_TMAC_PCR_ANT_PRI_SEL		BIT(27)
+
+#define MT_WF_RMAC_BASE			0x21800
+#define MT_WF_RMAC(ofs)			(MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR			MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI	BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL		BIT(1)
+#define MT_WF_RFCR_DROP_VERSION		BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ	BIT(4)
+#define MT_WF_RFCR_DROP_MCAST		BIT(5)
+#define MT_WF_RFCR_DROP_BCAST		BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED	BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC		BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID	BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID	BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON	BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT	BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV		BIT(13)
+#define MT_WF_RFCR_DROP_CTS		BIT(14)
+#define MT_WF_RFCR_DROP_RTS		BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE	BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS	BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC	BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM	BIT(19)
+#define MT_WF_RFCR_DROP_NDPA		BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL	BIT(21)
+
+#define MT_BSSID0(idx)			MT_WF_RMAC(0x004 + (idx) * 8)
+#define MT_BSSID1(idx)			MT_WF_RMAC(0x008 + (idx) * 8)
+#define MT_BSSID1_VALID			BIT(16)
+
+#define MT_MAC_ADDR0(idx)		MT_WF_RMAC(0x024 + (idx) * 8)
+#define MT_MAC_ADDR1(idx)		MT_WF_RMAC(0x028 + (idx) * 8)
+#define MT_MAC_ADDR1_ADDR		GENMASK(15, 0)
+#define MT_MAC_ADDR1_VALID		BIT(16)
+
+#define MT_BA_CONTROL_0			MT_WF_RMAC(0x068)
+#define MT_BA_CONTROL_1			MT_WF_RMAC(0x06c)
+#define MT_BA_CONTROL_1_ADDR		GENMASK(15, 0)
+#define MT_BA_CONTROL_1_TID		GENMASK(19, 16)
+#define MT_BA_CONTROL_1_IGNORE_TID	BIT(20)
+#define MT_BA_CONTROL_1_IGNORE_ALL	BIT(21)
+#define MT_BA_CONTROL_1_RESET		BIT(22)
+
+#define MT_WF_RMACDR			MT_WF_RMAC(0x078)
+#define MT_WF_RMACDR_TSF_PROBERSP_DIS	BIT(0)
+#define MT_WF_RMACDR_TSF_TIM		BIT(4)
+#define MT_WF_RMACDR_MBSSID_MASK	GENMASK(25, 24)
+#define MT_WF_RMACDR_CHECK_HTC_BY_RATE	BIT(26)
+#define MT_WF_RMACDR_MAXLEN_20BIT	BIT(30)
+
+#define MT_WF_RMAC_RMCR			MT_WF_RMAC(0x080)
+#define MT_WF_RMAC_RMCR_SMPS_MODE	GENMASK(21, 20)
+#define MT_WF_RMAC_RMCR_RX_STREAMS	GENMASK(24, 22)
+#define MT_WF_RMAC_RMCR_SMPS_RTS	BIT(25)
+
+#define MT_WF_RMAC_CH_FREQ		MT_WF_RMAC(0x090)
+#define MT_WF_RMAC_MAXMINLEN		MT_WF_RMAC(0x098)
+#define MT_WF_RFCR1			MT_WF_RMAC(0x0a4)
+#define MT_WF_RMAC_TMR_PA		MT_WF_RMAC(0x0e0)
+
+#define MT_WF_SEC_BASE			0x21a00
+#define MT_WF_SEC(ofs)			(MT_WF_SEC_BASE + (ofs))
+
+#define MT_SEC_SCR			MT_WF_SEC(0x004)
+#define MT_SEC_SCR_MASK_ORDER		GENMASK(1, 0)
+
+#define MT_WTBL_OFF_BASE		0x23000
+#define MT_WTBL_OFF(n)			(MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_UPDATE			MT_WTBL_OFF(0x000)
+#define MT_WTBL_UPDATE_WLAN_IDX		GENMASK(7, 0)
+#define MT_WTBL_UPDATE_WTBL2		BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR	BIT(12)
+#define MT_WTBL_UPDATE_RATE_UPDATE	BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR	BIT(14)
+#define MT_WTBL_UPDATE_RX_COUNT_CLEAR	BIT(15)
+#define MT_WTBL_UPDATE_BUSY		BIT(16)
+
+#define MT_WTBL_RMVTCR			MT_WTBL_OFF(0x008)
+#define MT_WTBL_RMVTCR_RX_MV_MODE	BIT(23)
+
+#define MT_LPON_BASE			0x24000
+#define MT_LPON(n)			(MT_LPON_BASE + (n))
+
+#define MT_LPON_T0CR			MT_LPON(0x010)
+#define MT_LPON_T0CR_MODE		GENMASK(1, 0)
+
+#define MT_LPON_UTTR0			MT_LPON(0x018)
+#define MT_LPON_UTTR1			MT_LPON(0x01c)
+
+#define MT_LPON_BTEIR			MT_LPON(0x020)
+#define MT_LPON_BTEIR_MBSS_MODE		GENMASK(31, 29)
+
+#define MT_PRE_TBTT			MT_LPON(0x030)
+#define MT_PRE_TBTT_MASK		GENMASK(7, 0)
+#define MT_PRE_TBTT_SHIFT		8
+
+#define MT_TBTT				MT_LPON(0x034)
+#define MT_TBTT_PERIOD			GENMASK(15, 0)
+#define MT_TBTT_DTIM_PERIOD		GENMASK(23, 16)
+#define MT_TBTT_TBTT_WAKE_PERIOD	GENMASK(27, 24)
+#define MT_TBTT_DTIM_WAKE_PERIOD	GENMASK(30, 28)
+#define MT_TBTT_CAL_ENABLE		BIT(31)
+
+#define MT_TBTT_TIMER_CFG		MT_LPON(0x05c)
+
+#define MT_LPON_SBTOR(n)		MT_LPON(0x0a0)
+#define MT_LPON_SBTOR_SUB_BSS_EN	BIT(29)
+#define MT_LPON_SBTOR_TIME_OFFSET	GENMASK(19, 0)
+
+#define MT_INT_WAKEUP_BASE		0x24400
+#define MT_INT_WAKEUP(n)		(MT_INT_WAKEUP_BASE + (n))
+
+#define MT_HW_INT_STATUS(n)		MT_INT_WAKEUP(0x3c + (n) * 8)
+#define MT_HW_INT_MASK(n)		MT_INT_WAKEUP(0x40 + (n) * 8)
+
+#define MT_HW_INT3_TBTT0		BIT(15)
+#define MT_HW_INT3_PRE_TBTT0		BIT(31)
+
+#define MT_WTBL1_BASE			0x28000
+
+#define MT_WTBL_ON_BASE			(MT_WTBL1_BASE + 0x2000)
+#define MT_WTBL_ON(_n)			(MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RIUCR0			MT_WTBL_ON(0x200)
+
+#define MT_WTBL_RIUCR1			MT_WTBL_ON(0x204)
+#define MT_WTBL_RIUCR1_RATE0		GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1		GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO		GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2			MT_WTBL_ON(0x208)
+#define MT_WTBL_RIUCR2_RATE2_HI		GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3		GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4		GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO		GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3			MT_WTBL_ON(0x20c)
+#define MT_WTBL_RIUCR3_RATE5_HI		GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6		GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7		GENMASK(31, 20)
+
+#define MT_MIB_BASE			0x2c000
+#define MT_MIB(_n)			(MT_MIB_BASE + (_n))
+
+#define MT_MIB_CTL			MT_MIB(0x00)
+#define MT_MIB_CTL_PSCCA_TIME		GENMASK(13, 11)
+#define MT_MIB_CTL_CCA_NAV_TX		GENMASK(16, 14)
+#define MT_MIB_CTL_ED_TIME		GENMASK(30, 28)
+#define MT_MIB_CTL_READ_CLR_DIS		BIT(31)
+
+#define MT_MIB_STAT(_n)			MT_MIB(0x08 + (_n) * 4)
+
+#define MT_MIB_STAT_CCA			MT_MIB_STAT(9)
+#define MT_MIB_STAT_CCA_MASK		GENMASK(23, 0)
+
+#define MT_MIB_STAT_PSCCA		MT_MIB_STAT(16)
+#define MT_MIB_STAT_PSCCA_MASK		GENMASK(23, 0)
+
+#define MT_MIB_STAT_ED			MT_MIB_STAT(18)
+#define MT_MIB_STAT_ED_MASK		GENMASK(23, 0)
+
+#define MT_PCIE_REMAP_BASE_1		0x40000
+#define MT_PCIE_REMAP_BASE_2		0x80000
+
+#define MT_TX_HW_QUEUE_MGMT		4
+#define MT_TX_HW_QUEUE_MCU		5
+#define MT_TX_HW_QUEUE_BCN		7
+#define MT_TX_HW_QUEUE_BMC		8
+
+#define MT_LED_BASE_PHYS		0x80024000
+#define MT_LED_PHYS(_n)			(MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL			MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n)		BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)	BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)	BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n)	BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n)	BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)		BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n)		MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n)		MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF_MASK		GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)		(((_v) << \
+					  __ffs(MT_LED_STATUS_OFF_MASK)) & \
+					 MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK		GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)		(((_v) << \
+					  __ffs(MT_LED_STATUS_ON_MASK)) & \
+					 MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK	GENMASK(15, 0)
+#define MT_LED_STATUS_DURATION(_v)	(((_v) << \
+					  __ffs(MT_LED_STATUS_DURATION_MASK)) &\
+					 MT_LED_STATUS_DURATION_MASK)
+
+#define MT_CLIENT_BASE_PHYS_ADDR	0x800c0000
+
+#define MT_CLIENT_TMAC_INFO_TEMPLATE	0x040
+
+#define MT_CLIENT_STATUS		0x06c
+
+#define MT_CLIENT_RESET_TX		0x070
+#define MT_CLIENT_RESET_TX_R_E_1	BIT(16)
+#define MT_CLIENT_RESET_TX_R_E_2	BIT(17)
+#define MT_CLIENT_RESET_TX_R_E_1_S	BIT(20)
+#define MT_CLIENT_RESET_TX_R_E_2_S	BIT(21)
+
+#define MT_EFUSE_BASE			0x81070000
+
+#define MT_EFUSE_BASE_CTRL		0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY	BIT(30)
+
+#define MT_EFUSE_CTRL			0x008
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID		BIT(29)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_WDATA(_i)		(0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i)		(0x030 + ((_i) * 4))
+
+#define MT_CLIENT_RXINF			0x068
+#define MT_CLIENT_RXINF_RXSH_GROUPS	GENMASK(2, 0)
+
+#define MT_PSE_BASE_PHYS_ADDR		0xa0000000
+
+#define MT_PSE_WTBL_2_PHYS_ADDR		0xa5000000
+
+#define MT_WTBL1_SIZE			(8 * 4)
+#define MT_WTBL2_SIZE			(16 * 4)
+#define MT_WTBL3_OFFSET			(MT7603_WTBL_SIZE * MT_WTBL2_SIZE)
+#define MT_WTBL3_SIZE			(16 * 4)
+#define MT_WTBL4_OFFSET			(MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \
+					 MT_WTBL3_OFFSET)
+#define MT_WTBL4_SIZE			(8 * 4)
+
+#define MT_WTBL1_W0_ADDR_HI		GENMASK(15, 0)
+#define MT_WTBL1_W0_MUAR_IDX		GENMASK(21, 16)
+#define MT_WTBL1_W0_RX_CHECK_A1		BIT(22)
+#define MT_WTBL1_W0_KEY_IDX		GENMASK(24, 23)
+#define MT_WTBL1_W0_RX_CHECK_KEY_IDX	BIT(25)
+#define MT_WTBL1_W0_RX_KEY_VALID	BIT(26)
+#define MT_WTBL1_W0_RX_IK_VALID		BIT(27)
+#define MT_WTBL1_W0_RX_VALID		BIT(28)
+#define MT_WTBL1_W0_RX_CHECK_A2		BIT(29)
+#define MT_WTBL1_W0_RX_DATA_VALID	BIT(30)
+#define MT_WTBL1_W0_WRITE_BURST		BIT(31)
+
+#define MT_WTBL1_W1_ADDR_LO		GENMASK(31, 0)
+
+#define MT_WTBL1_W2_MPDU_DENSITY	GENMASK(2, 0)
+#define MT_WTBL1_W2_KEY_TYPE		GENMASK(6, 3)
+#define MT_WTBL1_W2_EVEN_PN		BIT(7)
+#define MT_WTBL1_W2_TO_DS		BIT(8)
+#define MT_WTBL1_W2_FROM_DS		BIT(9)
+#define MT_WTBL1_W2_HEADER_TRANS	BIT(10)
+#define MT_WTBL1_W2_AMPDU_FACTOR	GENMASK(13, 11)
+#define MT_WTBL1_W2_PWR_MGMT		BIT(14)
+#define MT_WTBL1_W2_RDG			BIT(15)
+#define MT_WTBL1_W2_RTS			BIT(16)
+#define MT_WTBL1_W2_CFACK		BIT(17)
+#define MT_WTBL1_W2_RDG_BA		BIT(18)
+#define MT_WTBL1_W2_SMPS		BIT(19)
+#define MT_WTBL1_W2_TXS_BAF_REPORT	BIT(20)
+#define MT_WTBL1_W2_DYN_BW		BIT(21)
+#define MT_WTBL1_W2_LDPC		BIT(22)
+#define MT_WTBL1_W2_ITXBF		BIT(23)
+#define MT_WTBL1_W2_ETXBF		BIT(24)
+#define MT_WTBL1_W2_TXOP_PS		BIT(25)
+#define MT_WTBL1_W2_MESH		BIT(26)
+#define MT_WTBL1_W2_QOS			BIT(27)
+#define MT_WTBL1_W2_HT			BIT(28)
+#define MT_WTBL1_W2_VHT			BIT(29)
+#define MT_WTBL1_W2_ADMISSION_CONTROL	BIT(30)
+#define MT_WTBL1_W2_GROUP_ID		BIT(31)
+
+#define MT_WTBL1_W3_WTBL2_FRAME_ID	GENMASK(10, 0)
+#define MT_WTBL1_W3_WTBL2_ENTRY_ID	GENMASK(15, 11)
+#define MT_WTBL1_W3_WTBL4_FRAME_ID	GENMASK(26, 16)
+#define MT_WTBL1_W3_CHECK_PER		BIT(27)
+#define MT_WTBL1_W3_KEEP_I_PSM		BIT(28)
+#define MT_WTBL1_W3_I_PSM		BIT(29)
+#define MT_WTBL1_W3_POWER_SAVE		BIT(30)
+#define MT_WTBL1_W3_SKIP_TX		BIT(31)
+
+#define MT_WTBL1_W4_WTBL3_FRAME_ID	GENMASK(10, 0)
+#define MT_WTBL1_W4_WTBL3_ENTRY_ID	GENMASK(16, 11)
+#define MT_WTBL1_W4_WTBL4_ENTRY_ID	GENMASK(22, 17)
+#define MT_WTBL1_W4_PARTIAL_AID		GENMASK(31, 23)
+
+#define MT_WTBL2_W0_PN_LO		GENMASK(31, 0)
+
+#define MT_WTBL2_W1_PN_HI		GENMASK(15, 0)
+#define MT_WTBL2_W1_NON_QOS_SEQNO	GENMASK(27, 16)
+
+#define MT_WTBL2_W2_TID0_SN		GENMASK(11, 0)
+#define MT_WTBL2_W2_TID1_SN		GENMASK(23, 12)
+#define MT_WTBL2_W2_TID2_SN_LO		GENMASK(31, 24)
+
+#define MT_WTBL2_W3_TID2_SN_HI		GENMASK(3, 0)
+#define MT_WTBL2_W3_TID3_SN		GENMASK(15, 4)
+#define MT_WTBL2_W3_TID4_SN		GENMASK(27, 16)
+#define MT_WTBL2_W3_TID5_SN_LO		GENMASK(31, 28)
+
+#define MT_WTBL2_W4_TID5_SN_HI		GENMASK(7, 0)
+#define MT_WTBL2_W4_TID6_SN		GENMASK(19, 8)
+#define MT_WTBL2_W4_TID7_SN		GENMASK(31, 20)
+
+#define MT_WTBL2_W5_TX_COUNT_RATE1	GENMASK(15, 0)
+#define MT_WTBL2_W5_FAIL_COUNT_RATE1	GENMASK(31, 16)
+
+#define MT_WTBL2_W6_TX_COUNT_RATE2	GENMASK(7, 0)
+#define MT_WTBL2_W6_TX_COUNT_RATE3	GENMASK(15, 8)
+#define MT_WTBL2_W6_TX_COUNT_RATE4	GENMASK(23, 16)
+#define MT_WTBL2_W6_TX_COUNT_RATE5	GENMASK(31, 24)
+
+#define MT_WTBL2_W7_TX_COUNT_CUR_BW	GENMASK(15, 0)
+#define MT_WTBL2_W7_FAIL_COUNT_CUR_BW	GENMASK(31, 16)
+
+#define MT_WTBL2_W8_TX_COUNT_OTHER_BW	GENMASK(15, 0)
+#define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW	GENMASK(31, 16)
+
+#define MT_WTBL2_W9_POWER_OFFSET	GENMASK(4, 0)
+#define MT_WTBL2_W9_SPATIAL_EXT		BIT(5)
+#define MT_WTBL2_W9_ANT_PRIORITY	GENMASK(8, 6)
+#define MT_WTBL2_W9_CC_BW_SEL		GENMASK(10, 9)
+#define MT_WTBL2_W9_CHANGE_BW_RATE	GENMASK(13, 11)
+#define MT_WTBL2_W9_BW_CAP		GENMASK(15, 14)
+#define MT_WTBL2_W9_SHORT_GI_20		BIT(16)
+#define MT_WTBL2_W9_SHORT_GI_40		BIT(17)
+#define MT_WTBL2_W9_SHORT_GI_80		BIT(18)
+#define MT_WTBL2_W9_SHORT_GI_160	BIT(19)
+#define MT_WTBL2_W9_MPDU_FAIL_COUNT	GENMASK(25, 23)
+#define MT_WTBL2_W9_MPDU_OK_COUNT	GENMASK(28, 26)
+#define MT_WTBL2_W9_RATE_IDX		GENMASK(31, 29)
+
+#define MT_WTBL2_W10_RATE1		GENMASK(11, 0)
+#define MT_WTBL2_W10_RATE2		GENMASK(23, 12)
+#define MT_WTBL2_W10_RATE3_LO		GENMASK(31, 24)
+
+#define MT_WTBL2_W11_RATE3_HI		GENMASK(3, 0)
+#define MT_WTBL2_W11_RATE4		GENMASK(15, 4)
+#define MT_WTBL2_W11_RATE5		GENMASK(27, 16)
+#define MT_WTBL2_W11_RATE6_LO		GENMASK(31, 28)
+
+#define MT_WTBL2_W12_RATE6_HI		GENMASK(7, 0)
+#define MT_WTBL2_W12_RATE7		GENMASK(19, 8)
+#define MT_WTBL2_W12_RATE8		GENMASK(31, 20)
+
+#define MT_WTBL2_W13_AVG_RCPI0		GENMASK(7, 0)
+#define MT_WTBL2_W13_AVG_RCPI1		GENMASK(15, 8)
+#define MT_WTBL2_W13_AVG_RCPI2		GENMASK(23, 16)
+
+#define MT_WTBL2_W14_CC_NOISE_1S	GENMASK(6, 0)
+#define MT_WTBL2_W14_CC_NOISE_2S	GENMASK(13, 7)
+#define MT_WTBL2_W14_CC_NOISE_3S	GENMASK(20, 14)
+#define MT_WTBL2_W14_CHAN_EST_RMS	GENMASK(24, 21)
+#define MT_WTBL2_W14_CC_NOISE_SEL	BIT(15)
+#define MT_WTBL2_W14_ANT_SEL		GENMASK(31, 26)
+
+#define MT_WTBL2_W15_BA_WIN_SIZE	GENMASK(2, 0)
+#define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT	3
+#define MT_WTBL2_W15_BA_EN_TIDS		GENMASK(31, 24)
+
+#define MT_WTBL1_OR			(MT_WTBL1_BASE + 0x2300)
+#define MT_WTBL1_OR_PSM_WRITE		BIT(31)
+
+enum mt7603_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_TKIP_NO_MIC,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_BIP_CMAC_128,
+	MT_CIPHER_WEP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
new file mode 100644
index 0000000..68efb30
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mt7603.h"
+
+static int
+mt76_wmac_probe(struct platform_device *pdev)
+{
+	struct mt7603_dev *dev;
+	void __iomem *mem_base;
+	struct mt76_dev *mdev;
+	int irq;
+	int ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	mem_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(mem_base)) {
+		dev_err(&pdev->dev, "Failed to get memory resource\n");
+		return PTR_ERR(mem_base);
+	}
+
+	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+				 &mt7603_drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt7603_dev, mt76);
+	mt76_mmio_init(mdev, mem_base);
+
+	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+		    (mt76_rr(dev, MT_HW_REV) & 0xff);
+	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+	ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt7603_register_device(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static int
+mt76_wmac_remove(struct platform_device *pdev)
+{
+	struct mt76_dev *mdev = platform_get_drvdata(pdev);
+	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+	mt7603_unregister_device(dev);
+
+	return 0;
+}
+
+static const struct of_device_id of_wmac_match[] = {
+	{ .compatible = "mediatek,mt7628-wmac" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, of_wmac_match);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
+
+struct platform_driver mt76_wmac_driver = {
+	.probe		= mt76_wmac_probe,
+	.remove		= mt76_wmac_remove,
+	.driver = {
+		.name = "mt76_wmac",
+		.of_match_table = of_wmac_match,
+	},
+};
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 0000000..4cabba9
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT7615E
+	tristate "MediaTek MT7615E (PCIe) support"
+	select MT76_CORE
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7615-based wireless PCIe devices,
+	  which support concurrent dual-band operation at both 5GHz
+	  and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2
+	  MU-MIMO up to 4 users/group and 160MHz channels.
+
+	  To compile this driver as a module, choose M here.
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
new file mode 100644
index 0000000..5aaac69
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7615E) += mt7615e.o
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+	     debugfs.o
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
new file mode 100644
index 0000000..2428a46
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7615.h"
+
+static int
+mt7615_radar_pattern_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+
+	return mt7615_mcu_rdd_send_pattern(dev);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
+			 mt7615_radar_pattern_set, "%lld\n");
+
+static int
+mt7615_scs_set(void *data, u64 val)
+{
+	struct mt7615_dev *dev = data;
+
+	mt7615_mac_set_scs(dev, val);
+
+	return 0;
+}
+
+static int
+mt7615_scs_get(void *data, u64 *val)
+{
+	struct mt7615_dev *dev = data;
+
+	*val = dev->scs_en;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
+			 mt7615_scs_set, "%lld\n");
+
+static int
+mt7615_radio_read(struct seq_file *s, void *data)
+{
+	struct mt7615_dev *dev = dev_get_drvdata(s->private);
+
+	seq_printf(s, "Sensitivity: ofdm=%d cck=%d\n",
+		   dev->ofdm_sensitivity, dev->cck_sensitivity);
+	seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
+		   dev->false_cca_ofdm, dev->false_cca_cck);
+
+	return 0;
+}
+
+static int mt7615_read_temperature(struct seq_file *s, void *data)
+{
+	struct mt7615_dev *dev = dev_get_drvdata(s->private);
+	int temp;
+
+	/* cpu */
+	temp = mt7615_mcu_get_temperature(dev, 0);
+	seq_printf(s, "Temperature: %d\n", temp);
+
+	return 0;
+}
+
+int mt7615_init_debugfs(struct mt7615_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = mt76_register_debugfs(&dev->mt76);
+	if (!dir)
+		return -ENOMEM;
+
+	debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
+				    mt7615_radio_read);
+	debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+	/* test pattern knobs */
+	debugfs_create_u8("pattern_len", 0600, dir,
+			  &dev->radar_pattern.n_pulses);
+	debugfs_create_u32("pulse_period", 0600, dir,
+			   &dev->radar_pattern.period);
+	debugfs_create_u16("pulse_width", 0600, dir,
+			   &dev->radar_pattern.width);
+	debugfs_create_u16("pulse_power", 0600, dir,
+			   &dev->radar_pattern.power);
+	debugfs_create_file("radar_trigger", 0200, dir, dev,
+			    &fops_radar_pattern);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
+				    mt7615_read_temperature);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
new file mode 100644
index 0000000..fe532ce
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *         Roy Luo <royluo@google.com>
+ *         Lorenzo Bianconi <lorenzo@kernel.org>
+ *         Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+{
+	struct mt76_sw_queue *q;
+	struct mt76_queue *hwq;
+	int err, i;
+
+	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+	if (!hwq)
+		return -ENOMEM;
+
+	err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+	if (err < 0)
+		return err;
+
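+	/* all data queues share a single hardware TX ring; the MCU and
+	 * firmware-download queues are set up separately below
+	 */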
+	for (i = 0; i < MT_TXQ_MCU; i++) {
+		q = &dev->mt76.q_tx[i];
+		INIT_LIST_HEAD(&q->swq);
+		q->q = hwq;
+	}
+
+	return 0;
+}
+
+static int
+mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+		      int idx, int n_desc)
+{
+	struct mt76_queue *hwq;
+	int err;
+
+	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+	if (!hwq)
+		return -ENOMEM;
+
+	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+	if (err < 0)
+		return err;
+
+	INIT_LIST_HEAD(&q->swq);
+	q->q = hwq;
+
+	return 0;
+}
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	__le32 *rxd = (__le32 *)skb->data;
+	__le32 *end = (__le32 *)&skb->data[skb->len];
+	enum rx_pkt_type type;
+
+	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+	switch (type) {
+	case PKT_TYPE_TXS:
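+		/* each TX status report spans 7 dwords; walk every report
+		 * contained in this buffer
+		 */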
+		for (rxd++; rxd + 7 <= end; rxd += 7)
+			mt7615_mac_add_txs(dev, rxd);
+		dev_kfree_skb(skb);
+		break;
+	case PKT_TYPE_TXRX_NOTIFY:
+		mt7615_mac_tx_free(dev, skb);
+		break;
+	case PKT_TYPE_RX_EVENT:
+		mt7615_mcu_rx_event(dev, skb);
+		break;
+	case PKT_TYPE_NORMAL:
+		if (!mt7615_mac_fill_rx(dev, skb)) {
+			mt76_rx(&dev->mt76, q, skb);
+			return;
+		}
+		/* fall through */
+	default:
+		dev_kfree_skb(skb);
+		break;
+	}
+}
+
+static int mt7615_poll_tx(struct napi_struct *napi, int budget)
+{
+	static const u8 queue_map[] = {
+		MT_TXQ_MCU,
+		MT_TXQ_BE
+	};
+	struct mt7615_dev *dev;
+	int i;
+
+	dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+
+	for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+		mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+	if (napi_complete_done(napi, 0))
+		mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
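+	/* run the cleanup once more after re-arming the interrupt to avoid
+	 * missing completions that raced with napi_complete_done()
+	 */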
+	for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+		mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+	tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	return 0;
+}
+
+int mt7615_dma_init(struct mt7615_dev *dev)
+{
+	int ret;
+
+	mt76_dma_attach(&dev->mt76);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG,
+		MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
+		MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
+		MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
+		MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
+
+	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+		       MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+	mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+	mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+	mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+	mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+	mt76_set(dev, 0x7158, BIT(16));
+	mt76_clear(dev, 0x7000, BIT(23));
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				    MT7615_TXQ_MCU,
+				    MT7615_TX_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+				    MT7615_TXQ_FWDL,
+				    MT7615_TX_FWDL_RING_SIZE);
+	if (ret)
+		return ret;
+
+	/* init rx queues */
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+			       MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
+			       MT_RX_RING_BASE);
+	if (ret)
+		return ret;
+
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+			       MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
+			       MT_RX_RING_BASE);
+	if (ret)
+		return ret;
+
+	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+
+	ret = mt76_init_queues(dev);
+	if (ret < 0)
+		return ret;
+
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+			  mt7615_poll_tx, NAPI_POLL_WEIGHT);
+	napi_enable(&dev->mt76.tx_napi);
+
+	mt76_poll(dev, MT_WPDMA_GLO_CFG,
+		  MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		  MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+	/* start dma engine */
+	mt76_set(dev, MT_WPDMA_GLO_CFG,
+		 MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+	/* enable interrupts for TX/RX rings */
+	mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+
+	return 0;
+}
+
+void mt7615_dma_cleanup(struct mt7615_dev *dev)
+{
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		   MT_WPDMA_GLO_CFG_RX_DMA_EN);
+	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+	tasklet_kill(&dev->mt76.tx_tasklet);
+	mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
new file mode 100644
index 0000000..515bb58
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *         Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "eeprom.h"
+
+static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
+			     u16 addr, u8 *data)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+	val |= MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
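+	/* the hardware clears the kick bit once the efuse read completes */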
+	if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	udelay(2);
+
+	val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+	    WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+		memset(data, 0x0, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int mt7615_efuse_init(struct mt7615_dev *dev)
+{
+	u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE);
+	int i, len = MT7615_EEPROM_SIZE;
+	void *buf;
+
+	val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
+	if (val & MT_EFUSE_BASE_CTRL_EMPTY)
+		return 0;
+
+	dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+	dev->mt76.otp.size = len;
+	if (!dev->mt76.otp.data)
+		return -ENOMEM;
+
+	buf = dev->mt76.otp.data;
+	for (i = 0; i + 16 <= len; i += 16) {
+		int ret;
+
+		ret = mt7615_efuse_read(dev, base, i, buf + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mt7615_eeprom_load(struct mt7615_dev *dev)
+{
+	int ret;
+
+	ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
+	if (ret < 0)
+		return ret;
+
+	return mt7615_efuse_init(dev);
+}
+
+static int mt7615_check_eeprom(struct mt76_dev *dev)
+{
+	u16 val = get_unaligned_le16(dev->eeprom.data);
+
+	switch (val) {
+	case 0x7615:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+{
+	u8 val, *eeprom = dev->mt76.eeprom.data;
+
+	val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
+			eeprom[MT_EE_WIFI_CONF]);
+	switch (val) {
+	case MT_EE_5GHZ:
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	case MT_EE_2GHZ:
+		dev->mt76.cap.has_2ghz = true;
+		break;
+	default:
+		dev->mt76.cap.has_2ghz = true;
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	}
+}
+
+int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
+				  struct ieee80211_channel *chan,
+				  u8 chain_idx)
+{
+	int index;
+
+	if (chain_idx > 3)
+		return -EINVAL;
+
+	/* TSSI disabled */
+	if (mt7615_ext_pa_enabled(dev, chan->band)) {
+		if (chan->band == NL80211_BAND_2GHZ)
+			return MT_EE_EXT_PA_2G_TARGET_POWER;
+		else
+			return MT_EE_EXT_PA_5G_TARGET_POWER;
+	}
+
+	/* TSSI enabled */
+	if (chan->band == NL80211_BAND_2GHZ) {
+		index = MT_EE_TX0_2G_TARGET_POWER + chain_idx * 6;
+	} else {
+		int group = mt7615_get_channel_group(chan->hw_value);
+
+		switch (chain_idx) {
+		case 1:
+			index = MT_EE_TX1_5G_G0_TARGET_POWER;
+			break;
+		case 2:
+			index = MT_EE_TX2_5G_G0_TARGET_POWER;
+			break;
+		case 3:
+			index = MT_EE_TX3_5G_G0_TARGET_POWER;
+			break;
+		case 0:
+		default:
+			index = MT_EE_TX0_5G_G0_TARGET_POWER;
+			break;
+		}
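+		/* per-chain target power entries for each 5 GHz channel
+		 * group are laid out 5 bytes apart
+		 */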
+		index += 5 * group;
+	}
+
+	return index;
+}
+
+static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
+{
+	static const u16 ical[] = {
+		0x53, 0x54, 0x55, 0x56, 0x57, 0x5c, 0x5d, 0x62, 0x63, 0x68,
+		0x69, 0x6e, 0x6f, 0x73, 0x74, 0x78, 0x79, 0x82, 0x83, 0x87,
+		0x88, 0x8c, 0x8d, 0x91, 0x92, 0x96, 0x97, 0x9b, 0x9c, 0xa0,
+		0xa1, 0xaa, 0xab, 0xaf, 0xb0, 0xb4, 0xb5, 0xb9, 0xba, 0xf4,
+		0xf7, 0xff,
+		0x140, 0x141, 0x145, 0x146, 0x14a, 0x14b, 0x154, 0x155, 0x159,
+		0x15a, 0x15e, 0x15f, 0x163, 0x164, 0x168, 0x169, 0x16d, 0x16e,
+		0x172, 0x173, 0x17c, 0x17d, 0x181, 0x182, 0x186, 0x187, 0x18b,
+		0x18c
+	};
+	static const u16 ical_nocheck[] = {
+		0x110, 0x111, 0x112, 0x113, 0x114, 0x115, 0x116, 0x117, 0x118,
+		0x1b5, 0x1b6, 0x1b7, 0x3ac, 0x3ad, 0x3ae, 0x3af, 0x3b0, 0x3b1,
+		0x3b2
+	};
+	u8 *eeprom = dev->mt76.eeprom.data;
+	u8 *otp = dev->mt76.otp.data;
+	int i;
+
+	if (!otp)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ical); i++)
+		if (!otp[ical[i]])
+			return;
+
+	for (i = 0; i < ARRAY_SIZE(ical); i++)
+		eeprom[ical[i]] = otp[ical[i]];
+
+	for (i = 0; i < ARRAY_SIZE(ical_nocheck); i++)
+		eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]];
+}
+
+int mt7615_eeprom_init(struct mt7615_dev *dev)
+{
+	int ret;
+
+	ret = mt7615_eeprom_load(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = mt7615_check_eeprom(&dev->mt76);
+	if (ret && dev->mt76.otp.data)
+		memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+		       MT7615_EEPROM_SIZE);
+	else
+		mt7615_apply_cal_free_data(dev);
+
+	mt7615_eeprom_parse_hw_cap(dev);
+	memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+	       ETH_ALEN);
+
+	mt76_eeprom_override(&dev->mt76);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
new file mode 100644
index 0000000..f4a4280
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_EEPROM_H
+#define __MT7615_EEPROM_H
+
+#include "mt7615.h"
+
+enum mt7615_eeprom_field {
+	MT_EE_CHIP_ID =				0x000,
+	MT_EE_VERSION =				0x002,
+	MT_EE_MAC_ADDR =			0x004,
+	MT_EE_NIC_CONF_0 =			0x034,
+	MT_EE_NIC_CONF_1 =			0x036,
+	MT_EE_WIFI_CONF =			0x03e,
+	MT_EE_TX0_2G_TARGET_POWER =		0x058,
+	MT_EE_TX0_5G_G0_TARGET_POWER =		0x070,
+	MT_EE_TX1_5G_G0_TARGET_POWER =		0x098,
+	MT_EE_EXT_PA_2G_TARGET_POWER =		0x0f2,
+	MT_EE_EXT_PA_5G_TARGET_POWER =		0x0f3,
+	MT_EE_TX2_5G_G0_TARGET_POWER =		0x142,
+	MT_EE_TX3_5G_G0_TARGET_POWER =		0x16a,
+
+	__MT_EE_MAX =				0x3bf
+};
+
+#define MT_EE_NIC_CONF_TSSI_2G			BIT(5)
+#define MT_EE_NIC_CONF_TSSI_5G			BIT(6)
+
+#define MT_EE_NIC_WIFI_CONF_BAND_SEL		GENMASK(5, 4)
+enum mt7615_eeprom_band {
+	MT_EE_DUAL_BAND,
+	MT_EE_5GHZ,
+	MT_EE_2GHZ,
+	MT_EE_DBDC,
+};
+
+enum mt7615_channel_group {
+	MT_CH_5G_JAPAN,
+	MT_CH_5G_UNII_1,
+	MT_CH_5G_UNII_2A,
+	MT_CH_5G_UNII_2B,
+	MT_CH_5G_UNII_2E_1,
+	MT_CH_5G_UNII_2E_2,
+	MT_CH_5G_UNII_2E_3,
+	MT_CH_5G_UNII_3,
+	__MT_CH_MAX
+};
+
+static inline enum mt7615_channel_group
+mt7615_get_channel_group(int channel)
+{
+	if (channel >= 184 && channel <= 196)
+		return MT_CH_5G_JAPAN;
+	if (channel <= 48)
+		return MT_CH_5G_UNII_1;
+	if (channel <= 64)
+		return MT_CH_5G_UNII_2A;
+	if (channel <= 114)
+		return MT_CH_5G_UNII_2E_1;
+	if (channel <= 144)
+		return MT_CH_5G_UNII_2E_2;
+	if (channel <= 161)
+		return MT_CH_5G_UNII_2E_3;
+	return MT_CH_5G_UNII_3;
+}
+
+static inline bool
+mt7615_ext_pa_enabled(struct mt7615_dev *dev, enum nl80211_band band)
+{
+	u8 *eep = dev->mt76.eeprom.data;
+
+	if (band == NL80211_BAND_5GHZ)
+		return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_5G);
+	else
+		return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_2G);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/init.c
new file mode 100644
index 0000000..1104e4c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ *         Ryder Lee <ryder.lee@mediatek.com>
+ *         Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static void mt7615_phy_init(struct mt7615_dev *dev)
+{
+	/* disable band 0 rf low power beacon mode */
+	mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
+		 MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+}
+
+static void mt7615_mac_init(struct mt7615_dev *dev)
+{
+	u32 val;
+
+	/* enable band 0/1 clk */
+	mt76_set(dev, MT_CFG_CCR,
+		 MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN |
+		 MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN);
+
+	val = mt76_rmw(dev, MT_TMAC_TRCR0,
+		       MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
+		       FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
+		       FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
+	mt76_wr(dev, MT_TMAC_TRCR1, val);
+
+	val = MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
+	      FIELD_PREP(MT_AGG_ACR_CFEND_RATE, 0x49) | /* 24M */
+	      FIELD_PREP(MT_AGG_ACR_BAR_RATE, 0x4b); /* 6M */
+	mt76_wr(dev, MT_AGG_ACR0, val);
+	mt76_wr(dev, MT_AGG_ACR1, val);
+
+	mt76_rmw_field(dev, MT_TMAC_CTCR0,
+		       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+	mt76_rmw_field(dev, MT_TMAC_CTCR0,
+		       MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+	mt76_rmw(dev, MT_TMAC_CTCR0,
+		 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+		 MT_TMAC_CTCR0_INS_DDLMT_EN,
+		 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+		 MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+	mt7615_mcu_set_rts_thresh(dev, 0x92b);
+	mt7615_mac_set_scs(dev, false);
+
+	mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+		 MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+	mt7615_mcu_init_mac(dev);
+
+	mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+		FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
+
+	mt76_wr(dev, MT_AGG_ARUCR,
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
+	mt76_wr(dev, MT_AGG_ARDCR,
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+		FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+
+	mt76_wr(dev, MT_AGG_ARCR,
+		(FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+		 MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+		 FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+		 FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+}
+
+static int mt7615_init_hardware(struct mt7615_dev *dev)
+{
+	int ret, idx;
+
+	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+	spin_lock_init(&dev->token_lock);
+	idr_init(&dev->token);
+
+	ret = mt7615_eeprom_init(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = mt7615_dma_init(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	ret = mt7615_mcu_init(dev);
+	if (ret)
+		return ret;
+
+	mt7615_mcu_set_eeprom(dev);
+	mt7615_mac_init(dev);
+	mt7615_phy_init(dev);
+	mt7615_mcu_ctrl_pm_state(dev, 0);
+	mt7615_mcu_del_wtbl_all(dev);
+
+	/* Beacon and mgmt frames should occupy wcid 0 */
+	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+	if (idx)
+		return -ENOSPC;
+
+	dev->mt76.global_wcid.idx = idx;
+	dev->mt76.global_wcid.hw_key_idx = -1;
+	rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+	return 0;
+}
+
+#define CCK_RATE(_idx, _rate) {						\
+	.bitrate = _rate,						\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,				\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),			\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {					\
+	.bitrate = _rate,						\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),			\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
+}
+
+static struct ieee80211_rate mt7615_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(11, 60),
+	OFDM_RATE(15, 90),
+	OFDM_RATE(10, 120),
+	OFDM_RATE(14, 180),
+	OFDM_RATE(9,  240),
+	OFDM_RATE(13, 360),
+	OFDM_RATE(8,  480),
+	OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+	{
+		.max = MT7615_MAX_INTERFACES,
+		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_STATION)
+	}
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+	{
+		.limits = if_limits,
+		.n_limits = ARRAY_SIZE(if_limits),
+		.max_interfaces = 4,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	}
+};
+
+static void
+mt7615_init_txpower(struct mt7615_dev *dev,
+		    struct ieee80211_supported_band *sband)
+{
+	int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
+	u8 *eep = (u8 *)dev->mt76.eeprom.data;
+	enum nl80211_band band = sband->band;
+
+	target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+	for (i = 0; i < sband->n_channels; i++) {
+		struct ieee80211_channel *chan = &sband->channels[i];
+		u8 target_power = 0;
+		int j;
+
+		for (j = 0; j < target_chains; j++) {
+			int index;
+
+			index = mt7615_eeprom_get_power_index(dev, chan, j);
+			target_power = max(target_power, eep[index]);
+		}
+
+		target_power = DIV_ROUND_UP(target_power, 2);
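+		/* values are stored in 0.5 dB steps; additional TX chains
+		 * add combining gain on top of the per-chain target power
+		 */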
+		switch (n_chains) {
+		case 4:
+			target_power += 6;
+			break;
+		case 3:
+			target_power += 4;
+			break;
+		case 2:
+			target_power += 3;
+			break;
+		default:
+			break;
+		}
+
+		chan->max_power = min_t(int, chan->max_reg_power,
+					target_power);
+		chan->orig_mpwr = target_power;
+	}
+}
+
+static void
+mt7615_regd_notifier(struct wiphy *wiphy,
+		     struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct mt7615_dev *dev = hw->priv;
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+	if (request->dfs_region == dev->mt76.region)
+		return;
+
+	dev->mt76.region = request->dfs_region;
+
+	if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+		return;
+
+	mt7615_dfs_stop_radar_detector(dev);
+	if (request->dfs_region == NL80211_DFS_UNSET)
+		mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
+				   MT_RX_SEL0, 0);
+	else
+		mt7615_dfs_start_radar_detector(dev);
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	ret = mt7615_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+
+	hw->queues = 4;
+	hw->max_rates = 3;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 11;
+
+	hw->sta_data_size = sizeof(struct mt7615_sta);
+	hw->vif_data_size = sizeof(struct mt7615_vif);
+
+	wiphy->iface_combinations = if_comb;
+	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+	wiphy->reg_notifier = mt7615_regd_notifier;
+	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+	ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+
+	dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	dev->mt76.sband_5g.sband.vht_cap.cap |=
+			IEEE80211_VHT_CAP_SHORT_GI_160 |
+			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+	dev->mt76.chainmask = 0x404;
+	dev->mt76.antenna_mask = 0xf;
+	dev->dfs_state = -1;
+
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+				 BIT(NL80211_IFTYPE_AP);
+
+	ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
+				   ARRAY_SIZE(mt7615_rates));
+	if (ret)
+		return ret;
+
+	mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
+	mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+	hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+
+	return mt7615_init_debugfs(dev);
+}
+
+void mt7615_unregister_device(struct mt7615_dev *dev)
+{
+	struct mt76_txwi_cache *txwi;
+	int id;
+
+	mt76_unregister_device(&dev->mt76);
+	mt7615_mcu_exit(dev);
+	mt7615_dma_cleanup(dev);
+
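+	/* release any TX tokens that were still pending in the idr */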
+	spin_lock_bh(&dev->token_lock);
+	idr_for_each_entry(&dev->token, txwi, id) {
+		mt7615_txp_skb_unmap(&dev->mt76, txwi);
+		if (txwi->skb)
+			dev_kfree_skb_any(txwi->skb);
+		mt76_put_txwi(&dev->mt76, txwi);
+	}
+	spin_unlock_bh(&dev->token_lock);
+	idr_destroy(&dev->token);
+
+	mt76_free_device(&dev->mt76);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
new file mode 100644
index 0000000..0c813e2
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *         Roy Luo <royluo@google.com>
+ *         Felix Fietkau <nbd@nbd.name>
+ *         Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+#define to_rssi(field, rxv)		((FIELD_GET(field, rxv) - 220) / 2)
+
+static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+					    u8 idx, bool unicast)
+{
+	struct mt7615_sta *sta;
+	struct mt76_wcid *wcid;
+
+	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+		return NULL;
+
+	wcid = rcu_dereference(dev->mt76.wcid[idx]);
+	if (unicast || !wcid)
+		return wcid;
+
+	if (!wcid->sta)
+		return NULL;
+
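+	/* non-unicast frames are reported on the wcid of the station's vif */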
+	sta = container_of(wcid, struct mt7615_sta, wcid);
+	if (!sta->vif)
+		return NULL;
+
+	return &sta->vif->sta.wcid;
+}
+
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_hdr *hdr;
+	__le32 *rxd = (__le32 *)skb->data;
+	u32 rxd0 = le32_to_cpu(rxd[0]);
+	u32 rxd1 = le32_to_cpu(rxd[1]);
+	u32 rxd2 = le32_to_cpu(rxd[2]);
+	bool unicast, remove_pad, insert_ccmp_hdr = false;
+	int i, idx;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return -EINVAL;
+
+	memset(status, 0, sizeof(*status));
+
+	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
+	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+
+	/* TODO: properly support DBDC */
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+	if (status->band == NL80211_BAND_5GHZ)
+		sband = &dev->mt76.sband_5g.sband;
+	else
+		sband = &dev->mt76.sband_2g.sband;
+
+	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+		status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+		status->flag |= RX_FLAG_MMIC_ERROR;
+
+	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+	}
+
+	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+		return -EINVAL;
+
+	if (!sband->channels)
+		return -EINVAL;
+
+	rxd += 4;
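+	/* step over whichever optional RXD groups are present */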
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+		rxd += 4;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+		u8 *data = (u8 *)rxd;
+
+		if (status->flag & RX_FLAG_DECRYPTED) {
+			status->iv[0] = data[5];
+			status->iv[1] = data[4];
+			status->iv[2] = data[3];
+			status->iv[3] = data[2];
+			status->iv[4] = data[1];
+			status->iv[5] = data[0];
+
+			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+		}
+		rxd += 4;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+		rxd += 2;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+
+	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+		u32 rxdg0 = le32_to_cpu(rxd[0]);
+		u32 rxdg1 = le32_to_cpu(rxd[1]);
+		u32 rxdg3 = le32_to_cpu(rxd[3]);
+		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+		bool cck = false;
+
+		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+		case MT_PHY_TYPE_CCK:
+			cck = true;
+			/* fall through */
+		case MT_PHY_TYPE_OFDM:
+			i = mt76_get_rate(&dev->mt76, sband, i, cck);
+			break;
+		case MT_PHY_TYPE_HT_GF:
+		case MT_PHY_TYPE_HT:
+			status->encoding = RX_ENC_HT;
+			if (i > 31)
+				return -EINVAL;
+			break;
+		case MT_PHY_TYPE_VHT:
+			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
+			status->encoding = RX_ENC_VHT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		status->rate_idx = i;
+
+		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
+		case MT_PHY_BW_20:
+			break;
+		case MT_PHY_BW_40:
+			status->bw = RATE_INFO_BW_40;
+			break;
+		case MT_PHY_BW_80:
+			status->bw = RATE_INFO_BW_80;
+			break;
+		case MT_PHY_BW_160:
+			status->bw = RATE_INFO_BW_160;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		if (rxdg0 & MT_RXV1_HT_AD_CODE)
+			status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+
+		status->chains = dev->mt76.antenna_mask;
+		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
+		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
+		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
+		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
+		status->signal = status->chain_signal[0];
+
+		for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
+			if (!(status->chains & BIT(i)))
+				continue;
+
+			status->signal = max(status->signal,
+					     status->chain_signal[i]);
+		}
+
+		rxd += 6;
+		if ((u8 *)rxd - skb->data >= skb->len)
+			return -EINVAL;
+	}
+
+	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+	if (insert_ccmp_hdr) {
+		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+		mt76_insert_ccmp_hdr(skb, key_id);
+	}
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+		return 0;
+
+	status->aggr = unicast &&
+		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
+	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+	return 0;
+}
+
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e)
+{
+	if (!e->txwi) {
+		dev_kfree_skb_any(e->skb);
+		return;
+	}
+
+	/* error path */
+	if (e->skb == DMA_DUMMY_DATA) {
+		struct mt76_txwi_cache *t;
+		struct mt7615_dev *dev;
+		struct mt7615_txp *txp;
+
+		dev = container_of(mdev, struct mt7615_dev, mt76);
+		txp = mt7615_txwi_to_txp(mdev, e->txwi);
+
+		spin_lock_bh(&dev->token_lock);
+		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+		spin_unlock_bh(&dev->token_lock);
+		e->skb = t ? t->skb : NULL;
+	}
+
+	if (e->skb)
+		mt76_tx_complete_skb(mdev, e->skb);
+}
+
+static u16
+mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+		       const struct ieee80211_tx_rate *rate,
+		       bool stbc, u8 *bw)
+{
+	u8 phy, nss, rate_idx;
+	u16 rateval = 0;
+
+	*bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = ieee80211_rate_get_vht_mcs(rate);
+		nss = ieee80211_rate_get_vht_nss(rate);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			*bw = 1;
+		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			*bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+			*bw = 3;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			*bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		nss = 1;
+		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+	}
+
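+	/* STBC transmits on an extra spatial stream, so bump nss for
+	 * single-stream rates
+	 */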
+	if (stbc && nss == 1) {
+		nss++;
+		rateval |= MT_TX_RATE_STBC;
+	}
+
+	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
+		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
+
+	return rateval;
+}
+
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+			  struct sk_buff *skb, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta, int pid,
+			  struct ieee80211_key_conf *key)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	bool multicast = is_multicast_ether_addr(hdr->addr1);
+	struct ieee80211_vif *vif = info->control.vif;
+	int tx_count = 8;
+	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+	__le16 fc = hdr->frame_control;
+	u16 seqno = 0;
+	u32 val;
+
+	if (vif) {
+		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+		omac_idx = mvif->omac_idx;
+		wmm_idx = mvif->wmm_idx;
+	}
+
+	if (sta) {
+		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+		tx_count = msta->rate_count;
+	}
+
+	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+	if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
+		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
+			skb_get_queue_mapping(skb);
+		p_fmt = MT_TX_TYPE_CT;
+	} else if (ieee80211_is_beacon(fc)) {
+		q_idx = MT_LMAC_BCN0;
+		p_fmt = MT_TX_TYPE_FW;
+	} else {
+		q_idx = MT_LMAC_ALTX0;
+		p_fmt = MT_TX_TYPE_CT;
+	}
+
+	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
+	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+	txwi[0] = cpu_to_le32(val);
+
+	val = MT_TXD1_LONG_FORMAT |
+	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+	      FIELD_PREP(MT_TXD1_HDR_INFO,
+			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
+	      FIELD_PREP(MT_TXD1_TID,
+			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
+	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+	txwi[1] = cpu_to_le32(val);
+
+	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+	if (key) {
+		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+			val |= MT_TXD2_BIP;
+			txwi[3] = 0;
+		} else {
+			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+		}
+	} else {
+		txwi[3] = 0;
+	}
+	txwi[2] = cpu_to_le32(val);
+
+	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+	txwi[4] = 0;
+	txwi[6] = 0;
+
+	if (rate->idx >= 0 && rate->count &&
+	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+		u8 bw;
+		u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+		val = MT_TXD6_FIXED_BW |
+		      FIELD_PREP(MT_TXD6_BW, bw) |
+		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+		txwi[6] |= cpu_to_le32(val);
+
+		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+		if (info->flags & IEEE80211_TX_CTL_LDPC)
+			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
+
+		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
+				     IEEE80211_TX_RC_VHT_MCS)))
+			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+		tx_count = rate->count;
+	}
+
+	if (!ieee80211_is_beacon(fc)) {
+		val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+		      FIELD_PREP(MT_TXD5_PID, pid);
+		txwi[5] = cpu_to_le32(val);
+	} else {
+		txwi[5] = 0;
+		/* use maximum tx count for beacons */
+		tx_count = 0x1f;
+	}
+
+	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+		val |= MT_TXD3_SN_VALID;
+	} else if (ieee80211_is_back_req(hdr->frame_control)) {
+		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+
+		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+		val |= MT_TXD3_SN_VALID;
+	}
+	val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
+
+	txwi[3] |= cpu_to_le32(val);
+
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+	txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+		  FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+
+	return 0;
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+			  struct mt76_txwi_cache *t)
+{
+	struct mt7615_txp *txp;
+	int i;
+
+	txp = mt7615_txwi_to_txp(dev, t);
+	for (i = 1; i < txp->nbuf; i++)
+		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+static u32 mt7615_mac_wtbl_addr(int wcid)
+{
+	return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
+}
+
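+/* Program up to four fallback rates (plus an optional probe rate) into the
+ * per-station WTBL rate registers and latch the current TSF, so that later
+ * tx status reports can be matched to the rate set that was in use.
+ */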
+void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+			  struct ieee80211_tx_rate *probe_rate,
+			  struct ieee80211_tx_rate *rates)
+{
+	struct ieee80211_tx_rate *ref;
+	int wcid = sta->wcid.idx;
+	u32 addr = mt7615_mac_wtbl_addr(wcid);
+	bool stbc = false;
+	int n_rates = sta->n_rates;
+	u8 bw, bw_prev, bw_idx = 0;
+	u16 val[4];
+	u16 probe_val;
+	u32 w5, w27;
+	bool rateset;
+	int i, k;
+
+	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+		return;
+
+	for (i = n_rates; i < 4; i++)
+		rates[i] = rates[n_rates - 1];
+
+	rateset = !(sta->rate_set_tsf & BIT(0));
+	memcpy(sta->rateset[rateset].rates, rates,
+	       sizeof(sta->rateset[rateset].rates));
+	if (probe_rate) {
+		sta->rateset[rateset].probe_rate = *probe_rate;
+		ref = &sta->rateset[rateset].probe_rate;
+	} else {
+		sta->rateset[rateset].probe_rate.idx = -1;
+		ref = &sta->rateset[rateset].rates[0];
+	}
+
+	rates = sta->rateset[rateset].rates;
+	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
+		/*
+		 * We don't support switching between short and long GI
+		 * within the rate set. For accurate tx status reporting, we
+		 * need to make sure that flags match.
+		 * For improved performance, avoid duplicate entries by
+		 * decrementing the MCS index if necessary.
+		 */
+		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
+			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
+
+		for (k = 0; k < i; k++) {
+			if (rates[i].idx != rates[k].idx)
+				continue;
+			if ((rates[i].flags ^ rates[k].flags) &
+			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
+			     IEEE80211_TX_RC_80_MHZ_WIDTH |
+			     IEEE80211_TX_RC_160_MHZ_WIDTH))
+				continue;
+
+			if (!rates[i].idx)
+				continue;
+
+			rates[i].idx--;
+		}
+	}
+
+	val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+	bw_prev = bw;
+
+	if (probe_rate) {
+		probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+		if (bw)
+			bw_idx = 1;
+		else
+			bw_prev = 0;
+	} else {
+		probe_val = val[0];
+	}
+
+	val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+	if (bw_prev) {
+		bw_idx = 3;
+		bw_prev = bw;
+	}
+
+	val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+	if (bw_prev) {
+		bw_idx = 5;
+		bw_prev = bw;
+	}
+
+	val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+	if (bw_prev)
+		bw_idx = 7;
+
+	w27 = mt76_rr(dev, addr + 27 * 4);
+	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
+
+	w5 = mt76_rr(dev, addr + 5 * 4);
+	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
+		MT_WTBL_W5_MPDU_OK_COUNT |
+		MT_WTBL_W5_MPDU_FAIL_COUNT |
+		MT_WTBL_W5_RATE_IDX);
+	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
+	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
+
+	mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+	mt76_wr(dev, MT_WTBL_RIUCR1,
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
+
+	mt76_wr(dev, MT_WTBL_RIUCR2,
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
+		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+	mt76_wr(dev, MT_WTBL_RIUCR3,
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
+		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+	mt76_wr(dev, MT_WTBL_UPDATE,
+		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+		MT_WTBL_UPDATE_RATE_UPDATE |
+		MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+	mt76_wr(dev, addr + 27 * 4, w27);
+
+	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
+
+	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
+	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+}
+
+static enum mt7615_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+	switch (cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		return MT_CIPHER_BIP_CMAC_128;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	case WLAN_CIPHER_SUITE_CCMP_256:
+		return MT_CIPHER_CCMP_256;
+	case WLAN_CIPHER_SUITE_GCMP:
+		return MT_CIPHER_GCMP;
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		return MT_CIPHER_GCMP_256;
+	case WLAN_CIPHER_SUITE_SMS4:
+		return MT_CIPHER_WAPI;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
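+/* Write key material into the per-peer WTBL key area (dwords 30-37). When a
+ * pairwise key and a BIP(CMAC-128) key coexist, the pairwise key occupies
+ * the first 16 bytes and the BIP key the following 16 bytes.
+ */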
+static int
+mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+			   struct ieee80211_key_conf *key,
+			   enum mt7615_cipher_type cipher,
+			   enum set_key_cmd cmd)
+{
+	u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4;
+	u8 data[32] = {};
+
+	if (key->keylen > sizeof(data))
+		return -EINVAL;
+
+	mt76_rr_copy(dev, addr, data, sizeof(data));
+	if (cmd == SET_KEY) {
+		if (cipher == MT_CIPHER_TKIP) {
+			/* Rx/Tx MIC keys are swapped */
+			memcpy(data + 16, key->key + 24, 8);
+			memcpy(data + 24, key->key + 16, 8);
+		}
+		if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
+			memmove(data + 16, data, 16);
+		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+			memcpy(data, key->key, key->keylen);
+		else if (cipher == MT_CIPHER_BIP_CMAC_128)
+			memcpy(data + 16, key->key, 16);
+	} else {
+		if (wcid->cipher & ~BIT(cipher)) {
+			if (cipher != MT_CIPHER_BIP_CMAC_128)
+				memmove(data, data + 16, 16);
+			memset(data + 16, 0, 16);
+		} else {
+			memset(data, 0, sizeof(data));
+		}
+	}
+	mt76_wr_copy(dev, addr, data, sizeof(data));
+
+	return 0;
+}
+
+static int
+mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+			  enum mt7615_cipher_type cipher, int keyidx,
+			  enum set_key_cmd cmd)
+{
+	u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1;
+
+	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+		return -ETIMEDOUT;
+
+	w0 = mt76_rr(dev, addr);
+	w1 = mt76_rr(dev, addr + 4);
+	if (cmd == SET_KEY) {
+		w0 |= MT_WTBL_W0_RX_KEY_VALID |
+		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
+				 cipher == MT_CIPHER_BIP_CMAC_128);
+		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
+		    !wcid->cipher)
+			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+	}  else {
+		if (!(wcid->cipher & ~BIT(cipher)))
+			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
+				MT_WTBL_W0_KEY_IDX);
+		if (cipher == MT_CIPHER_BIP_CMAC_128)
+			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+	}
+	mt76_wr(dev, MT_WTBL_RICR0, w0);
+	mt76_wr(dev, MT_WTBL_RICR1, w1);
+
+	mt76_wr(dev, MT_WTBL_UPDATE,
+		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid->idx) |
+		MT_WTBL_UPDATE_RXINFO_UPDATE);
+
+	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void
+mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+			      enum mt7615_cipher_type cipher,
+			      enum set_key_cmd cmd)
+{
+	u32 addr = mt7615_mac_wtbl_addr(wcid->idx);
+
+	if (cmd == SET_KEY) {
+		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+	} else {
+		if (cipher != MT_CIPHER_BIP_CMAC_128 &&
+		    wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
+			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
+					    MT_CIPHER_BIP_CMAC_128));
+		else if (!(wcid->cipher & ~BIT(cipher)))
+			mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+	}
+}
+
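+/* Install or remove a WTBL key: update the cipher type, write the key
+ * material and finally refresh the RX key-valid / key-index attributes.
+ */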
+int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+			    struct mt76_wcid *wcid,
+			    struct ieee80211_key_conf *key,
+			    enum set_key_cmd cmd)
+{
+	enum mt7615_cipher_type cipher;
+	int err;
+
+	cipher = mt7615_mac_get_cipher(key->cipher);
+	if (cipher == MT_CIPHER_NONE)
+		return -EOPNOTSUPP;
+
+	spin_lock_bh(&dev->mt76.lock);
+
+	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
+	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
+	if (err < 0)
+		goto out;
+
+	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
+					cmd);
+	if (err < 0)
+		goto out;
+
+	if (cmd == SET_KEY)
+		wcid->cipher |= BIT(cipher);
+	else
+		wcid->cipher &= ~BIT(cipher);
+
+out:
+	spin_unlock_bh(&dev->mt76.lock);
+
+	return err;
+}
+
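+/* mt76 .tx_prepare_skb hook: write the TXWI and the TXP buffer list, stash
+ * the skb under an idr token so it can be completed from the TX-free event,
+ * and hand only the TXD plus the first MT_CT_PARSE_LEN bytes to the firmware.
+ */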
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
+			  struct mt76_tx_info *tx_info)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	struct ieee80211_vif *vif = info->control.vif;
+	int i, pid, id, nbuf = tx_info->nbuf - 1;
+	u8 *txwi = (u8 *)txwi_ptr;
+	struct mt76_txwi_cache *t;
+	struct mt7615_txp *txp;
+
+	if (!wcid)
+		wcid = &dev->mt76.global_wcid;
+
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+		spin_lock_bh(&dev->mt76.lock);
+		mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
+				     msta->rates);
+		msta->rate_probe = true;
+		spin_unlock_bh(&dev->mt76.lock);
+	}
+
+	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+			      pid, key);
+
+	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+	for (i = 0; i < nbuf; i++) {
+		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+	}
+	txp->nbuf = nbuf;
+
+	/* pass partial skb header to fw */
+	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+	if (!key)
+		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+	if (vif) {
+		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+		txp->bss_idx = mvif->idx;
+	}
+
+	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+	t->skb = tx_info->skb;
+
+	spin_lock_bh(&dev->token_lock);
+	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
+	spin_unlock_bh(&dev->token_lock);
+	if (id < 0)
+		return id;
+
+	txp->token = cpu_to_le16(id);
+	txp->rept_wds_wcid = 0xff;
+	tx_info->skb = DMA_DUMMY_DATA;
+
+	return 0;
+}
+
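+/* Translate a hardware TX status (TXS) report into mac80211 tx_info: ACK and
+ * A-MPDU flags, per-rate retry counts reconstructed from the rate set that
+ * was active when the frame was queued, and the final transmit rate.
+ */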
+static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+			    struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+	struct ieee80211_supported_band *sband;
+	struct mt7615_rate_set *rs;
+	int first_idx = 0, last_idx;
+	int i, idx, count;
+	bool fixed_rate, ack_timeout;
+	bool ampdu, cck = false;
+	bool rs_idx;
+	u32 rate_set_tsf;
+	u32 final_rate, final_rate_flags, final_nss, txs;
+
+	txs = le32_to_cpu(txs_data[1]);
+	ampdu = txs & MT_TXS1_AMPDU;
+
+	txs = le32_to_cpu(txs_data[3]);
+	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
+	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
+
+	txs = le32_to_cpu(txs_data[0]);
+	fixed_rate = txs & MT_TXS0_FIXED_RATE;
+	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+		return false;
+
+	if (txs & MT_TXS0_QUEUE_TIMEOUT)
+		return false;
+
+	if (!ack_timeout)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	info->status.ampdu_len = 1;
+	info->status.ampdu_ack_len = !!(info->flags &
+					IEEE80211_TX_STAT_ACK);
+
+	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);
+
+	if (fixed_rate) {
+		info->status.rates[0].count = count;
+		i = 0;
+		goto out;
+	}
+
+	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
+	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+			 rate_set_tsf) < 1000000);
+	rs_idx ^= rate_set_tsf & BIT(0);
+	rs = &sta->rateset[rs_idx];
+
+	if (!first_idx && rs->probe_rate.idx >= 0) {
+		info->status.rates[0] = rs->probe_rate;
+
+		spin_lock_bh(&dev->mt76.lock);
+		if (sta->rate_probe) {
+			mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
+			sta->rate_probe = false;
+		}
+		spin_unlock_bh(&dev->mt76.lock);
+	} else {
+		info->status.rates[0] = rs->rates[first_idx / 2];
+	}
+	info->status.rates[0].count = 0;
+
+	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
+		struct ieee80211_tx_rate *cur_rate;
+		int cur_count;
+
+		cur_rate = &rs->rates[idx / 2];
+		cur_count = min_t(int, MT7615_RATE_RETRY, count);
+		count -= cur_count;
+
+		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+			    cur_rate->flags != info->status.rates[i].flags)) {
+			i++;
+			if (i == ARRAY_SIZE(info->status.rates)) {
+				i--;
+				break;
+			}
+
+			info->status.rates[i] = *cur_rate;
+			info->status.rates[i].count = 0;
+		}
+
+		info->status.rates[i].count += cur_count;
+	}
+
+out:
+	final_rate_flags = info->status.rates[i].flags;
+
+	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+	case MT_PHY_TYPE_CCK:
+		cck = true;
+		/* fall through */
+	case MT_PHY_TYPE_OFDM:
+		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+			sband = &dev->mt76.sband_5g.sband;
+		else
+			sband = &dev->mt76.sband_2g.sband;
+		final_rate &= MT_TX_RATE_IDX;
+		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+					   cck);
+		final_rate_flags = 0;
+		break;
+	case MT_PHY_TYPE_HT_GF:
+	case MT_PHY_TYPE_HT:
+		final_rate_flags |= IEEE80211_TX_RC_MCS;
+		final_rate &= MT_TX_RATE_IDX;
+		if (final_rate > 31)
+			return false;
+		break;
+	case MT_PHY_TYPE_VHT:
+		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
+
+		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
+			final_nss--;
+
+		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
+		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
+		break;
+	default:
+		return false;
+	}
+
+	info->status.rates[i].idx = final_rate;
+	info->status.rates[i].flags = final_rate_flags;
+
+	return true;
+}
+
+static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
+				   struct mt7615_sta *sta, int pid,
+				   __le32 *txs_data)
+{
+	struct mt76_dev *mdev = &dev->mt76;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	if (pid < MT_PACKET_ID_FIRST)
+		return false;
+
+	mt76_tx_status_lock(mdev, &list);
+	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+	if (skb) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
+			ieee80211_tx_info_clear_status(info);
+			info->status.rates[0].idx = -1;
+		}
+
+		mt76_tx_status_skb_done(mdev, skb, &list);
+	}
+	mt76_tx_status_unlock(mdev, &list);
+
+	return !!skb;
+}
+
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt7615_sta *msta = NULL;
+	struct mt76_wcid *wcid;
+	__le32 *txs_data = data;
+	u32 txs;
+	u8 wcidx;
+	u8 pid;
+
+	txs = le32_to_cpu(txs_data[0]);
+	pid = FIELD_GET(MT_TXS0_PID, txs);
+	txs = le32_to_cpu(txs_data[2]);
+	wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+	if (pid == MT_PACKET_ID_NO_ACK)
+		return;
+
+	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+		return;
+
+	rcu_read_lock();
+
+	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+	if (!wcid)
+		goto out;
+
+	msta = container_of(wcid, struct mt7615_sta, wcid);
+	sta = wcid_to_sta(wcid);
+
+	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
+		goto out;
+
+	if (wcidx >= MT7615_WTBL_STA || !sta)
+		goto out;
+
+	if (mt7615_fill_txs(dev, msta, &info, txs_data))
+		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+	rcu_read_unlock();
+}
+
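+/* Handle a TX-free notification: for each reported token, unmap the TXP
+ * buffers, complete the associated skb and return the txwi to the pool.
+ */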
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+	struct mt76_dev *mdev = &dev->mt76;
+	struct mt76_txwi_cache *txwi;
+	u8 i, count;
+
+	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+	for (i = 0; i < count; i++) {
+		spin_lock_bh(&dev->token_lock);
+		txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
+		spin_unlock_bh(&dev->token_lock);
+
+		if (!txwi)
+			continue;
+
+		mt7615_txp_skb_unmap(mdev, txwi);
+		if (txwi->skb) {
+			mt76_tx_complete_skb(mdev, txwi->skb);
+			txwi->skb = NULL;
+		}
+
+		mt76_put_txwi(mdev, txwi);
+	}
+	dev_kfree_skb(skb);
+}
+
+static void
+mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
+{
+	mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
+		 MT_WF_PHY_B0_PD_OFDM_MASK,
+		 MT_WF_PHY_B0_PD_OFDM(0x13c));
+	mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
+		 MT_WF_PHY_B1_PD_OFDM_MASK,
+		 MT_WF_PHY_B1_PD_OFDM(0x13c));
+
+	mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
+		 MT_WF_PHY_B0_PD_CCK_MASK,
+		 MT_WF_PHY_B0_PD_CCK(0x92));
+	mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
+		 MT_WF_PHY_B1_PD_CCK_MASK,
+		 MT_WF_PHY_B1_PD_CCK(0x92));
+
+	dev->ofdm_sensitivity = -98;
+	dev->cck_sensitivity = -110;
+	dev->last_cca_adj = jiffies;
+}
+
+void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
+{
+	mutex_lock(&dev->mt76.mutex);
+
+	if (dev->scs_en == enable)
+		goto out;
+
+	if (enable) {
+		/* DBDC not supported */
+		mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
+			 MT_WF_PHY_B0_PD_BLK);
+		if (is_mt7622(&dev->mt76)) {
+			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
+			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
+		}
+	} else {
+		mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
+			   MT_WF_PHY_B0_PD_BLK);
+		mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
+			   MT_WF_PHY_B1_PD_BLK);
+	}
+
+	mt7615_mac_set_default_sensitivity(dev);
+	dev->scs_en = enable;
+
+out:
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
+{
+	mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
+	mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
+}
+
+static void
+mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
+			      u32 rts_err_rate, bool ofdm)
+{
+	int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
+	s16 def_th = ofdm ? -98 : -110;
+	bool update = false;
+	s8 *sensitivity;
+	int signal;
+
+	sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
+	signal = mt76_get_min_avg_rssi(&dev->mt76);
+	if (!signal) {
+		mt7615_mac_set_default_sensitivity(dev);
+		return;
+	}
+
+	signal = min(signal, -72);
+	if (false_cca > 500) {
+		if (rts_err_rate > MT_FRAC(40, 100))
+			return;
+
+		/* decrease coverage */
+		if (*sensitivity == def_th && signal > -90) {
+			*sensitivity = -90;
+			update = true;
+		} else if (*sensitivity + 2 < signal) {
+			*sensitivity += 2;
+			update = true;
+		}
+	} else if ((false_cca > 0 && false_cca < 50) ||
+		   rts_err_rate > MT_FRAC(60, 100)) {
+		/* increase coverage */
+		if (*sensitivity - 2 >= def_th) {
+			*sensitivity -= 2;
+			update = true;
+		}
+	}
+
+	if (*sensitivity > signal) {
+		*sensitivity = signal;
+		update = true;
+	}
+
+	if (update) {
+		u16 val;
+
+		if (ofdm) {
+			/* DBDC not supported */
+			val = *sensitivity * 2 + 512;
+			mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
+				 MT_WF_PHY_B0_PD_OFDM_MASK,
+				 MT_WF_PHY_B0_PD_OFDM(val));
+		} else {
+			val = *sensitivity + 256;
+			mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
+				 MT_WF_PHY_B0_PD_CCK_MASK,
+				 MT_WF_PHY_B0_PD_CCK(val));
+			mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
+				 MT_WF_PHY_B1_PD_CCK_MASK,
+				 MT_WF_PHY_B1_PD_CCK(val));
+		}
+		dev->last_cca_adj = jiffies;
+	}
+}
+
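+/* Smart carrier sense: derive false-CCA counts from the PHY PD vs. MDRDY
+ * counters and the MIB RTS retry statistics, then let
+ * mt7615_mac_adjust_sensitivity() nudge the OFDM/CCK detection thresholds.
+ */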
+static void
+mt7615_mac_scs_check(struct mt7615_dev *dev)
+{
+	u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
+	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
+	int i;
+
+	if (!dev->scs_en)
+		return;
+
+	for (i = 0; i < 4; i++) {
+		u32 data;
+
+		val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
+		data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+		if (data > rts_retries_cnt) {
+			rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+			rts_retries_cnt = data;
+		}
+	}
+
+	val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
+	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
+	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
+
+	val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
+	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
+	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
+
+	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+	dev->false_cca_cck = pd_cck - mdrdy_cck;
+	mt7615_mac_cca_stats_reset(dev);
+
+	if (rts_cnt + rts_retries_cnt)
+		rts_err_rate = MT_FRAC(rts_retries_cnt,
+				       rts_cnt + rts_retries_cnt);
+
+	/* cck */
+	mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
+	/* ofdm */
+	mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
+
+	if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
+		mt7615_mac_set_default_sensitivity(dev);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	struct mt76_channel_state *state;
+	ktime_t cur_time;
+	u32 busy;
+
+	if (!test_bit(MT76_STATE_RUNNING, &mdev->state))
+		return;
+
+	state = mt76_channel_state(mdev, mdev->chandef.chan);
+	/* TODO: add DBDC support */
+	busy = mt76_get_field(dev, MT_MIB_SDR16(0), MT_MIB_BUSY_MASK);
+
+	spin_lock_bh(&mdev->cc_lock);
+	cur_time = ktime_get_boottime();
+	state->cc_busy += busy;
+	state->cc_active += ktime_to_us(ktime_sub(cur_time,
+						  mdev->survey_time));
+	mdev->survey_time = cur_time;
+	spin_unlock_bh(&mdev->cc_lock);
+}
+
+void mt7615_mac_work(struct work_struct *work)
+{
+	struct mt7615_dev *dev;
+
+	dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
+						mac_work.work);
+
+	mutex_lock(&dev->mt76.mutex);
+	mt7615_update_channel(&dev->mt76);
+	if (++dev->mac_work_count == 5) {
+		mt7615_mac_scs_check(dev);
+		dev->mac_work_count = 0;
+	}
+	mutex_unlock(&dev->mt76.mutex);
+
+	mt76_tx_status_check(&dev->mt76, NULL, false);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT7615_WATCHDOG_TIME);
+}
+
+int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	int err;
+
+	err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
+				 MT_RX_SEL0, 0);
+	if (err < 0)
+		return err;
+
+	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+	    chandef->width == NL80211_CHAN_WIDTH_80P80)
+		err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
+					 MT_RX_SEL0, 0);
+	return err;
+}
+
+static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
+{
+	int err;
+
+	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+	if (err < 0)
+		return err;
+
+	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
+				  MT_RX_SEL0, 1);
+}
+
+int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	int err;
+
+	/* start CAC */
+	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
+				 MT_RX_SEL0, 0);
+	if (err < 0)
+		return err;
+
+	/* TODO: DBDC support */
+
+	err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
+	if (err < 0)
+		return err;
+
+	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
+		err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	int err;
+
+	if (dev->mt76.region == NL80211_DFS_UNSET)
+		return 0;
+
+	if (test_bit(MT76_SCANNING, &dev->mt76.state))
+		return 0;
+
+	if (dev->dfs_state == chandef->chan->dfs_state)
+		return 0;
+
+	dev->dfs_state = chandef->chan->dfs_state;
+
+	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+			return mt7615_dfs_start_radar_detector(dev);
+		else
+			return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
+						  MT_RX_SEL0, 0);
+	} else {
+		err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
+					 MT_HW_RDD0, MT_RX_SEL0, 0);
+		if (err < 0)
+			return err;
+
+		return mt7615_dfs_stop_radar_detector(dev);
+	}
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
new file mode 100644
index 0000000..38695d4
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MAC_H
+#define __MT7615_MAC_H
+
+#define MT_CT_PARSE_LEN			72
+#define MT_CT_DMA_BUF_NUM		2
+
+#define MT_RXD0_LENGTH			GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE		GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS	GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM		BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM	BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1		BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2		BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3		BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4		BIT(28)
+
+enum rx_pkt_type {
+	PKT_TYPE_TXS,
+	PKT_TYPE_TXRXV,
+	PKT_TYPE_NORMAL,
+	PKT_TYPE_RX_DUP_RFB,
+	PKT_TYPE_RX_TMR,
+	PKT_TYPE_RETRIEVE,
+	PKT_TYPE_TXRX_NOTIFY,
+	PKT_TYPE_RX_EVENT
+};
+
+#define MT_RXD1_NORMAL_BSSID		GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT	GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS	BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET	BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN	GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ		GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID		GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC	BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC	BIT(4)
+#define MT_RXD1_NORMAL_BF_REPORT	BIT(3)
+#define MT_RXD1_NORMAL_ADDR_TYPE	GENMASK(2, 1)
+#define MT_RXD1_NORMAL_BCAST		GENMASK(2, 1)
+#define MT_RXD1_NORMAL_MCAST		BIT(2)
+#define MT_RXD1_NORMAL_U2M		BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD		BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU	BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB	BIT(30)
+#define MT_RXD2_NORMAL_NDATA		BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME	BIT(28)
+#define MT_RXD2_NORMAL_FRAG		BIT(27)
+#define MT_RXD2_NORMAL_INT_FRAME	BIT(26)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR	BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR	BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR	BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH	BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR	BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR		BIT(20)
+#define MT_RXD2_NORMAL_CLM		BIT(19)
+#define MT_RXD2_NORMAL_CM		BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR		BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT		BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE		GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID		GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX		GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS		GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE		BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP	GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL		GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT	BIT(13)
+#define MT_RXD3_NORMAL_OFLD		GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS		BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP	BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS	BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ		GENMASK(7, 0)
+
+#define MT_RXV1_ACID_DET_H		BIT(31)
+#define MT_RXV1_ACID_DET_L		BIT(30)
+#define MT_RXV1_VHTA2_B8_B3		GENMASK(29, 24)
+#define MT_RXV1_NUM_RX			GENMASK(23, 22)
+#define MT_RXV1_HT_NO_SOUND		BIT(21)
+#define MT_RXV1_HT_SMOOTH		BIT(20)
+#define MT_RXV1_HT_SHORT_GI		BIT(19)
+#define MT_RXV1_HT_AGGR			BIT(18)
+#define MT_RXV1_VHTA1_B22		BIT(17)
+#define MT_RXV1_FRAME_MODE		GENMASK(16, 15)
+#define MT_RXV1_TX_MODE			GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF		GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE		BIT(9)
+#define MT_RXV1_HT_STBC			GENMASK(8, 7)
+#define MT_RXV1_TX_RATE			GENMASK(6, 0)
+
+#define MT_RXV2_SEL_ANT			BIT(31)
+#define MT_RXV2_VALID_BIT		BIT(30)
+#define MT_RXV2_NSTS			GENMASK(29, 27)
+#define MT_RXV2_GROUP_ID		GENMASK(26, 21)
+#define MT_RXV2_LENGTH			GENMASK(20, 0)
+
+#define MT_RXV4_RCPI3			GENMASK(31, 24)
+#define MT_RXV4_RCPI2			GENMASK(23, 16)
+#define MT_RXV4_RCPI1			GENMASK(15, 8)
+#define MT_RXV4_RCPI0			GENMASK(7, 0)
+
+enum tx_header_format {
+	MT_HDR_FORMAT_802_3,
+	MT_HDR_FORMAT_CMD,
+	MT_HDR_FORMAT_802_11,
+	MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+	MT_TX_TYPE_CT,
+	MT_TX_TYPE_SF,
+	MT_TX_TYPE_CMD,
+	MT_TX_TYPE_FW,
+};
+
+enum tx_pkt_queue_idx {
+	MT_LMAC_AC00,
+	MT_LMAC_AC01,
+	MT_LMAC_AC02,
+	MT_LMAC_AC03,
+	MT_LMAC_ALTX0 = 0x10,
+	MT_LMAC_BMC0,
+	MT_LMAC_BCN0,
+	MT_LMAC_PSMP0,
+};
+
+enum tx_port_idx {
+	MT_TX_PORT_IDX_LMAC,
+	MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+	MT_TX_MCU_PORT_RX_Q0 = 0,
+	MT_TX_MCU_PORT_RX_Q1,
+	MT_TX_MCU_PORT_RX_Q2,
+	MT_TX_MCU_PORT_RX_Q3,
+	MT_TX_MCU_PORT_RX_FWDL = 0x1e
+};
+
+enum tx_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+	MT_PHY_BW_80,
+	MT_PHY_BW_160,
+};
+
+#define MT_CT_INFO_APPLY_TXD		BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL	BIT(1)
+#define MT_CT_INFO_MGMT_FRAME		BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME	BIT(3)
+#define MT_CT_INFO_HSR2_TX		BIT(4)
+
+#define MT_TXD_SIZE			(8 * 4)
+
+#define MT_TXD0_P_IDX			BIT(31)
+#define MT_TXD0_Q_IDX			GENMASK(30, 26)
+#define MT_TXD0_UDP_TCP_SUM		BIT(24)
+#define MT_TXD0_IP_SUM			BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET		GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES		GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC			GENMASK(31, 26)
+#define MT_TXD1_PKT_FMT			GENMASK(25, 24)
+#define MT_TXD1_TID			GENMASK(23, 21)
+#define MT_TXD1_AMSDU			BIT(20)
+#define MT_TXD1_UNXV			BIT(19)
+#define MT_TXD1_HDR_PAD			GENMASK(18, 17)
+#define MT_TXD1_TXD_LEN			BIT(16)
+#define MT_TXD1_LONG_FORMAT		BIT(15)
+#define MT_TXD1_HDR_FORMAT		GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO		GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX		GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE		BIT(31)
+#define MT_TXD2_TIMING_MEASURE		BIT(30)
+#define MT_TXD2_BA_DISABLE		BIT(29)
+#define MT_TXD2_POWER_OFFSET		GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME		GENMASK(23, 16)
+#define MT_TXD2_FRAG			GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD			BIT(13)
+#define MT_TXD2_DURATION		BIT(12)
+#define MT_TXD2_BIP			BIT(11)
+#define MT_TXD2_MULTICAST		BIT(10)
+#define MT_TXD2_RTS			BIT(9)
+#define MT_TXD2_SOUNDING		BIT(8)
+#define MT_TXD2_NDPA			BIT(7)
+#define MT_TXD2_NDP			BIT(6)
+#define MT_TXD2_FRAME_TYPE		GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE		GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID		BIT(31)
+#define MT_TXD3_PN_VALID		BIT(30)
+#define MT_TXD3_SEQ			GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT		GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT		GENMASK(10, 6)
+#define MT_TXD3_PROTECT_FRAME		BIT(1)
+#define MT_TXD3_NO_ACK			BIT(0)
+
+#define MT_TXD4_PN_LOW			GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH			GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT		BIT(13)
+#define MT_TXD5_DA_SELECT		BIT(11)
+#define MT_TXD5_TX_STATUS_HOST		BIT(10)
+#define MT_TXD5_TX_STATUS_MCU		BIT(9)
+#define MT_TXD5_TX_STATUS_FMT		BIT(8)
+#define MT_TXD5_PID			GENMASK(7, 0)
+
+#define MT_TXD6_FIXED_RATE		BIT(31)
+#define MT_TXD6_SGI			BIT(30)
+#define MT_TXD6_LDPC			BIT(29)
+#define MT_TXD6_TX_BF			BIT(28)
+#define MT_TXD6_TX_RATE			GENMASK(27, 16)
+#define MT_TXD6_ANT_ID			GENMASK(15, 4)
+#define MT_TXD6_DYN_BW			BIT(3)
+#define MT_TXD6_FIXED_BW		BIT(2)
+#define MT_TXD6_BW			GENMASK(1, 0)
+
+#define MT_TXD7_TYPE			GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE		GENMASK(19, 16)
+
+#define MT_TX_RATE_STBC			BIT(11)
+#define MT_TX_RATE_NSS			GENMASK(10, 9)
+#define MT_TX_RATE_MODE			GENMASK(8, 6)
+#define MT_TX_RATE_IDX			GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM		6
+
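+/* TXP: buffer descriptor placed right after the TXWI. It carries up to six
+ * DMA address/length pairs plus the token used to match the frame against a
+ * later TX-free notification.
+ */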
+struct mt7615_txp {
+	__le16 flags;
+	__le16 token;
+	u8 bss_idx;
+	u8 rept_wds_wcid;
+	u8 rsv;
+	u8 nbuf;
+	__le32 buf[MT_TXP_MAX_BUF_NUM];
+	__le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed;
+
+struct mt7615_tx_free {
+	__le16 rx_byte_cnt;
+	__le16 ctrl;
+	u8 txd_cnt;
+	u8 rsv[3];
+	__le16 token[];
+} __packed;
+
+#define MT_TX_FREE_MSDU_ID_CNT		GENMASK(6, 0)
+
+#define MT_TXS0_PID			GENMASK(31, 24)
+#define MT_TXS0_BA_ERROR		BIT(22)
+#define MT_TXS0_PS_FLAG			BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT		BIT(20)
+#define MT_TXS0_BIP_ERROR		BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT		BIT(18)
+#define MT_TXS0_RTS_TIMEOUT		BIT(17)
+#define MT_TXS0_ACK_TIMEOUT		BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK		GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST		BIT(15)
+#define MT_TXS0_TX_STATUS_MCU		BIT(14)
+#define MT_TXS0_TXS_FORMAT		BIT(13)
+#define MT_TXS0_FIXED_RATE		BIT(12)
+#define MT_TXS0_TX_RATE			GENMASK(11, 0)
+
+#define MT_TXS1_ANT_ID			GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE		GENMASK(19, 16)
+#define MT_TXS1_BW			GENMASK(15, 14)
+#define MT_TXS1_I_TXBF			BIT(13)
+#define MT_TXS1_E_TXBF			BIT(12)
+#define MT_TXS1_TID			GENMASK(11, 9)
+#define MT_TXS1_AMPDU			BIT(8)
+#define MT_TXS1_ACKED_MPDU		BIT(7)
+#define MT_TXS1_TX_POWER_DBM		GENMASK(6, 0)
+
+#define MT_TXS2_WCID			GENMASK(31, 24)
+#define MT_TXS2_RXV_SEQNO		GENMASK(23, 16)
+#define MT_TXS2_TX_DELAY		GENMASK(15, 0)
+
+#define MT_TXS3_LAST_TX_RATE		GENMASK(31, 29)
+#define MT_TXS3_TX_COUNT		GENMASK(28, 24)
+#define MT_TXS3_F1_TSSI1		GENMASK(23, 12)
+#define MT_TXS3_F1_TSSI0		GENMASK(11, 0)
+#define MT_TXS3_F0_SEQNO		GENMASK(11, 0)
+
+#define MT_TXS4_F0_TIMESTAMP		GENMASK(31, 0)
+#define MT_TXS4_F1_TSSI3		GENMASK(23, 12)
+#define MT_TXS4_F1_TSSI2		GENMASK(11, 0)
+
+#define MT_TXS5_F0_FRONT_TIME		GENMASK(24, 0)
+#define MT_TXS5_F1_NOISE_2		GENMASK(23, 16)
+#define MT_TXS5_F1_NOISE_1		GENMASK(15, 8)
+#define MT_TXS5_F1_NOISE_0		GENMASK(7, 0)
+
+#define MT_TXS6_F1_RCPI_3		GENMASK(31, 24)
+#define MT_TXS6_F1_RCPI_2		GENMASK(23, 16)
+#define MT_TXS6_F1_RCPI_1		GENMASK(15, 8)
+#define MT_TXS6_F1_RCPI_0		GENMASK(7, 0)
+
+enum mt7615_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_TKIP_NO_MIC,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_BIP_CMAC_128,
+	MT_CIPHER_WEP128,
+	MT_CIPHER_WAPI,
+	MT_CIPHER_CCMP_256 = 10,
+	MT_CIPHER_GCMP,
+	MT_CIPHER_GCMP_256,
+};
+
+static inline struct mt7615_txp *
+mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	u8 *txwi;
+
+	if (!t)
+		return NULL;
+
+	txwi = mt76_get_txwi_ptr(dev, t);
+
+	return (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/main.c
new file mode 100644
index 0000000..5ad8b0a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ *         Ryder Lee <ryder.lee@mediatek.com>
+ *         Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7615.h"
+
+static int mt7615_start(struct ieee80211_hw *hw)
+{
+	struct mt7615_dev *dev = hw->priv;
+
+	dev->mt76.survey_time = ktime_get_boottime();
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT7615_WATCHDOG_TIME);
+
+	return 0;
+}
+
+static void mt7615_stop(struct ieee80211_hw *hw)
+{
+	struct mt7615_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+}
+
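+/* Pick a free hardware BSSID ("own MAC") index for the interface type from
+ * the bitmask of indices already in use; returns -1 if none is available.
+ */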
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+	int i;
+
+	switch (type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
+		/* an AP uses hw bssid 0 and the ext bssids */
+		if (~mask & BIT(HW_BSSID_0))
+			return HW_BSSID_0;
+
+		for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+			if (~mask & BIT(i))
+				return i;
+
+		break;
+	case NL80211_IFTYPE_STATION:
+		/* a STA uses a hw bssid other than 0 */
+		for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+			if (~mask & BIT(i))
+				return i;
+
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return -1;
+}
+
+static int mt7615_add_interface(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_dev *dev = hw->priv;
+	struct mt76_txq *mtxq;
+	int idx, ret = 0;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mvif->idx = ffs(~dev->vif_mask) - 1;
+	if (mvif->idx >= MT7615_MAX_INTERFACES) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	idx = get_omac_idx(vif->type, dev->omac_mask);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+	mvif->omac_idx = idx;
+
+	/* TODO: DBDC support. Use band 0 for now */
+	mvif->band_idx = 0;
+	mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+
+	ret = mt7615_mcu_set_dev_info(dev, vif, 1);
+	if (ret)
+		goto out;
+
+	dev->vif_mask |= BIT(mvif->idx);
+	dev->omac_mask |= BIT(mvif->omac_idx);
+	idx = MT7615_WTBL_RESERVED - mvif->idx;
+	mvif->sta.wcid.idx = idx;
+	mvif->sta.wcid.hw_key_idx = -1;
+
+	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+	mtxq->wcid = &mvif->sta.wcid;
+	mt76_txq_init(&dev->mt76, vif->txq);
+
+out:
+	mutex_unlock(&dev->mt76.mutex);
+
+	return ret;
+}
+
+static void mt7615_remove_interface(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_dev *dev = hw->priv;
+	int idx = mvif->sta.wcid.idx;
+
+	/* TODO: disable beacon for the bss */
+
+	mt7615_mcu_set_dev_info(dev, vif, 0);
+
+	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+	mt76_txq_remove(&dev->mt76, vif->txq);
+
+	mutex_lock(&dev->mt76.mutex);
+	dev->vif_mask &= ~BIT(mvif->idx);
+	dev->omac_mask &= ~BIT(mvif->omac_idx);
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+static int mt7615_set_channel(struct mt7615_dev *dev)
+{
+	int ret;
+
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+	mutex_lock(&dev->mt76.mutex);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	mt7615_dfs_check_channel(dev);
+
+	mt76_set_channel(&dev->mt76);
+
+	ret = mt7615_mcu_set_channel(dev);
+	if (ret)
+		goto out;
+
+	ret = mt7615_dfs_init_radar_detector(dev);
+	mt7615_mac_cca_stats_reset(dev);
+	dev->mt76.survey_time = ktime_get_boottime();
+	/* TODO: add DBDC support */
+	mt76_rr(dev, MT_MIB_SDR16(0));
+
+out:
+	clear_bit(MT76_RESET, &dev->mt76.state);
+	mutex_unlock(&dev->mt76.mutex);
+
+	mt76_txq_schedule_all(&dev->mt76);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT7615_WATCHDOG_TIME);
+	return ret;
+}
+
+static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+			  struct ieee80211_key_conf *key)
+{
+	struct mt7615_dev *dev = hw->priv;
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+				  &mvif->sta;
+	struct mt76_wcid *wcid = &msta->wcid;
+	int idx = key->keyidx;
+
+	/* The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP_256:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+	case WLAN_CIPHER_SUITE_SMS4:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else if (idx == wcid->hw_key_idx) {
+		wcid->hw_key_idx = -1;
+	}
+	mt76_wcid_key_setup(&dev->mt76, wcid,
+			    cmd == SET_KEY ? key : NULL);
+
+	return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+}
+
+static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt7615_dev *dev = hw->priv;
+	int ret = 0;
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt7615_set_channel(dev);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER)
+		ret = mt7615_mcu_set_tx_power(dev);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+		else
+			dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+		mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return ret;
+}
+
+static int
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_dev *dev = hw->priv;
+
+	queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
+
+	return mt7615_mcu_set_wmm(dev, queue, params);
+}
+
+static void mt7615_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *total_flags,
+				    u64 multicast)
+{
+	struct mt7615_dev *dev = hw->priv;
+	u32 flags = 0;
+
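+/* Clear the listed RFCR drop bits and re-set them only when mac80211 did not
+ * request the corresponding FIF_* flag; accepted flags accumulate in 'flags'.
+ */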
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->mt76.rxfilter &= ~(_hw);				\
+		dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
+
+	dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+				MT_WF_RFCR_DROP_OTHER_BEACON |
+				MT_WF_RFCR_DROP_FRAME_REPORT |
+				MT_WF_RFCR_DROP_PROBEREQ |
+				MT_WF_RFCR_DROP_MCAST_FILTERED |
+				MT_WF_RFCR_DROP_MCAST |
+				MT_WF_RFCR_DROP_BCAST |
+				MT_WF_RFCR_DROP_DUPLICATE |
+				MT_WF_RFCR_DROP_A2_BSSID |
+				MT_WF_RFCR_DROP_UNWANTED_CTL |
+				MT_WF_RFCR_DROP_STBC_MULTI);
+
+	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+			       MT_WF_RFCR_DROP_A3_MAC |
+			       MT_WF_RFCR_DROP_A3_BSSID);
+
+	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+			     MT_WF_RFCR_DROP_RTS |
+			     MT_WF_RFCR_DROP_CTL_RSV |
+			     MT_WF_RFCR_DROP_NDPA);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+}
+
+static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_bss_conf *info,
+				    u32 changed)
+{
+	struct mt7615_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt7615_mcu_set_bss_info(dev, vif, info->assoc);
+
+	/* TODO: update beacon content
+	 * BSS_CHANGED_BEACON
+	 */
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
+		mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
+		mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
+		mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_chan_def *chandef)
+{
+	struct mt7615_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	mt7615_mcu_set_bcn(dev, vif, true);
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	int idx;
+
+	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+	if (idx < 0)
+		return -ENOSPC;
+
+	msta->vif = mvif;
+	msta->wcid.sta = 1;
+	msta->wcid.idx = idx;
+
+	mt7615_mcu_add_wtbl(dev, vif, sta);
+	mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+
+	return 0;
+}
+
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+	if (sta->ht_cap.ht_supported)
+		mt7615_mcu_set_ht_cap(dev, vif, sta);
+}
+
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+	mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
+	mt7615_mcu_del_wtbl(dev, sta);
+}
+
+static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta)
+{
+	struct mt7615_dev *dev = hw->priv;
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+	int i;
+
+	if (!sta_rates)
+		return;
+
+	spin_lock_bh(&dev->mt76.lock);
+	for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+		msta->rates[i].idx = sta_rates->rate[i].idx;
+		msta->rates[i].count = sta_rates->rate[i].count;
+		msta->rates[i].flags = sta_rates->rate[i].flags;
+
+		if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+			break;
+	}
+	msta->n_rates = i;
+	mt7615_mac_set_rates(dev, msta, NULL, msta->rates);
+	msta->rate_probe = false;
+	spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void mt7615_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
+{
+	struct mt7615_dev *dev = hw->priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+	if (control->sta) {
+		struct mt7615_sta *sta;
+
+		sta = (struct mt7615_sta *)control->sta->drv_priv;
+		wcid = &sta->wcid;
+	}
+
+	if (vif && !control->sta) {
+		struct mt7615_vif *mvif;
+
+		mvif = (struct mt7615_vif *)vif->drv_priv;
+		wcid = &mvif->sta.wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+	struct mt7615_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	mt7615_mcu_set_rts_thresh(dev, val);
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
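+/* Forward A-MPDU session state changes to the MCU; RX sessions are also
+ * mirrored into the mt76 reorder buffer.
+ */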
+static int
+mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct mt7615_dev *dev = hw->priv;
+	struct ieee80211_sta *sta = params->sta;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	u16 tid = params->tid;
+	u16 ssn = params->ssn;
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+				   params->buf_size);
+		mt7615_mcu_set_rx_ba(dev, params, 1);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+		mt7615_mcu_set_rx_ba(dev, params, 0);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		mt7615_mcu_set_tx_ba(dev, params, 1);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		mt7615_mcu_set_tx_ba(dev, params, 0);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		mt7615_mcu_set_tx_ba(dev, params, 0);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+const struct ieee80211_ops mt7615_ops = {
+	.tx = mt7615_tx,
+	.start = mt7615_start,
+	.stop = mt7615_stop,
+	.add_interface = mt7615_add_interface,
+	.remove_interface = mt7615_remove_interface,
+	.config = mt7615_config,
+	.conf_tx = mt7615_conf_tx,
+	.configure_filter = mt7615_configure_filter,
+	.bss_info_changed = mt7615_bss_info_changed,
+	.sta_state = mt76_sta_state,
+	.set_key = mt7615_set_key,
+	.ampdu_action = mt7615_ampdu_action,
+	.set_rts_threshold = mt7615_set_rts_threshold,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76_sw_scan_complete,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.get_txpower = mt76_get_txpower,
+	.channel_switch_beacon = mt7615_channel_switch_beacon,
+	.get_survey = mt76_get_survey,
+};
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
new file mode 100644
index 0000000..b6867d9
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -0,0 +1,1608 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ *         Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include "mt7615.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7615_patch_hdr {
+	char build_date[16];
+	char platform[4];
+	__be32 hw_sw_ver;
+	__be32 patch_ver;
+	__be16 checksum;
+} __packed;
+
+struct mt7615_fw_trailer {
+	__le32 addr;
+	u8 chip_id;
+	u8 feature_set;
+	u8 eco_code;
+	char fw_ver[10];
+	char build_date[15];
+	__le32 len;
+} __packed;
+
+#define MCU_PATCH_ADDRESS		0x80000
+
+#define N9_REGION_NUM			2
+#define CR4_REGION_NUM			1
+
+#define IMG_CRC_LEN			4
+
+#define FW_FEATURE_SET_ENCRYPT		BIT(0)
+#define FW_FEATURE_SET_KEY_IDX		GENMASK(2, 1)
+
+#define DL_MODE_ENCRYPT			BIT(0)
+#define DL_MODE_KEY_IDX			GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV		BIT(3)
+#define DL_MODE_WORKING_PDA_CR4		BIT(4)
+#define DL_MODE_NEED_RSP		BIT(31)
+
+#define FW_START_OVERRIDE		BIT(0)
+#define FW_START_WORKING_PDA_CR4	BIT(2)
+
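+/* Prepend the MCU TXD to a command skb, fill in the command and sequence
+ * fields and queue it on the MCU (or firmware download) TX ring; the sequence
+ * number used to match the response is returned via wait_seq.
+ */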
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+				 int cmd, int *wait_seq)
+{
+	struct mt7615_mcu_txd *mcu_txd;
+	u8 seq, q_idx, pkt_fmt;
+	enum mt76_txq_id qid;
+	u32 val;
+	__le32 *txd;
+
+	seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+	if (!seq)
+		seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+
+	mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
+		   sizeof(struct mt7615_mcu_txd));
+	memset(mcu_txd, 0, sizeof(struct mt7615_mcu_txd));
+
+	if (cmd != -MCU_CMD_FW_SCATTER) {
+		q_idx = MT_TX_MCU_PORT_RX_Q0;
+		pkt_fmt = MT_TX_TYPE_CMD;
+	} else {
+		q_idx = MT_TX_MCU_PORT_RX_FWDL;
+		pkt_fmt = MT_TX_TYPE_FW;
+	}
+
+	txd = mcu_txd->txd;
+
+	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
+	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+	txd[0] = cpu_to_le32(val);
+
+	val = MT_TXD1_LONG_FORMAT |
+	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) |
+	      FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
+	txd[1] = cpu_to_le32(val);
+
+	mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+	mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+	mcu_txd->pkt_type = MCU_PKT_ID;
+	mcu_txd->seq = seq;
+
+	if (cmd < 0) {
+		mcu_txd->set_query = MCU_Q_NA;
+		mcu_txd->cid = -cmd;
+	} else {
+		mcu_txd->cid = MCU_CMD_EXT_CID;
+		mcu_txd->set_query = MCU_Q_SET;
+		mcu_txd->ext_cid = cmd;
+		mcu_txd->ext_cid_ack = 1;
+	}
+	mcu_txd->s2d_index = MCU_S2D_H2N;
+
+	if (wait_seq)
+		*wait_seq = seq;
+
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+		qid = MT_TXQ_MCU;
+	else
+		qid = MT_TXQ_FWDL;
+
+	return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
+}
+
+static int
+mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
+			  struct sk_buff *skb, int seq)
+{
+	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+	int ret = 0;
+
+	if (seq != rxd->seq) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	switch (cmd) {
+	case -MCU_CMD_PATCH_SEM_CONTROL:
+		skb_pull(skb, sizeof(*rxd) - 4);
+		ret = *skb->data;
+		break;
+	case MCU_EXT_CMD_GET_TEMP:
+		skb_pull(skb, sizeof(*rxd));
+		ret = le32_to_cpu(*(__le32 *)skb->data);
+		break;
+	default:
+		break;
+	}
+out:
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+		    int len, bool wait_resp)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	unsigned long expires = jiffies + 10 * HZ;
+	struct sk_buff *skb;
+	int ret, seq;
+
+	skb = mt7615_mcu_msg_alloc(data, len);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&mdev->mmio.mcu.mutex);
+
+	ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
+	if (ret)
+		goto out;
+
+	while (wait_resp) {
+		skb = mt76_mcu_get_response(mdev, expires);
+		if (!skb) {
+			dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
+				cmd, seq);
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
+		if (ret != -EAGAIN)
+			break;
+	}
+
+out:
+	mutex_unlock(&mdev->mmio.mcu.mutex);
+
+	return ret;
+}
+
+static void
+mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	if (vif->csa_active)
+		ieee80211_csa_finish(vif);
+}
+
+static void
+mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+	switch (rxd->ext_eid) {
+	case MCU_EXT_EVENT_RDD_REPORT:
+		ieee80211_radar_detected(dev->mt76.hw);
+		dev->hw_pattern++;
+		break;
+	case MCU_EXT_EVENT_CSA_NOTIFY:
+		ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+				IEEE80211_IFACE_ITER_RESUME_ALL,
+				mt7615_mcu_csa_finish, dev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+	switch (rxd->eid) {
+	case MCU_EVENT_EXT:
+		mt7615_mcu_rx_ext_event(dev, skb);
+		break;
+	default:
+		break;
+	}
+	dev_kfree_skb(skb);
+}
+
+void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+
+	if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+	    rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+	    rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+	    rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+	    !rxd->seq)
+		mt7615_mcu_rx_unsolicited_event(dev, skb);
+	else
+		mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
+				    u32 len, u32 mode)
+{
+	struct {
+		__le32 addr;
+		__le32 len;
+		__le32 mode;
+	} req = {
+		.addr = cpu_to_le32(addr),
+		.len = cpu_to_le32(len),
+		.mode = cpu_to_le32(mode),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+				   &req, sizeof(req), true);
+}
+
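+/* Push a firmware image to the MCU in FW_SCATTER chunks no larger than
+ * 4096 bytes minus the MCU TXD, cleaning up the FWDL queue between chunks.
+ */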
+static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+				    int len)
+{
+	int ret = 0, cur_len;
+
+	while (len > 0) {
+		cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
+				len);
+
+		ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+					  data, cur_len, false);
+		if (ret)
+			break;
+
+		data += cur_len;
+		len -= cur_len;
+		mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+	}
+
+	return ret;
+}
+
+static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
+				     u32 option)
+{
+	struct {
+		__le32 option;
+		__le32 addr;
+	} req = {
+		.option = cpu_to_le32(option),
+		.addr = cpu_to_le32(addr),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int mt7615_mcu_restart(struct mt76_dev *dev)
+{
+	return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL,
+				   0, true);
+}
+
+static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
+{
+	struct {
+		__le32 op;
+	} req = {
+		.op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+				   &req, sizeof(req), true);
+}
+
+static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+{
+	struct {
+		u8 check_crc;
+		u8 reserved[3];
+	} req = {
+		.check_crc = 0,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int mt7615_driver_own(struct mt7615_dev *dev)
+{
+	mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
+	if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+			    MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+		dev_err(dev->mt76.dev, "Timeout for driver own\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
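+/* ROM patch download: take the patch semaphore, stream the image that follows
+ * the patch header via MCU_CMD_FW_SCATTER, then issue PATCH_FINISH_REQ.
+ * PATCH_IS_DL means another context already loaded the patch, so bail out
+ * early with success.
+ */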
+static int mt7615_load_patch(struct mt7615_dev *dev)
+{
+	const struct mt7615_patch_hdr *hdr;
+	const struct firmware *fw = NULL;
+	int len, ret, sem;
+
+	sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+	switch (sem) {
+	case PATCH_IS_DL:
+		return 0;
+	case PATCH_NOT_DL_SEM_SUCCESS:
+		break;
+	default:
+		dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+		return -EAGAIN;
+	}
+
+	ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
+	if (ret)
+		goto out;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "Invalid firmware\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt7615_patch_hdr *)(fw->data);
+
+	dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+		 be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+	len = fw->size - sizeof(*hdr);
+
+	ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
+				       DL_MODE_NEED_RSP);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Download request failed\n");
+		goto out;
+	}
+
+	ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+		goto out;
+	}
+
+	ret = mt7615_mcu_start_patch(dev);
+	if (ret)
+		dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+	release_firmware(fw);
+
+	sem = mt7615_mcu_patch_sem_ctrl(dev, 0);
+	switch (sem) {
+	case PATCH_REL_SEM_SUCCESS:
+		break;
+	default:
+		ret = -EAGAIN;
+		dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+		break;
+	}
+
+	return ret;
+}
+
+static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
+{
+	u32 ret = 0;
+
+	ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+	       (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+	ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+			  FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+	ret |= DL_MODE_NEED_RSP;
+	ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+	return ret;
+}
+
+static int
+mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
+			     const struct mt7615_fw_trailer *hdr,
+			     const u8 *data, bool is_cr4)
+{
+	int n_region = is_cr4 ? CR4_REGION_NUM : N9_REGION_NUM;
+	int err, i, offset = 0;
+	u32 len, addr, mode;
+
+	for (i = 0; i < n_region; i++) {
+		mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+		len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+		addr = le32_to_cpu(hdr[i].addr);
+
+		err = mt7615_mcu_init_download(dev, addr, len, mode);
+		if (err) {
+			dev_err(dev->mt76.dev, "Download request failed\n");
+			return err;
+		}
+
+		err = mt7615_mcu_send_firmware(dev, data + offset, len);
+		if (err) {
+			dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+			return err;
+		}
+
+		offset += len;
+	}
+
+	return 0;
+}
+
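+/* RAM firmware: the N9 and CR4 images carry per-region trailers at the end of
+ * the file describing load address, length and feature flags; each region is
+ * streamed in turn and a FW_START command kicks the corresponding MCU.
+ */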
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+	const struct mt7615_fw_trailer *hdr;
+	const struct firmware *fw;
+	int ret;
+
+	ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "Invalid firmware\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+					N9_REGION_NUM * sizeof(*hdr));
+
+	dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+		 hdr->fw_ver, hdr->build_date);
+
+	ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+	if (ret)
+		goto out;
+
+	ret = mt7615_mcu_start_firmware(dev, le32_to_cpu(hdr->addr),
+					FW_START_OVERRIDE);
+	if (ret) {
+		dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+		goto out;
+	}
+
+	release_firmware(fw);
+
+	ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "Invalid firmware\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+					CR4_REGION_NUM * sizeof(*hdr));
+
+	dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n",
+		 hdr->fw_ver, hdr->build_date);
+
+	ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+	if (ret)
+		goto out;
+
+	ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
+	if (ret)
+		dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+
+out:
+	release_firmware(fw);
+
+	return ret;
+}
+
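+/* Firmware bring-up: the chip must report FW_STATE_FW_DOWNLOAD before the ROM
+ * patch and RAM images are loaded, after which the FW state is polled until
+ * the CR4 reports ready.
+ */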
+static int mt7615_load_firmware(struct mt7615_dev *dev)
+{
+	int ret;
+	u32 val;
+
+	val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);
+
+	if (val != FW_STATE_FW_DOWNLOAD) {
+		dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+		return -EIO;
+	}
+
+	ret = mt7615_load_patch(dev);
+	if (ret)
+		return ret;
+
+	ret = mt7615_load_ram(dev);
+	if (ret)
+		return ret;
+
+	if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
+			    FIELD_PREP(MT_TOP_MISC2_FW_STATE,
+				       FW_STATE_CR4_RDY), 500)) {
+		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+		return -EIO;
+	}
+
+	mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+
+	dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+	return 0;
+}
+
+int mt7615_mcu_init(struct mt7615_dev *dev)
+{
+	static const struct mt76_mcu_ops mt7615_mcu_ops = {
+		.mcu_send_msg = mt7615_mcu_msg_send,
+		.mcu_restart = mt7615_mcu_restart,
+	};
+	int ret;
+
+	dev->mt76.mcu_ops = &mt7615_mcu_ops;
+
+	ret = mt7615_driver_own(dev);
+	if (ret)
+		return ret;
+
+	ret = mt7615_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
+
+void mt7615_mcu_exit(struct mt7615_dev *dev)
+{
+	__mt76_mcu_restart(&dev->mt76);
+	mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
+	skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+}
+
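+/* Push the host copy of the EEPROM data (MT_EE_NIC_CONF_0 up to __MT_EE_MAX)
+ * to the firmware using efuse buffer mode.
+ */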
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
+{
+	struct {
+		u8 buffer_mode;
+		u8 pad;
+		u16 len;
+	} __packed req_hdr = {
+		.buffer_mode = 1,
+		.len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
+	};
+	int ret, len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
+	u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+
+	req = kzalloc(len, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memcpy(req, &req_hdr, sizeof(req_hdr));
+	memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
+	       __MT_EE_MAX - MT_EE_NIC_CONF_0);
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+				  req, len, true);
+	kfree(req);
+
+	return ret;
+}
+
+int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+{
+	struct {
+		u8 enable;
+		u8 band;
+		u8 rsv[2];
+	} __packed req = {
+		.enable = 1,
+		.band = 0,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+{
+	struct {
+		u8 prot_idx;
+		u8 band;
+		u8 rsv[2];
+		__le32 len_thresh;
+		__le32 pkt_thresh;
+	} __packed req = {
+		.prot_idx = 1,
+		.band = 0,
+		.len_thresh = cpu_to_le32(val),
+		.pkt_thresh = cpu_to_le32(0x2),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
+				   &req, sizeof(req), true);
+}
+
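+/* EDCA/WMM update: the firmware expects CW min/max as exponents, hence the
+ * fls() conversion; the 5/10 defaults are kept when mac80211 passes zero.
+ */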
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+		       const struct ieee80211_tx_queue_params *params)
+{
+#define WMM_AIFS_SET	BIT(0)
+#define WMM_CW_MIN_SET	BIT(1)
+#define WMM_CW_MAX_SET	BIT(2)
+#define WMM_TXOP_SET	BIT(3)
+#define WMM_PARAM_SET	(WMM_AIFS_SET | WMM_CW_MIN_SET | \
+			 WMM_CW_MAX_SET | WMM_TXOP_SET)
+	struct req_data {
+		u8 number;
+		u8 rsv[3];
+		u8 queue;
+		u8 valid;
+		u8 aifs;
+		u8 cw_min;
+		__le16 cw_max;
+		__le16 txop;
+	} __packed req = {
+		.number = 1,
+		.queue = queue,
+		.valid = WMM_PARAM_SET,
+		.aifs = params->aifs,
+		.cw_min = 5,
+		.cw_max = cpu_to_le16(10),
+		.txop = cpu_to_le16(params->txop),
+	};
+
+	if (params->cw_min)
+		req.cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		req.cw_max = cpu_to_le16(fls(params->cw_max));
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+{
+#define ENTER_PM_STATE	1
+#define EXIT_PM_STATE	2
+	struct {
+		u8 pm_number;
+		u8 pm_state;
+		u8 bssid[ETH_ALEN];
+		u8 dtim_period;
+		u8 wlan_idx;
+		__le16 bcn_interval;
+		__le32 aid;
+		__le32 rx_filter;
+		u8 band_idx;
+		u8 rsv[3];
+		__le32 feature;
+		u8 omac_idx;
+		u8 wmm_idx;
+		u8 bcn_loss_cnt;
+		u8 bcn_sp_duration;
+	} __packed req = {
+		.pm_number = 5,
+		.pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+		.band_idx = 0,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+			    struct ieee80211_vif *vif, bool enable)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct {
+		struct req_hdr {
+			u8 omac_idx;
+			u8 band_idx;
+			__le16 tlv_num;
+			u8 is_tlv_append;
+			u8 rsv[3];
+		} __packed hdr;
+		struct req_tlv {
+			__le16 tag;
+			__le16 len;
+			u8 active;
+			u8 band_idx;
+			u8 omac_addr[ETH_ALEN];
+		} __packed tlv;
+	} data = {
+		.hdr = {
+			.omac_idx = mvif->omac_idx,
+			.band_idx = mvif->band_idx,
+			.tlv_num = cpu_to_le16(1),
+			.is_tlv_append = 1,
+		},
+		.tlv = {
+			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
+			.len = cpu_to_le16(sizeof(struct req_tlv)),
+			.active = enable,
+			.band_idx = mvif->band_idx,
+		},
+	};
+
+	memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+				   &data, sizeof(data), true);
+}
+
+static void
+mt7615_mcu_bss_info_omac_header(struct mt7615_vif *mvif, u8 *data,
+				u32 conn_type)
+{
+	struct bss_info_omac *hdr = (struct bss_info_omac *)data;
+	u8 idx;
+
+	idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+	hdr->tag = cpu_to_le16(BSS_INFO_OMAC);
+	hdr->len = cpu_to_le16(sizeof(struct bss_info_omac));
+	hdr->hw_bss_idx = idx;
+	hdr->omac_idx = mvif->omac_idx;
+	hdr->band_idx = mvif->band_idx;
+	hdr->conn_type = cpu_to_le32(conn_type);
+}
+
+static void
+mt7615_mcu_bss_info_basic_header(struct ieee80211_vif *vif, u8 *data,
+				 u32 net_type, u8 tx_wlan_idx,
+				 bool enable)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct bss_info_basic *hdr = (struct bss_info_basic *)data;
+
+	hdr->tag = cpu_to_le16(BSS_INFO_BASIC);
+	hdr->len = cpu_to_le16(sizeof(struct bss_info_basic));
+	hdr->network_type = cpu_to_le32(net_type);
+	hdr->active = enable;
+	hdr->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+	memcpy(hdr->bssid, vif->bss_conf.bssid, ETH_ALEN);
+	hdr->wmm_idx = mvif->wmm_idx;
+	hdr->dtim_period = vif->bss_conf.dtim_period;
+	hdr->bmc_tx_wlan_idx = tx_wlan_idx;
+}
+
+static void
+mt7615_mcu_bss_info_ext_header(struct mt7615_vif *mvif, u8 *data)
+{
+/* SIFS 20us + 512 byte beacon transmitted at 1 Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+	struct bss_info_ext_bss *hdr = (struct bss_info_ext_bss *)data;
+	int ext_bss_idx, tsf_offset;
+
+	ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+	if (ext_bss_idx < 0)
+		return;
+
+	hdr->tag = cpu_to_le16(BSS_INFO_EXT_BSS);
+	hdr->len = cpu_to_le16(sizeof(struct bss_info_ext_bss));
+	tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+	hdr->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
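+/* BSS_INFO_UPDATE is a TLV message: BASIC is always included, while OMAC and,
+ * for interfaces on extended BSSID slots, EXT_BSS (which carries the per-BSS
+ * TSF offset) are appended only when the BSS is being enabled.
+ */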
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
+			    struct ieee80211_vif *vif, int en)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct req_hdr {
+		u8 bss_idx;
+		u8 rsv0;
+		__le16 tlv_num;
+		u8 is_tlv_append;
+		u8 rsv1[3];
+	} __packed;
+	int len = sizeof(struct req_hdr) + sizeof(struct bss_info_basic);
+	int ret, i, features = BIT(BSS_INFO_BASIC), ntlv = 1;
+	u32 conn_type = 0, net_type = NETWORK_INFRA;
+	u8 *buf, *data, tx_wlan_idx = 0;
+	struct req_hdr *hdr;
+
+	if (en) {
+		len += sizeof(struct bss_info_omac);
+		features |= BIT(BSS_INFO_OMAC);
+		if (mvif->omac_idx > EXT_BSSID_START) {
+			len += sizeof(struct bss_info_ext_bss);
+			features |= BIT(BSS_INFO_EXT_BSS);
+			ntlv++;
+		}
+		ntlv++;
+	}
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
+		tx_wlan_idx = mvif->sta.wcid.idx;
+		conn_type = CONNECTION_INFRA_AP;
+		break;
+	case NL80211_IFTYPE_STATION: {
+		/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+		if (en) {
+			struct ieee80211_sta *sta;
+			struct mt7615_sta *msta;
+
+			rcu_read_lock();
+			sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+			if (!sta) {
+				rcu_read_unlock();
+				return -EINVAL;
+			}
+
+			msta = (struct mt7615_sta *)sta->drv_priv;
+			tx_wlan_idx = msta->wcid.idx;
+			rcu_read_unlock();
+		}
+		conn_type = CONNECTION_INFRA_STA;
+		break;
+	}
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	hdr = (struct req_hdr *)buf;
+	hdr->bss_idx = mvif->idx;
+	hdr->tlv_num = cpu_to_le16(ntlv);
+	hdr->is_tlv_append = 1;
+
+	data = buf + sizeof(*hdr);
+	for (i = 0; i < BSS_INFO_MAX_NUM; i++) {
+		int tag = ffs(features & BIT(i)) - 1;
+
+		switch (tag) {
+		case BSS_INFO_OMAC:
+			mt7615_mcu_bss_info_omac_header(mvif, data,
+							conn_type);
+			data += sizeof(struct bss_info_omac);
+			break;
+		case BSS_INFO_BASIC:
+			mt7615_mcu_bss_info_basic_header(vif, data, net_type,
+							 tx_wlan_idx, en);
+			data += sizeof(struct bss_info_basic);
+			break;
+		case BSS_INFO_EXT_BSS:
+			mt7615_mcu_bss_info_ext_header(mvif, data);
+			data += sizeof(struct bss_info_ext_bss);
+			break;
+		default:
+			break;
+		}
+	}
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BSS_INFO_UPDATE,
+				  buf, len, true);
+	kfree(buf);
+
+	return ret;
+}
+
+static int
+mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev,
+			struct mt7615_vif *mvif)
+{
+	struct {
+		struct wtbl_req_hdr hdr;
+		struct wtbl_generic g_wtbl;
+		struct wtbl_rx rx_wtbl;
+	} req = {
+		.hdr = {
+			.wlan_idx = mvif->sta.wcid.idx,
+			.operation = WTBL_RESET_AND_SET,
+			.tlv_num = cpu_to_le16(2),
+		},
+		.g_wtbl = {
+			.tag = cpu_to_le16(WTBL_GENERIC),
+			.len = cpu_to_le16(sizeof(struct wtbl_generic)),
+			.muar_idx = 0xe,
+		},
+		.rx_wtbl = {
+			.tag = cpu_to_le16(WTBL_RX),
+			.len = cpu_to_le16(sizeof(struct wtbl_rx)),
+			.rca1 = 1,
+			.rca2 = 1,
+			.rv = 1,
+		},
+	};
+	eth_broadcast_addr(req.g_wtbl.peer_addr);
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev,
+			struct ieee80211_vif *vif, bool enable)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+	if (!enable) {
+		struct wtbl_req_hdr req = {
+			.wlan_idx = mvif->sta.wcid.idx,
+			.operation = WTBL_RESET_AND_SET,
+		};
+
+		return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+					   &req, sizeof(req), true);
+	}
+
+	return mt7615_mcu_add_wtbl_bmc(dev, mvif);
+}
+
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	struct {
+		struct wtbl_req_hdr hdr;
+		struct wtbl_generic g_wtbl;
+		struct wtbl_rx rx_wtbl;
+	} req = {
+		.hdr = {
+			.wlan_idx = msta->wcid.idx,
+			.operation = WTBL_RESET_AND_SET,
+			.tlv_num = cpu_to_le16(2),
+		},
+		.g_wtbl = {
+			.tag = cpu_to_le16(WTBL_GENERIC),
+			.len = cpu_to_le16(sizeof(struct wtbl_generic)),
+			.muar_idx = mvif->omac_idx,
+			.qos = sta->wme,
+			.partial_aid = cpu_to_le16(sta->aid),
+		},
+		.rx_wtbl = {
+			.tag = cpu_to_le16(WTBL_RX),
+			.len = cpu_to_le16(sizeof(struct wtbl_rx)),
+			.rca1 = vif->type != NL80211_IFTYPE_AP,
+			.rca2 = 1,
+			.rv = 1,
+		},
+	};
+	memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev,
+			struct ieee80211_sta *sta)
+{
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	struct wtbl_req_hdr req = {
+		.wlan_idx = msta->wcid.idx,
+		.operation = WTBL_RESET_AND_SET,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
+{
+	struct wtbl_req_hdr req = {
+		.operation = WTBL_RESET_ALL,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+			       struct ieee80211_vif *vif, bool en)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct {
+		struct sta_req_hdr hdr;
+		struct sta_rec_basic basic;
+	} req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+			.wlan_idx = mvif->sta.wcid.idx,
+			.tlv_num = cpu_to_le16(1),
+			.is_tlv_append = 1,
+			.muar_idx = mvif->omac_idx,
+		},
+		.basic = {
+			.tag = cpu_to_le16(STA_REC_BASIC),
+			.len = cpu_to_le16(sizeof(struct sta_rec_basic)),
+			.conn_type = cpu_to_le32(CONNECTION_INFRA_BC),
+		},
+	};
+	eth_broadcast_addr(req.basic.peer_addr);
+
+	if (en) {
+		req.basic.conn_state = CONN_STATE_PORT_SECURE;
+		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
+						   EXTRA_INFO_NEW);
+	} else {
+		req.basic.conn_state = CONN_STATE_DISCONNECT;
+		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+	}
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta, bool en)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+	struct {
+		struct sta_req_hdr hdr;
+		struct sta_rec_basic basic;
+	} req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+			.wlan_idx = msta->wcid.idx,
+			.tlv_num = cpu_to_le16(1),
+			.is_tlv_append = 1,
+			.muar_idx = mvif->omac_idx,
+		},
+		.basic = {
+			.tag = cpu_to_le16(STA_REC_BASIC),
+			.len = cpu_to_le16(sizeof(struct sta_rec_basic)),
+			.qos = sta->wme,
+			.aid = cpu_to_le16(sta->aid),
+		},
+	};
+	memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
+		req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+		break;
+	case NL80211_IFTYPE_STATION:
+		req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	if (en) {
+		req.basic.conn_state = CONN_STATE_PORT_SECURE;
+		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
+						   EXTRA_INFO_NEW);
+	} else {
+		req.basic.conn_state = CONN_STATE_DISCONNECT;
+		req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+	}
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+				   &req, sizeof(req), true);
+}
+
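+/* Beacon offload: the mac80211 template is prefixed with a TXD built by
+ * mt7615_mac_write_txwi() and copied into the request together with the TIM
+ * and CSA IE offsets, so the firmware can transmit and update the beacon
+ * without host involvement.
+ */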
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+		       int en)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+	struct ieee80211_mutable_offsets offs;
+	struct req {
+		u8 omac_idx;
+		u8 enable;
+		u8 wlan_idx;
+		u8 band_idx;
+		u8 pkt_type;
+		u8 need_pre_tbtt_int;
+		__le16 csa_ie_pos;
+		__le16 pkt_len;
+		__le16 tim_ie_pos;
+		u8 pkt[512];
+		u8 csa_cnt;
+		/* bss color change */
+		u8 bcc_cnt;
+		__le16 bcc_ie_pos;
+	} __packed req = {
+		.omac_idx = mvif->omac_idx,
+		.enable = en,
+		.wlan_idx = wcid->idx,
+		.band_idx = mvif->band_idx,
+	};
+	struct sk_buff *skb;
+
+	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+	if (!skb)
+		return -EINVAL;
+
+	if (skb->len > 512 - MT_TXD_SIZE) {
+		dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+			      0, NULL);
+	memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+	req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+	req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+	if (offs.csa_counter_offs[0]) {
+		u16 csa_offs;
+
+		csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+		req.csa_ie_pos = cpu_to_le16(csa_offs);
+		req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+	}
+	dev_kfree_skb(skb);
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
+				   &req, sizeof(req), true);
+}
+
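+/* TX power: power_level is converted to 0.5 dBm units, backed off according
+ * to the number of active chains, and used to clamp the per-channel power
+ * indices in the EEPROM block handed to the firmware.
+ */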
+int mt7615_mcu_set_tx_power(struct mt7615_dev *dev)
+{
+	int i, ret, n_chains = hweight8(dev->mt76.antenna_mask);
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	int freq = chandef->center_freq1, len, target_chains;
+	u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
+	enum nl80211_band band = chandef->chan->band;
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct {
+		u8 center_chan;
+		u8 dbdc_idx;
+		u8 band;
+		u8 rsv;
+	} __packed req_hdr = {
+		.center_chan = ieee80211_frequency_to_channel(freq),
+		.band = band,
+	};
+	s8 tx_power;
+
+	len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
+	req = kzalloc(len, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memcpy(req, &req_hdr, sizeof(req_hdr));
+	data = req + sizeof(req_hdr);
+	memcpy(data, eep + MT_EE_NIC_CONF_0,
+	       __MT_EE_MAX - MT_EE_NIC_CONF_0);
+
+	tx_power = hw->conf.power_level * 2;
+	switch (n_chains) {
+	case 4:
+		tx_power -= 12;
+		break;
+	case 3:
+		tx_power -= 8;
+		break;
+	case 2:
+		tx_power -= 6;
+		break;
+	default:
+		break;
+	}
+	tx_power = max_t(s8, tx_power, 0);
+	dev->mt76.txpower_cur = tx_power;
+
+	target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+	for (i = 0; i < target_chains; i++) {
+		int index = -MT_EE_NIC_CONF_0;
+
+		ret = mt7615_eeprom_get_power_index(dev, chandef->chan, i);
+		if (ret < 0)
+			goto out;
+
+		index += ret;
+		data[index] = min_t(u8, data[index], tx_power);
+	}
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+				  req, len, true);
+out:
+	kfree(req);
+
+	return ret;
+}
+
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+		       enum mt7615_rdd_cmd cmd, u8 index,
+		       u8 rx_sel, u8 val)
+{
+	struct {
+		u8 ctrl;
+		u8 rdd_idx;
+		u8 rdd_rx_sel;
+		u8 val;
+		u8 rsv[4];
+	} req = {
+		.ctrl = cmd,
+		.rdd_idx = index,
+		.rdd_rx_sel = rx_sel,
+		.val = val,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
+{
+	struct {
+		u8 pulse_num;
+		u8 rsv[3];
+		struct {
+			u32 start_time;
+			u16 width;
+			s16 power;
+		} pattern[32];
+	} req = {
+		.pulse_num = dev->radar_pattern.n_pulses,
+	};
+	u32 start_time = ktime_to_ms(ktime_get_boottime());
+	int i;
+
+	if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern))
+		return -EINVAL;
+
+	/* TODO: add some noise here */
+	for (i = 0; i < dev->radar_pattern.n_pulses; i++) {
+		req.pattern[i].width = dev->radar_pattern.width;
+		req.pattern[i].power = dev->radar_pattern.power;
+		req.pattern[i].start_time = start_time +
+					    i * dev->radar_pattern.period;
+	}
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN,
+				   &req, sizeof(req), false);
+}
+
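+/* Channel switch: the same request is sent twice, first as CHANNEL_SWITCH and
+ * then as SET_RX_PATH; the switch reason is set to DFS when the target radar
+ * channel is not yet marked available.
+ */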
+int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+	struct {
+		u8 control_chan;
+		u8 center_chan;
+		u8 bw;
+		u8 tx_streams;
+		u8 rx_streams_mask;
+		u8 switch_reason;
+		u8 band_idx;
+		/* for 80+80 only */
+		u8 center_chan2;
+		__le16 cac_case;
+		u8 channel_band;
+		u8 rsv0;
+		__le32 outband_freq;
+		u8 txpower_drop;
+		u8 rsv1[3];
+		u8 txpower_sku[53];
+		u8 rsv2[3];
+	} req = {
+		.control_chan = chandef->chan->hw_value,
+		.center_chan = ieee80211_frequency_to_channel(freq1),
+		.tx_streams = (dev->mt76.chainmask >> 8) & 0xf,
+		.rx_streams_mask = dev->mt76.antenna_mask,
+		.center_chan2 = ieee80211_frequency_to_channel(freq2),
+	};
+	int ret;
+
+	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+	    chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+		req.switch_reason = CH_SWITCH_DFS;
+	else
+		req.switch_reason = CH_SWITCH_NORMAL;
+
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_40:
+		req.bw = CMD_CBW_40MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		req.bw = CMD_CBW_80MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		req.bw = CMD_CBW_8080MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		req.bw = CMD_CBW_160MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_5:
+		req.bw = CMD_CBW_5MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_10:
+		req.bw = CMD_CBW_10MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+	default:
+		req.bw = CMD_CBW_20MHZ;
+		break;
+	}
+	memset(req.txpower_sku, 0x3f, 49);
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+				  &req, sizeof(req), true);
+	if (ret)
+		return ret;
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RX_PATH,
+				   &req, sizeof(req), true);
+}
+
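+/* HT/VHT station setup: a WTBL update carries the HT, optional VHT and SMPS
+ * TLVs plus a raw write to WTBL DW5 for the short-GI bits, followed by a
+ * STA_REC update mirroring the HT/VHT capabilities to the firmware.
+ */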
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			  struct ieee80211_sta *sta)
+{
+	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct wtbl_req_hdr *wtbl_hdr;
+	struct sta_req_hdr *sta_hdr;
+	struct wtbl_raw *wtbl_raw;
+	struct sta_rec_ht *sta_ht;
+	struct wtbl_ht *wtbl_ht;
+	int buf_len, ret, ntlv = 2;
+	u32 msk, val = 0;
+	u8 *buf;
+
+	buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	wtbl_hdr = (struct wtbl_req_hdr *)buf;
+	wtbl_hdr->wlan_idx = msta->wcid.idx;
+	wtbl_hdr->operation = WTBL_SET;
+	buf_len = sizeof(*wtbl_hdr);
+
+	/* ht basic */
+	wtbl_ht = (struct wtbl_ht *)(buf + buf_len);
+	wtbl_ht->tag = cpu_to_le16(WTBL_HT);
+	wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
+	wtbl_ht->ht = 1;
+	wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+	wtbl_ht->af = sta->ht_cap.ampdu_factor;
+	wtbl_ht->mm = sta->ht_cap.ampdu_density;
+	buf_len += sizeof(*wtbl_ht);
+
+	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+		val |= MT_WTBL_W5_SHORT_GI_20;
+	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+		val |= MT_WTBL_W5_SHORT_GI_40;
+
+	/* vht basic */
+	if (sta->vht_cap.vht_supported) {
+		struct wtbl_vht *wtbl_vht;
+
+		wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
+		buf_len += sizeof(*wtbl_vht);
+		wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
+		wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
+		wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+		wtbl_vht->vht = 1;
+		ntlv++;
+
+		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+			val |= MT_WTBL_W5_SHORT_GI_80;
+		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+			val |= MT_WTBL_W5_SHORT_GI_160;
+	}
+
+	/* smps */
+	if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+		struct wtbl_smps *wtbl_smps;
+
+		wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
+		buf_len += sizeof(*wtbl_smps);
+		wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
+		wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
+		wtbl_smps->smps = 1;
+		ntlv++;
+	}
+
+	/* sgi */
+	msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+	      MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+
+	wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
+	buf_len += sizeof(*wtbl_raw);
+	wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
+	wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
+	wtbl_raw->wtbl_idx = 1;
+	wtbl_raw->dw = 5;
+	wtbl_raw->msk = cpu_to_le32(~msk);
+	wtbl_raw->val = cpu_to_le32(val);
+
+	wtbl_hdr->tlv_num = cpu_to_le16(ntlv);
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				  buf, buf_len, true);
+	if (ret)
+		goto out;
+
+	memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
+
+	sta_hdr = (struct sta_req_hdr *)buf;
+	sta_hdr->bss_idx = mvif->idx;
+	sta_hdr->wlan_idx = msta->wcid.idx;
+	sta_hdr->is_tlv_append = 1;
+	ntlv = sta->vht_cap.vht_supported ? 2 : 1;
+	sta_hdr->tlv_num = cpu_to_le16(ntlv);
+	sta_hdr->muar_idx = mvif->omac_idx;
+	buf_len = sizeof(*sta_hdr);
+
+	sta_ht = (struct sta_rec_ht *)(buf + buf_len);
+	sta_ht->tag = cpu_to_le16(STA_REC_HT);
+	sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
+	sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+	buf_len += sizeof(*sta_ht);
+
+	if (sta->vht_cap.vht_supported) {
+		struct sta_rec_vht *sta_vht;
+
+		sta_vht = (struct sta_rec_vht *)(buf + buf_len);
+		buf_len += sizeof(*sta_vht);
+		sta_vht->tag = cpu_to_le16(STA_REC_VHT);
+		sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
+		sta_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+		sta_vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+		sta_vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+	}
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+				  buf, buf_len, true);
+out:
+	kfree(buf);
+
+	return ret;
+}
+
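+/* TX BA session: both the WTBL entry and the firmware STA record are updated;
+ * the WTBL side additionally maps the negotiated buffer size onto a window
+ * size index via the ba_range[] table below.
+ */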
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+			 struct ieee80211_ampdu_params *params,
+			 bool add)
+{
+	struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+	struct mt7615_vif *mvif = msta->vif;
+	struct {
+		struct wtbl_req_hdr hdr;
+		struct wtbl_ba ba;
+	} wtbl_req = {
+		.hdr = {
+			.wlan_idx = msta->wcid.idx,
+			.operation = WTBL_SET,
+			.tlv_num = cpu_to_le16(1),
+		},
+		.ba = {
+			.tag = cpu_to_le16(WTBL_BA),
+			.len = cpu_to_le16(sizeof(struct wtbl_ba)),
+			.tid = params->tid,
+			.ba_type = MT_BA_TYPE_ORIGINATOR,
+			.sn = add ? cpu_to_le16(params->ssn) : 0,
+			.ba_en = add,
+		},
+	};
+	struct {
+		struct sta_req_hdr hdr;
+		struct sta_rec_ba ba;
+	} sta_req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+			.wlan_idx = msta->wcid.idx,
+			.tlv_num = cpu_to_le16(1),
+			.is_tlv_append = 1,
+			.muar_idx = mvif->omac_idx,
+		},
+		.ba = {
+			.tag = cpu_to_le16(STA_REC_BA),
+			.len = cpu_to_le16(sizeof(struct sta_rec_ba)),
+			.tid = params->tid,
+			.ba_type = MT_BA_TYPE_ORIGINATOR,
+			.amsdu = params->amsdu,
+			.ba_en = add << params->tid,
+			.ssn = cpu_to_le16(params->ssn),
+			.winsize = cpu_to_le16(params->buf_size),
+		},
+	};
+	int ret;
+
+	if (add) {
+		u8 idx, ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+
+		for (idx = 7; idx > 0; idx--) {
+			if (params->buf_size >= ba_range[idx])
+				break;
+		}
+
+		wtbl_req.ba.ba_winsize_idx = idx;
+	}
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				  &wtbl_req, sizeof(wtbl_req), true);
+	if (ret)
+		return ret;
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+				   &sta_req, sizeof(sta_req), true);
+}
+
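+/* RX BA session: the firmware STA record is always updated; the WTBL entry is
+ * only touched when a session is being added.
+ */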
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+			 struct ieee80211_ampdu_params *params,
+			 bool add)
+{
+	struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+	struct mt7615_vif *mvif = msta->vif;
+	struct {
+		struct wtbl_req_hdr hdr;
+		struct wtbl_ba ba;
+	} wtbl_req = {
+		.hdr = {
+			.wlan_idx = msta->wcid.idx,
+			.operation = WTBL_SET,
+			.tlv_num = cpu_to_le16(1),
+		},
+		.ba = {
+			.tag = cpu_to_le16(WTBL_BA),
+			.len = cpu_to_le16(sizeof(struct wtbl_ba)),
+			.tid = params->tid,
+			.ba_type = MT_BA_TYPE_RECIPIENT,
+			.rst_ba_tid = params->tid,
+			.rst_ba_sel = RST_BA_MAC_TID_MATCH,
+			.rst_ba_sb = 1,
+		},
+	};
+	struct {
+		struct sta_req_hdr hdr;
+		struct sta_rec_ba ba;
+	} sta_req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+			.wlan_idx = msta->wcid.idx,
+			.tlv_num = cpu_to_le16(1),
+			.is_tlv_append = 1,
+			.muar_idx = mvif->omac_idx,
+		},
+		.ba = {
+			.tag = cpu_to_le16(STA_REC_BA),
+			.len = cpu_to_le16(sizeof(struct sta_rec_ba)),
+			.tid = params->tid,
+			.ba_type = MT_BA_TYPE_RECIPIENT,
+			.amsdu = params->amsdu,
+			.ba_en = add << params->tid,
+			.ssn = cpu_to_le16(params->ssn),
+			.winsize = cpu_to_le16(params->buf_size),
+		},
+	};
+	int ret;
+
+	memcpy(wtbl_req.ba.peer_addr, params->sta->addr, ETH_ALEN);
+
+	ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+				  &sta_req, sizeof(sta_req), true);
+	if (ret || !add)
+		return ret;
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+				   &wtbl_req, sizeof(wtbl_req), true);
+}
+
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+{
+	struct {
+		u8 action;
+		u8 rsv[3];
+	} req = {
+		.action = index,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+				   sizeof(req), true);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
new file mode 100644
index 0000000..1fd7dff
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -0,0 +1,498 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MCU_H
+#define __MT7615_MCU_H
+
+struct mt7615_mcu_txd {
+	__le32 txd[8];
+
+	__le16 len;
+	__le16 pq_id;
+
+	u8 cid;
+	u8 pkt_type;
+	u8 set_query; /* FW doesn't care */
+	u8 seq;
+
+	u8 uc_d2b0_rev;
+	u8 ext_cid;
+	u8 s2d_index;
+	u8 ext_cid_ack;
+
+	u32 reserved[5];
+} __packed __aligned(4);
+
+/* event table */
+enum {
+	MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+	MCU_EVENT_FW_START = 0x01,
+	MCU_EVENT_GENERIC = 0x01,
+	MCU_EVENT_ACCESS_REG = 0x02,
+	MCU_EVENT_MT_PATCH_SEM = 0x04,
+	MCU_EVENT_CH_PRIVILEGE = 0x18,
+	MCU_EVENT_EXT = 0xed,
+	MCU_EVENT_RESTART_DL = 0xef,
+};
+
+/* ext event table */
+enum {
+	MCU_EXT_EVENT_PS_SYNC = 0x5,
+	MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+	MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+	MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+	MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+	MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
+};
+
+struct mt7615_mcu_rxd {
+	__le32 rxd[4];
+
+	__le16 len;
+	__le16 pkt_type_id;
+
+	u8 eid;
+	u8 seq;
+	__le16 __rsv;
+
+	u8 ext_eid;
+	u8 __rsv1[2];
+	u8 s2d_index;
+};
+
+#define MCU_PQ_ID(p, q)		(((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID		0xa0
+
+enum {
+	MCU_Q_QUERY,
+	MCU_Q_SET,
+	MCU_Q_RESERVED,
+	MCU_Q_NA
+};
+
+enum {
+	MCU_S2D_H2N,
+	MCU_S2D_C2N,
+	MCU_S2D_H2C,
+	MCU_S2D_H2CN
+};
+
+enum {
+	MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+	MCU_CMD_FW_START_REQ = 0x02,
+	MCU_CMD_INIT_ACCESS_REG = 0x3,
+	MCU_CMD_PATCH_START_REQ = 0x05,
+	MCU_CMD_PATCH_FINISH_REQ = 0x07,
+	MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+	MCU_CMD_EXT_CID = 0xED,
+	MCU_CMD_FW_SCATTER = 0xEE,
+	MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+	MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+	MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+	MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+	MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+	MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+	MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+	MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+	MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+	MCU_EXT_CMD_GET_TEMP = 0x2c,
+	MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+	MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+	MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+	MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+	MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+	MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+	MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
+};
+
+enum {
+	PATCH_SEM_RELEASE = 0x0,
+	PATCH_SEM_GET	  = 0x1
+};
+
+enum {
+	PATCH_NOT_DL_SEM_FAIL	 = 0x0,
+	PATCH_IS_DL		 = 0x1,
+	PATCH_NOT_DL_SEM_SUCCESS = 0x2,
+	PATCH_REL_SEM_SUCCESS	 = 0x3
+};
+
+enum {
+	FW_STATE_INITIAL          = 0,
+	FW_STATE_FW_DOWNLOAD      = 1,
+	FW_STATE_NORMAL_OPERATION = 2,
+	FW_STATE_NORMAL_TRX       = 3,
+	FW_STATE_CR4_RDY          = 7
+};
+
+#define STA_TYPE_STA		BIT(0)
+#define STA_TYPE_AP		BIT(1)
+#define STA_TYPE_ADHOC		BIT(2)
+#define STA_TYPE_WDS		BIT(4)
+#define STA_TYPE_BC		BIT(5)
+
+#define NETWORK_INFRA		BIT(16)
+#define NETWORK_P2P		BIT(17)
+#define NETWORK_IBSS		BIT(18)
+#define NETWORK_WDS		BIT(21)
+
+#define CONNECTION_INFRA_STA	(STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP	(STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC	(STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO	(STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC	(STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS		(STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC	(STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT	0
+#define CONN_STATE_CONNECT	1
+#define CONN_STATE_PORT_SECURE	2
+
+enum {
+	DEV_INFO_ACTIVE,
+	DEV_INFO_MAX_NUM
+};
+
+struct bss_info_omac {
+	__le16 tag;
+	__le16 len;
+	u8 hw_bss_idx;
+	u8 omac_idx;
+	u8 band_idx;
+	u8 rsv0;
+	__le32 conn_type;
+	u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+	__le16 tag;
+	__le16 len;
+	__le32 network_type;
+	u8 active;
+	u8 rsv0;
+	__le16 bcn_interval;
+	u8 bssid[ETH_ALEN];
+	u8 wmm_idx;
+	u8 dtim_period;
+	u8 bmc_tx_wlan_idx;
+	u8 cipher; /* not used */
+	u8 phymode; /* not used */
+	u8 rsv1[5];
+} __packed;
+
+struct bss_info_rf_ch {
+	__le16 tag;
+	__le16 len;
+	u8 pri_ch;
+	u8 central_ch0;
+	u8 central_ch1;
+	u8 bw;
+} __packed;
+
+struct bss_info_ext_bss {
+	__le16 tag;
+	__le16 len;
+	__le32 mbss_tsf_offset; /* in units of us */
+	u8 rsv[8];
+} __packed;
+
+enum {
+	BSS_INFO_OMAC,
+	BSS_INFO_BASIC,
+	BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+	BSS_INFO_PM, /* sta only */
+	BSS_INFO_UAPSD, /* sta only */
+	BSS_INFO_ROAM_DETECTION, /* obsoleted */
+	BSS_INFO_LQ_RM, /* obsoleted */
+	BSS_INFO_EXT_BSS,
+	BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
+	BSS_INFO_SYNC_MODE, /* obsoleted */
+	BSS_INFO_RA,
+	BSS_INFO_MAX_NUM
+};
+
+enum {
+	WTBL_RESET_AND_SET = 1,
+	WTBL_SET,
+	WTBL_QUERY,
+	WTBL_RESET_ALL
+};
+
+struct wtbl_req_hdr {
+	u8 wlan_idx;
+	u8 operation;
+	__le16 tlv_num;
+	u8 rsv[4];
+} __packed;
+
+struct wtbl_generic {
+	__le16 tag;
+	__le16 len;
+	u8 peer_addr[ETH_ALEN];
+	u8 muar_idx;
+	u8 skip_tx;
+	u8 cf_ack;
+	u8 qos;
+	u8 mesh;
+	u8 adm;
+	__le16 partial_aid;
+	u8 baf_en;
+	u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+	__le16 tag;
+	__le16 len;
+	u8 rcid;
+	u8 rca1;
+	u8 rca2;
+	u8 rv;
+	u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+	__le16 tag;
+	__le16 len;
+	u8 ht;
+	u8 ldpc;
+	u8 af;
+	u8 mm;
+	u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+	__le16 tag;
+	__le16 len;
+	u8 ldpc;
+	u8 dyn_bw;
+	u8 vht;
+	u8 txop_ps;
+	u8 rsv[4];
+} __packed;
+
+struct wtbl_tx_ps {
+	__le16 tag;
+	__le16 len;
+	u8 txps;
+	u8 rsv[3];
+} __packed;
+
+struct wtbl_hdr_trans {
+	__le16 tag;
+	__le16 len;
+	u8 to_ds;
+	u8 from_ds;
+	u8 disable_rx_trans;
+	u8 rsv;
+} __packed;
+
+enum {
+	MT_BA_TYPE_INVALID,
+	MT_BA_TYPE_ORIGINATOR,
+	MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+	RST_BA_MAC_TID_MATCH,
+	RST_BA_MAC_MATCH,
+	RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+	__le16 tag;
+	__le16 len;
+	/* common */
+	u8 tid;
+	u8 ba_type;
+	u8 rsv0[2];
+	/* originator only */
+	__le16 sn;
+	u8 ba_en;
+	u8 ba_winsize_idx;
+	__le16 ba_winsize;
+	/* recipient only */
+	u8 peer_addr[ETH_ALEN];
+	u8 rst_ba_tid;
+	u8 rst_ba_sel;
+	u8 rst_ba_sb;
+	u8 band_idx;
+	u8 rsv1[4];
+} __packed;
+
+struct wtbl_bf {
+	__le16 tag;
+	__le16 len;
+	u8 ibf;
+	u8 ebf;
+	u8 ibf_vht;
+	u8 ebf_vht;
+	u8 gid;
+	u8 pfmu_idx;
+	u8 rsv[2];
+} __packed;
+
+struct wtbl_smps {
+	__le16 tag;
+	__le16 len;
+	u8 smps;
+	u8 rsv[3];
+} __packed;
+
+struct wtbl_pn {
+	__le16 tag;
+	__le16 len;
+	u8 pn[6];
+	u8 rsv[2];
+} __packed;
+
+struct wtbl_spe {
+	__le16 tag;
+	__le16 len;
+	u8 spe_idx;
+	u8 rsv[3];
+} __packed;
+
+struct wtbl_raw {
+	__le16 tag;
+	__le16 len;
+	u8 wtbl_idx;
+	u8 dw;
+	u8 rsv[2];
+	__le32 msk;
+	__le32 val;
+} __packed;
+
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+				     sizeof(struct wtbl_generic) + \
+				     sizeof(struct wtbl_rx) + \
+				     sizeof(struct wtbl_ht) + \
+				     sizeof(struct wtbl_vht) + \
+				     sizeof(struct wtbl_tx_ps) + \
+				     sizeof(struct wtbl_hdr_trans) + \
+				     sizeof(struct wtbl_ba) + \
+				     sizeof(struct wtbl_bf) + \
+				     sizeof(struct wtbl_smps) + \
+				     sizeof(struct wtbl_pn) + \
+				     sizeof(struct wtbl_spe))
+
+enum {
+	WTBL_GENERIC,
+	WTBL_RX,
+	WTBL_HT,
+	WTBL_VHT,
+	WTBL_PEER_PS, /* not used */
+	WTBL_TX_PS,
+	WTBL_HDR_TRANS,
+	WTBL_SEC_KEY,
+	WTBL_BA,
+	WTBL_RDG, /* obsoleted */
+	WTBL_PROTECT, /* not used */
+	WTBL_CLEAR, /* not used */
+	WTBL_BF,
+	WTBL_SMPS,
+	WTBL_RAW_DATA, /* debug only */
+	WTBL_PN,
+	WTBL_SPE,
+	WTBL_MAX_NUM
+};
+
+struct sta_req_hdr {
+	u8 bss_idx;
+	u8 wlan_idx;
+	__le16 tlv_num;
+	u8 is_tlv_append;
+	u8 muar_idx;
+	u8 rsv[2];
+} __packed;
+
+struct sta_rec_basic {
+	__le16 tag;
+	__le16 len;
+	__le32 conn_type;
+	u8 conn_state;
+	u8 qos;
+	__le16 aid;
+	u8 peer_addr[ETH_ALEN];
+#define EXTRA_INFO_VER	BIT(0)
+#define EXTRA_INFO_NEW	BIT(1)
+	__le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+	__le16 tag;
+	__le16 len;
+	__le16 ht_cap;
+	u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+	__le16 tag;
+	__le16 len;
+	__le32 vht_cap;
+	__le16 vht_rx_mcs_map;
+	__le16 vht_tx_mcs_map;
+} __packed;
+
+struct sta_rec_ba {
+	__le16 tag;
+	__le16 len;
+	u8 tid;
+	u8 ba_type;
+	u8 amsdu;
+	u8 ba_en;
+	__le16 ssn;
+	__le16 winsize;
+} __packed;
+
+#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
+					sizeof(struct sta_rec_ht) + \
+					sizeof(struct sta_rec_vht))
+
+enum {
+	STA_REC_BASIC,
+	STA_REC_RA,
+	STA_REC_RA_CMM_INFO,
+	STA_REC_RA_UPDATE,
+	STA_REC_BF,
+	STA_REC_AMSDU, /* for CR4 */
+	STA_REC_BA,
+	STA_REC_RED, /* not used */
+	STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+	STA_REC_HT,
+	STA_REC_VHT,
+	STA_REC_APPS,
+	STA_REC_MAX_NUM
+};
+
+enum {
+	CMD_CBW_20MHZ,
+	CMD_CBW_40MHZ,
+	CMD_CBW_80MHZ,
+	CMD_CBW_160MHZ,
+	CMD_CBW_10MHZ,
+	CMD_CBW_5MHZ,
+	CMD_CBW_8080MHZ
+};
+
+enum {
+	CH_SWITCH_NORMAL = 0,
+	CH_SWITCH_SCAN = 3,
+	CH_SWITCH_MCC = 4,
+	CH_SWITCH_DFS = 5,
+	CH_SWITCH_BACKGROUND_SCAN_START = 6,
+	CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+	CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+	CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+static inline struct sk_buff *
+mt7615_mcu_msg_alloc(const void *data, int len)
+{
+	return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
+				  len, 0);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
new file mode 100644
index 0000000..7963e30
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_H
+#define __MT7615_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7615_MAX_INTERFACES		4
+#define MT7615_MAX_WMM_SETS		4
+#define MT7615_WTBL_SIZE		128
+#define MT7615_WTBL_RESERVED		(MT7615_WTBL_SIZE - 1)
+#define MT7615_WTBL_STA			(MT7615_WTBL_RESERVED - \
+					 MT7615_MAX_INTERFACES)
+
+#define MT7615_WATCHDOG_TIME		(HZ / 10)
+#define MT7615_RATE_RETRY		2
+
+#define MT7615_TX_RING_SIZE		1024
+#define MT7615_TX_MCU_RING_SIZE		128
+#define MT7615_TX_FWDL_RING_SIZE	128
+
+#define MT7615_RX_RING_SIZE		1024
+#define MT7615_RX_MCU_RING_SIZE		512
+
+#define MT7615_FIRMWARE_CR4		"mediatek/mt7615_cr4.bin"
+#define MT7615_FIRMWARE_N9		"mediatek/mt7615_n9.bin"
+#define MT7615_ROM_PATCH		"mediatek/mt7615_rom_patch.bin"
+
+#define MT7615_EEPROM_SIZE		1024
+#define MT7615_TOKEN_SIZE		4096
+
+#define MT_FRAC_SCALE		12
+#define MT_FRAC(val, div)	(((val) << MT_FRAC_SCALE) / (div))
+
+struct mt7615_vif;
+struct mt7615_sta;
+
+enum mt7615_hw_txq_id {
+	MT7615_TXQ_MAIN,
+	MT7615_TXQ_EXT,
+	MT7615_TXQ_MCU,
+	MT7615_TXQ_FWDL,
+};
+
+struct mt7615_rate_set {
+	struct ieee80211_tx_rate probe_rate;
+	struct ieee80211_tx_rate rates[4];
+};
+
+struct mt7615_sta {
+	struct mt76_wcid wcid; /* must be first */
+
+	struct mt7615_vif *vif;
+
+	struct ieee80211_tx_rate rates[4];
+
+	struct mt7615_rate_set rateset[2];
+	u32 rate_set_tsf;
+
+	u8 rate_count;
+	u8 n_rates;
+
+	u8 rate_probe;
+};
+
+struct mt7615_vif {
+	u8 idx;
+	u8 omac_idx;
+	u8 band_idx;
+	u8 wmm_idx;
+
+	struct mt7615_sta sta;
+};
+
+struct mt7615_dev {
+	struct mt76_dev mt76; /* must be first */
+	u32 vif_mask;
+	u32 omac_mask;
+
+	struct {
+		u8 n_pulses;
+		u32 period;
+		u16 width;
+		s16 power;
+	} radar_pattern;
+	u32 hw_pattern;
+	int dfs_state;
+
+	int false_cca_ofdm, false_cca_cck;
+	unsigned long last_cca_adj;
+	u8 mac_work_count;
+	s8 ofdm_sensitivity;
+	s8 cck_sensitivity;
+	bool scs_en;
+
+	spinlock_t token_lock;
+	struct idr token;
+};
+
+enum {
+	HW_BSSID_0 = 0x0,
+	HW_BSSID_1,
+	HW_BSSID_2,
+	HW_BSSID_3,
+	HW_BSSID_MAX,
+	EXT_BSSID_START = 0x10,
+	EXT_BSSID_1,
+	EXT_BSSID_2,
+	EXT_BSSID_3,
+	EXT_BSSID_4,
+	EXT_BSSID_5,
+	EXT_BSSID_6,
+	EXT_BSSID_7,
+	EXT_BSSID_8,
+	EXT_BSSID_9,
+	EXT_BSSID_10,
+	EXT_BSSID_11,
+	EXT_BSSID_12,
+	EXT_BSSID_13,
+	EXT_BSSID_14,
+	EXT_BSSID_15,
+	EXT_BSSID_END
+};
+
+enum {
+	MT_HW_RDD0,
+	MT_HW_RDD1,
+};
+
+enum {
+	MT_RX_SEL0,
+	MT_RX_SEL1,
+};
+
+enum mt7615_rdd_cmd {
+	RDD_STOP,
+	RDD_START,
+	RDD_DET_MODE,
+	RDD_DET_STOP,
+	RDD_CAC_START,
+	RDD_CAC_END,
+	RDD_NORMAL_START,
+	RDD_DISABLE_DFS_CAL,
+	RDD_PULSE_DBG,
+	RDD_READ_PULSE,
+	RDD_RESUME_BF,
+};
+
+extern const struct ieee80211_ops mt7615_ops;
+extern struct pci_driver mt7615_pci_driver;
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+
+int mt7615_register_device(struct mt7615_dev *dev);
+void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_eeprom_init(struct mt7615_dev *dev);
+int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
+				  struct ieee80211_channel *chan,
+				  u8 chain_idx);
+int mt7615_dma_init(struct mt7615_dev *dev);
+void mt7615_dma_cleanup(struct mt7615_dev *dev);
+int mt7615_mcu_init(struct mt7615_dev *dev);
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+			    struct ieee80211_vif *vif, bool enable);
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			    int en);
+void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+			  struct ieee80211_tx_rate *probe_rate,
+			  struct ieee80211_tx_rate *rates);
+int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			bool enable);
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+			       struct ieee80211_vif *vif, bool en);
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta, bool en);
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+		       int en);
+int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+		       const struct ieee80211_tx_queue_params *params);
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+			 struct ieee80211_ampdu_params *params,
+			 bool add);
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+			 struct ieee80211_ampdu_params *params,
+			 bool add);
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+			  struct ieee80211_sta *sta);
+void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+		       enum mt7615_rdd_cmd cmd, u8 index,
+		       u8 rx_sel, u8 val);
+int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev);
+int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev);
+int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+	return mt76_chip(dev) == 0x7622;
+}
+
+static inline void mt7615_dfs_check_channel(struct mt7615_dev *dev)
+{
+	enum nl80211_chan_width width = dev->mt76.chandef.width;
+	u32 freq = dev->mt76.chandef.chan->center_freq;
+	struct ieee80211_hw *hw = mt76_hw(dev);
+
+	if (hw->conf.chandef.chan->center_freq != freq ||
+	    hw->conf.chandef.width != width)
+		dev->dfs_state = -1;
+}
+
+static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev);
+void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
+void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+			  struct sk_buff *skb, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta, int pid,
+			  struct ieee80211_key_conf *key);
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
+int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+			    struct ieee80211_key_conf *key,
+			    enum set_key_cmd cmd);
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
+int mt7615_mcu_init_mac(struct mt7615_dev *dev);
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
+void mt7615_mcu_exit(struct mt7615_dev *dev);
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
+			  struct mt76_tx_info *tx_info);
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e);
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb);
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta);
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta);
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+void mt7615_mac_work(struct work_struct *work);
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+			  struct mt76_txwi_cache *txwi);
+int mt76_dfs_start_rdd(struct mt7615_dev *dev, bool force);
+int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev);
+
+int mt7615_init_debugfs(struct mt7615_dev *dev);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
new file mode 100644
index 0000000..e250607
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *         Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7615.h"
+#include "mac.h"
+
+static const struct pci_device_id mt7615_pci_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7615) },
+	{ },
+};
+
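+/* Map an arbitrary chip address into the fixed PCIe remap window: program the
+ * page base into MT_MCU_PCIE_REMAP_2 and return the offset within the
+ * MT_PCIE_REMAP_BASE_2 aperture.
+ */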
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+	u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+	u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+	return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+	mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+	struct mt7615_dev *dev = dev_instance;
+	u32 intr;
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return IRQ_NONE;
+
+	intr &= dev->mt76.mmio.irqmask;
+
+	if (intr & MT_INT_TX_DONE_ALL) {
+		mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		napi_schedule(&dev->mt76.tx_napi);
+	}
+
+	if (intr & MT_INT_RX_DONE(0)) {
+		mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+		napi_schedule(&dev->mt76.napi[0]);
+	}
+
+	if (intr & MT_INT_RX_DONE(1)) {
+		mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+		napi_schedule(&dev->mt76.napi[1]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int mt7615_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		/* txwi_size = txd size + txp size */
+		.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
+		.txwi_flags = MT_TXWI_NO_FREE,
+		.tx_prepare_skb = mt7615_tx_prepare_skb,
+		.tx_complete_skb = mt7615_tx_complete_skb,
+		.rx_skb = mt7615_queue_rx_skb,
+		.rx_poll_complete = mt7615_rx_poll_complete,
+		.sta_ps = mt7615_sta_ps,
+		.sta_add = mt7615_sta_add,
+		.sta_assoc = mt7615_sta_assoc,
+		.sta_remove = mt7615_sta_remove,
+		.update_survey = mt7615_update_channel,
+	};
+	struct mt7615_dev *dev;
+	struct mt76_dev *mdev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
+				 &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt7615_dev, mt76);
+	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+		    (mt76_rr(dev, MT_HW_REV) & 0xff);
+	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+	ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt7615_register_device(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void mt7615_pci_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+	mt7615_unregister_device(dev);
+}
+
+struct pci_driver mt7615_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt7615_pci_device_table,
+	.probe		= mt7615_pci_probe,
+	.remove		= mt7615_pci_remove,
+};
+
+module_pci_driver(mt7615_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
+MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
+MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7615_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
new file mode 100644
index 0000000..b193814
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_REGS_H
+#define __MT7615_REGS_H
+
+#define MT_HW_REV			0x1000
+#define MT_HW_CHIPID			0x1008
+#define MT_TOP_MISC2			0x1134
+#define MT_TOP_MISC2_FW_STATE		GENMASK(2, 0)
+
+#define MT_MCU_BASE			0x2000
+#define MT_MCU(ofs)			(MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1		MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET	GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE	GENMASK(31, 18)
+#define MT_PCIE_REMAP_BASE_1		0x40000
+
+#define MT_MCU_PCIE_REMAP_2		MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET	GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE	GENMASK(31, 19)
+#define MT_PCIE_REMAP_BASE_2		0x80000
+
+#define MT_HIF_BASE			0x4000
+#define MT_HIF(ofs)			(MT_HIF_BASE + (ofs))
+
+#define MT_CFG_LPCR_HOST		MT_HIF(0x1f0)
+#define MT_CFG_LPCR_HOST_FW_OWN		BIT(0)
+#define MT_CFG_LPCR_HOST_DRV_OWN	BIT(1)
+
+#define MT_INT_SOURCE_CSR		MT_HIF(0x200)
+#define MT_INT_MASK_CSR			MT_HIF(0x204)
+#define MT_DELAY_INT_CFG		MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(7, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+
+#define MT_WPDMA_GLO_CFG		MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0	BIT(9)
+#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN	GENMASK(11, 10)
+#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN	BIT(12)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21	GENMASK(23, 22)
+#define MT_WPDMA_GLO_CFG_SW_RESET	BIT(24)
+#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY	BIT(26)
+#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO	BIT(28)
+
+#define MT_WPDMA_RST_IDX		MT_HIF(0x20c)
+
+#define MT_TX_RING_BASE			MT_HIF(0x300)
+#define MT_RX_RING_BASE			MT_HIF(0x400)
+
+#define MT_WPDMA_GLO_CFG1		MT_HIF(0x500)
+#define MT_WPDMA_TX_PRE_CFG		MT_HIF(0x510)
+#define MT_WPDMA_RX_PRE_CFG		MT_HIF(0x520)
+#define MT_WPDMA_ABT_CFG		MT_HIF(0x530)
+#define MT_WPDMA_ABT_CFG1		MT_HIF(0x534)
+
+#define MT_WF_PHY_BASE			0x10000
+#define MT_WF_PHY(ofs)			(MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_WF2_RFCTRL0		MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN	BIT(9)
+
+#define MT_WF_PHY_R0_B0_PHYMUX_5	MT_WF_PHY(0x0614)
+
+#define MT_WF_PHY_R0_B0_PHYCTRL_STS0	MT_WF_PHY(0x020c)
+#define MT_WF_PHYCTRL_STAT_PD_OFDM	GENMASK(31, 16)
+#define MT_WF_PHYCTRL_STAT_PD_CCK	GENMASK(15, 0)
+
+#define MT_WF_PHY_R0_B0_PHYCTRL_STS5	MT_WF_PHY(0x0220)
+#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM	GENMASK(31, 16)
+#define MT_WF_PHYCTRL_STAT_MDRDY_CCK	GENMASK(15, 0)
+
+#define MT_WF_PHY_B0_MIN_PRI_PWR	MT_WF_PHY(0x229c)
+#define MT_WF_PHY_B0_PD_OFDM_MASK	GENMASK(28, 20)
+#define MT_WF_PHY_B0_PD_OFDM(v)		((v) << 20)
+#define MT_WF_PHY_B0_PD_BLK		BIT(19)
+
+#define MT_WF_PHY_B1_MIN_PRI_PWR	MT_WF_PHY(0x084)
+#define MT_WF_PHY_B1_PD_OFDM_MASK	GENMASK(24, 16)
+#define MT_WF_PHY_B1_PD_OFDM(v)		((v) << 16)
+#define MT_WF_PHY_B1_PD_BLK		BIT(25)
+
+#define MT_WF_PHY_B0_RXTD_CCK_PD	MT_WF_PHY(0x2310)
+#define MT_WF_PHY_B0_PD_CCK_MASK	GENMASK(8, 1)
+#define MT_WF_PHY_B0_PD_CCK(v)		((v) << 1)
+
+#define MT_WF_PHY_B1_RXTD_CCK_PD	MT_WF_PHY(0x2314)
+#define MT_WF_PHY_B1_PD_CCK_MASK	GENMASK(31, 24)
+#define MT_WF_PHY_B1_PD_CCK(v)		((v) << 24)
+
+#define MT_WF_CFG_BASE			0x20200
+#define MT_WF_CFG(ofs)			(MT_WF_CFG_BASE + (ofs))
+
+#define MT_CFG_CCR			MT_WF_CFG(0x000)
+#define MT_CFG_CCR_MAC_D1_1X_GC_EN	BIT(24)
+#define MT_CFG_CCR_MAC_D0_1X_GC_EN	BIT(25)
+#define MT_CFG_CCR_MAC_D1_2X_GC_EN	BIT(30)
+#define MT_CFG_CCR_MAC_D0_2X_GC_EN	BIT(31)
+
+#define MT_WF_AGG_BASE			0x20a00
+#define MT_WF_AGG(ofs)			(MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR			MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1		BIT(0)
+#define MT_AGG_ARCR_RTS_RATE_THR	GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO	GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN	BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH	GENMASK(22, 20)
+
+#define MT_AGG_ARUCR			MT_WF_AGG(0x018)
+#define MT_AGG_ARDCR			MT_WF_AGG(0x01c)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n)	(4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n)		GENMASK(2 + \
+					MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+					MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_ACR0			MT_WF_AGG(0x070)
+#define MT_AGG_ACR1			MT_WF_AGG(0x170)
+#define MT_AGG_ACR_NO_BA_RULE		BIT(0)
+#define MT_AGG_ACR_NO_BA_AR_RULE	BIT(1)
+#define MT_AGG_ACR_PKT_TIME_EN		BIT(2)
+#define MT_AGG_ACR_CFEND_RATE		GENMASK(15, 4)
+#define MT_AGG_ACR_BAR_RATE		GENMASK(31, 20)
+
+#define MT_AGG_SCR			MT_WF_AGG(0x0fc)
+#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS	BIT(3)
+
+#define MT_WF_TMAC_BASE			0x21000
+#define MT_WF_TMAC(ofs)			(MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_TRCR0			MT_WF_TMAC(0x09c)
+#define MT_TMAC_TRCR1			MT_WF_TMAC(0x070)
+#define MT_TMAC_TRCR_CCA_SEL		GENMASK(31, 30)
+#define MT_TMAC_TRCR_SEC_CCA_SEL	GENMASK(29, 28)
+
+#define MT_TMAC_CTCR0			MT_WF_TMAC(0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME	GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY	GENMASK(15, 12)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN	BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN	BIT(18)
+
+#define MT_WF_RMAC_BASE			0x21200
+#define MT_WF_RMAC(ofs)			(MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR			MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI	BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL		BIT(1)
+#define MT_WF_RFCR_DROP_VERSION		BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ	BIT(4)
+#define MT_WF_RFCR_DROP_MCAST		BIT(5)
+#define MT_WF_RFCR_DROP_BCAST		BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED	BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC		BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID	BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID	BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON	BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT	BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV		BIT(13)
+#define MT_WF_RFCR_DROP_CTS		BIT(14)
+#define MT_WF_RFCR_DROP_RTS		BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE	BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS	BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC	BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM	BIT(19)
+#define MT_WF_RFCR_DROP_NDPA		BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL	BIT(21)
+
+#define MT_WF_DMA_BASE			0x21800
+#define MT_WF_DMA(ofs)			(MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0			MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN		GENMASK(15, 2)
+#define MT_DMA_DCR0_RX_VEC_DROP		BIT(17)
+
+#define MT_WTBL_BASE			0x30000
+#define MT_WTBL_ENTRY_SIZE		256
+
+#define MT_WTBL_OFF_BASE		0x23400
+#define MT_WTBL_OFF(n)			(MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_W0_KEY_IDX		GENMASK(24, 23)
+#define MT_WTBL_W0_RX_KEY_VALID		BIT(26)
+#define MT_WTBL_W0_RX_IK_VALID		BIT(27)
+
+#define MT_WTBL_W2_KEY_TYPE		GENMASK(7, 4)
+
+#define MT_WTBL_UPDATE			MT_WTBL_OFF(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX		GENMASK(7, 0)
+#define MT_WTBL_UPDATE_RXINFO_UPDATE	BIT(11)
+#define MT_WTBL_UPDATE_RATE_UPDATE	BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR	BIT(14)
+#define MT_WTBL_UPDATE_BUSY		BIT(31)
+
+#define MT_WTBL_ON_BASE			0x23000
+#define MT_WTBL_ON(_n)			(MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RICR0			MT_WTBL_ON(0x010)
+#define MT_WTBL_RICR1			MT_WTBL_ON(0x014)
+
+#define MT_WTBL_RIUCR0			MT_WTBL_ON(0x020)
+
+#define MT_WTBL_RIUCR1			MT_WTBL_ON(0x024)
+#define MT_WTBL_RIUCR1_RATE0		GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1		GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO		GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2			MT_WTBL_ON(0x028)
+#define MT_WTBL_RIUCR2_RATE2_HI		GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3		GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4		GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO		GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3			MT_WTBL_ON(0x02c)
+#define MT_WTBL_RIUCR3_RATE5_HI		GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6		GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7		GENMASK(31, 20)
+
+#define MT_WTBL_W5_CHANGE_BW_RATE	GENMASK(7, 5)
+#define MT_WTBL_W5_SHORT_GI_20		BIT(8)
+#define MT_WTBL_W5_SHORT_GI_40		BIT(9)
+#define MT_WTBL_W5_SHORT_GI_80		BIT(10)
+#define MT_WTBL_W5_SHORT_GI_160		BIT(11)
+#define MT_WTBL_W5_BW_CAP		GENMASK(13, 12)
+#define MT_WTBL_W5_MPDU_FAIL_COUNT	GENMASK(25, 23)
+#define MT_WTBL_W5_MPDU_OK_COUNT	GENMASK(28, 26)
+#define MT_WTBL_W5_RATE_IDX		GENMASK(31, 29)
+
+#define MT_WTBL_W27_CC_BW_SEL		GENMASK(6, 5)
+
+#define MT_LPON_BASE			0x24200
+#define MT_LPON(_n)			(MT_LPON_BASE + (_n))
+
+#define MT_LPON_T0CR			MT_LPON(0x010)
+#define MT_LPON_T0CR_MODE		GENMASK(1, 0)
+
+#define MT_LPON_UTTR0			MT_LPON(0x018)
+#define MT_LPON_UTTR1			MT_LPON(0x01c)
+
+#define MT_WF_MIB_BASE			0x24800
+#define MT_WF_MIB(ofs)			(MT_WF_MIB_BASE + (ofs))
+
+#define MT_MIB_M0_MISC_CR		MT_WF_MIB(0x00c)
+#define MT_MIB_MB_SDR0(n)		MT_WF_MIB(0x100 + ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK	GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK		GENMASK(15, 0)
+
+#define MT_MIB_SDR16(n)			MT_WF_MIB(0x48 + ((n) << 9))
+#define MT_MIB_BUSY_MASK		GENMASK(23, 0)
+
+#define MT_EFUSE_BASE			0x81070000
+#define MT_EFUSE_BASE_CTRL		0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY	BIT(30)
+
+#define MT_EFUSE_CTRL			0x008
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID		BIT(29)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_WDATA(_i)		(0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i)		(0x030 + ((_i) * 4))
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
new file mode 100644
index 0000000..7c88ed8
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76x0_COMMON
+	tristate
+	select MT76x02_LIB
+
+config MT76x0U
+	tristate "MediaTek MT76x0U (USB) support"
+	select MT76x0_COMMON
+	select MT76x02_USB
+	depends on MAC80211
+	depends on USB
+	help
+	  This adds support for MT7610U-based wireless USB 2.0 dongles,
+	  which comply with IEEE 802.11ac standards and support 1x1
+	  433Mbps PHY rate.
+
+	  To compile this driver as a module, choose M here.
+
+config MT76x0E
+	tristate "MediaTek MT76x0E (PCIe) support"
+	select MT76x0_COMMON
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7610/MT7630-based wireless PCIe devices,
+	  which comply with IEEE 802.11ac standards and support 1x1
+	  433Mbps PHY rate.
+
+	  To compile this driver as a module, choose M here.
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 0000000..8dcfb4c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76x0U) += mt76x0u.o
+obj-$(CONFIG_MT76x0E) += mt76x0e.o
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
+
+mt76x0-common-y := init.o main.o eeprom.o phy.o
+
+mt76x0u-y := usb.o usb_mcu.o
+mt76x0e-y := pci.o pci_mcu.o
+
+# ccflags-y := -DDEBUG
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 0000000..9d4426f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+#define MT_MAP_READS	DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
+static int
+mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
+{
+	u8 data[MT_MAP_READS * 16];
+	int ret, i;
+	u32 start = 0, end = 0, cnt_free;
+
+	ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
+				     sizeof(data), MT_EE_PHYSICAL_READ);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+		if (!data[i]) {
+			if (!start)
+				start = MT_EE_USAGE_MAP_START + i;
+			end = MT_EE_USAGE_MAP_START + i;
+		}
+	cnt_free = end - start + 1;
+
+	if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+		dev_err(dev->mt76.dev,
+			"driver does not support default EEPROM\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
+{
+	u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+	u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+	mt76x02_eeprom_parse_hw_cap(dev);
+	dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
+		dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
+
+	if (dev->no_2ghz) {
+		dev->mt76.cap.has_2ghz = false;
+		dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+	}
+
+	if (is_mt7630(dev)) {
+		dev->mt76.cap.has_5ghz = false;
+		dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+	}
+
+	if (!mt76x02_field_valid(nic_conf1 & 0xff))
+		nic_conf1 &= 0xff00;
+
+	if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+		dev_err(dev->mt76.dev,
+			"driver does not support HW RF ctrl\n");
+
+	if (!mt76x02_field_valid(nic_conf0 >> 8))
+		return;
+
+	if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+	    FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+		dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
+}
+
+static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
+{
+	u8 val;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
+	if (mt76x02_field_valid(val))
+		dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
+	else
+		dev->cal.rx.temp_offset = -10;
+}
+
+static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
+{
+	struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+	u8 val;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
+	if (!mt76x02_field_valid(val))
+		val = 0;
+	caldata->freq_offset = val;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
+	if (!mt76x02_field_valid(val))
+		val = 0;
+
+	caldata->freq_offset -= mt76x02_sign_extend(val, 8);
+}
+
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+	s8 val, lna_5g[3], lna_2g;
+	u16 rssi_offset;
+	int i;
+
+	mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
+	caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
+	for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
+		val = rssi_offset >> (8 * i);
+		if (val < -10 || val > 10)
+			val = 0;
+
+		caldata->rssi_offset[i] = val;
+	}
+}
+
+static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+	u8 val;
+
+	if (chandef->width == NL80211_CHAN_WIDTH_80) {
+		val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
+	} else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+		u16 data;
+
+		data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+		if (chandef->chan->band == NL80211_BAND_5GHZ)
+			val = data >> 8;
+		else
+			val = data;
+	} else {
+		return 0;
+	}
+
+	return mt76x02_rate_power_val(val);
+}
+
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+				  struct ieee80211_channel *chan,
+				  struct mt76_rate_power *t)
+{
+	bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
+	u16 val, addr;
+	s8 delta;
+
+	memset(t, 0, sizeof(*t));
+
+	/* cck 1M, 2M, 5.5M, 11M */
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
+	t->cck[0] = t->cck[1] = s6_to_s8(val);
+	t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
+
+	/* ofdm 6M, 9M, 12M, 18M */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
+	t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
+
+	/* ofdm 24M, 36M, 48M, 54M */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
+	t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
+
+	/* ht-vht mcs 1ss 0, 1, 2, 3 */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
+	t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
+
+	/* ht-vht mcs 1ss 4, 5, 6 */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
+	t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
+
+	/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->stbc[0] = t->stbc[1] = s6_to_s8(val);
+	t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
+
+	/* ht-vht mcs 1ss 4, 5, 6 stbc */
+	addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
+	val = mt76x02_eeprom_get(dev, addr);
+	t->stbc[4] = t->stbc[5] = s6_to_s8(val);
+	t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
+
+	/* vht mcs 8, 9 5GHz */
+	val = mt76x02_eeprom_get(dev, 0x132);
+	t->vht[8] = s6_to_s8(val);
+	t->vht[9] = s6_to_s8(val >> 8);
+
+	delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
+	mt76x02_add_rate_power_offset(t, delta);
+}
+
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+			   struct ieee80211_channel *chan, s8 *tp)
+{
+	struct mt76x0_chan_map {
+		u8 chan;
+		u8 offset;
+	} chan_map[] = {
+		{   2,  0 }, {   4,  2 }, {   6,  4 }, {   8,  6 },
+		{  10,  8 }, {  12, 10 }, {  14, 12 }, {  38,  0 },
+		{  44,  2 }, {  48,  4 }, {  54,  6 }, {  60,  8 },
+		{  64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
+		{ 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
+		{ 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
+		{ 167, 34 }, { 171, 36 }, { 175, 38 },
+	};
+	u8 offset, addr;
+	int i, idx = 0;
+	u16 data;
+
+	if (mt76x0_tssi_enabled(dev)) {
+		s8 target_power;
+
+		if (chan->band == NL80211_BAND_5GHZ)
+			data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
+		else
+			data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
+		target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
+		*tp = target_power + mt76x0_get_delta(dev);
+
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
+		if (chan->hw_value <= chan_map[i].chan) {
+			idx = (chan->hw_value == chan_map[i].chan);
+			offset = chan_map[i].offset;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(chan_map))
+		offset = chan_map[0].offset;
+
+	if (chan->band == NL80211_BAND_2GHZ) {
+		addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
+	} else {
+		switch (chan->hw_value) {
+		case 42:
+			offset = 2;
+			break;
+		case 58:
+			offset = 8;
+			break;
+		case 106:
+			offset = 14;
+			break;
+		case 122:
+			offset = 20;
+			break;
+		case 155:
+			offset = 30;
+			break;
+		default:
+			break;
+		}
+		addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
+	}
+
+	data = mt76x02_eeprom_get(dev, addr);
+	*tp = data >> (8 * idx);
+	if (*tp < 0 || *tp > 0x3f)
+		*tp = 5;
+}
+
+static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
+{
+	u16 val;
+
+	val = get_unaligned_le16(dev->mt76.eeprom.data);
+	if (!val)
+		val = get_unaligned_le16(dev->mt76.eeprom.data +
+					 MT_EE_PCI_ID);
+
+	switch (val) {
+	case 0x7650:
+	case 0x7610:
+		return 0;
+	default:
+		dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n",
+			val);
+		return -EINVAL;
+	}
+}
+
+static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
+{
+	int found;
+
+	found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE);
+	if (found < 0)
+		return found;
+
+	if (found && !mt76x0_check_eeprom(dev))
+		return 0;
+
+	found = mt76x0_efuse_physical_size_check(dev);
+	if (found < 0)
+		return found;
+
+	return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
+				      MT76X0_EEPROM_SIZE, MT_EE_READ);
+}
+
+int mt76x0_eeprom_init(struct mt76x02_dev *dev)
+{
+	u8 version, fae;
+	u16 data;
+	int err;
+
+	err = mt76x0_load_eeprom(dev);
+	if (err < 0)
+		return err;
+
+	data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
+	version = data >> 8;
+	fae = data;
+
+	if (version > MT76X0U_EE_MAX_VER)
+		dev_warn(dev->mt76.dev,
+			 "Warning: unsupported EEPROM version %02hhx\n",
+			 version);
+	dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+		 version, fae);
+
+	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+	mt76x0_set_chip_cap(dev);
+	mt76x0_set_freq_offset(dev);
+	mt76x0_set_temp_offset(dev);
+
+	return 0;
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 0000000..15540ce
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+struct mt76x02_dev;
+
+#define MT76X0U_EE_MAX_VER		0x0c
+#define MT76X0_EEPROM_SIZE		512
+
+int mt76x0_eeprom_init(struct mt76x02_dev *dev);
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+				  struct ieee80211_channel *chan,
+				  struct mt76_rate_power *t);
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+			   struct ieee80211_channel *chan, s8 *tp);
+
+static inline s8 s6_to_s8(u32 val)
+{
+	s8 ret = val & GENMASK(5, 0);
+
+	if (ret & BIT(5))
+		ret -= BIT(6);
+	return ret;
+}
+
+static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
+{
+	return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+		MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 0000000..cf7fc30
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "initvals.h"
+#include "../mt76x02_phy.h"
+
+static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
+	u16 mcs_map = 0;
+	int i;
+
+	vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+	for (i = 0; i < 8; i++) {
+		if (!i)
+			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
+		else
+			mcs_map |=
+				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
+	}
+	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+}
+
+static void
+mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
+{
+	u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
+
+	/* Note: we don't turn off WLAN_CLK because that makes the device
+	 *	 not respond properly on the probe path.
+	 *	 In case anyone (PSM?) wants to use this function we can
+	 *	 bring the clock stuff back and fixup the probe path.
+	 */
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	/* Note: vendor driver tries to disable/enable wlan here and retry
+	 *       but the code which does it is so buggy it must have never
+	 *       triggered, so don't bother.
+	 */
+	if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000))
+		dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (reset) {
+		val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+		val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+		if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+			val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+				MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+			mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+			udelay(20);
+
+			val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+				 MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+		}
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt76x0_set_wlan_state(dev, val, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
+
+static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_RESET_CSR |
+		MT_MAC_SYS_CTRL_RESET_BBP);
+	msleep(200);
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+}
+
+#define RANDOM_WRITE(dev, tab)			\
+	mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN,	\
+		   tab, ARRAY_SIZE(tab))
+
+static int mt76x0_init_bbp(struct mt76x02_dev *dev)
+{
+	int ret, i;
+
+	ret = mt76x0_phy_wait_bbp_ready(dev);
+	if (ret)
+		return ret;
+
+	RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+		const struct mt76_reg_pair *pair = &item->reg_pair;
+
+		if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+			mt76_wr(dev, pair->reg, pair->value);
+	}
+
+	RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+	return 0;
+}
+
+static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
+{
+	RANDOM_WRITE(dev, common_mac_reg_table);
+
+	/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+	RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+	/* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+	mt76_clear(dev, MT_MAC_SYS_CTRL, 0x3);
+
+	/* Set 0x141C[15:12]=0xF */
+	mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	/*
+	 * tx_ring 9 is for mgmt frame
+	 * tx_ring 8 is for in-band command frame.
+	 * WMM_RG0_TXQMA: this register setting is for FCE to
+	 *		  define the rule of tx_ring 9
+	 * WMM_RG1_TXQMA: this register setting is for FCE to
+	 *		  define the rule of tx_ring 8
+	 */
+	mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
+}
+
+static void mt76x0_reset_counters(struct mt76x02_dev *dev)
+{
+	mt76_rr(dev, MT_RX_STAT_0);
+	mt76_rr(dev, MT_RX_STAT_1);
+	mt76_rr(dev, MT_RX_STAT_2);
+	mt76_rr(dev, MT_TX_STA_0);
+	mt76_rr(dev, MT_TX_STA_1);
+	mt76_rr(dev, MT_TX_STA_2);
+}
+
+int mt76x0_mac_start(struct mt76x02_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
+		return -ETIMEDOUT;
+
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_mac_start);
+
+void mt76x0_mac_stop(struct mt76x02_dev *dev)
+{
+	int i = 200, ok = 0;
+
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+	/* Page count on TxQ */
+	while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+		       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+		       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+		msleep(10);
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+					 MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Page count on RxQ */
+	for (i = 0; i < 200; i++) {
+		if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+		    !mt76_rr(dev, 0x0a30) &&
+		    !mt76_rr(dev, 0x0a34)) {
+			if (ok++ > 5)
+				break;
+			continue;
+		}
+		msleep(1);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+		dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+}
+EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
+
+int mt76x0_init_hardware(struct mt76x02_dev *dev)
+{
+	int ret, i, k;
+
+	if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
+		return -EIO;
+
+	/* Wait for ASIC ready after FW load. */
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	mt76x0_reset_csr_bbp(dev);
+	ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+	if (ret)
+		return ret;
+
+	mt76x0_init_mac_registers(dev);
+
+	if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+		return -EIO;
+
+	ret = mt76x0_init_bbp(dev);
+	if (ret)
+		return ret;
+
+	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	for (i = 0; i < 16; i++)
+		for (k = 0; k < 4; k++)
+			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+
+	for (i = 0; i < 256; i++)
+		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+
+	mt76x0_reset_counters(dev);
+
+	ret = mt76x0_eeprom_init(dev);
+	if (ret)
+		return ret;
+
+	mt76x0_phy_init(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
+
+static void
+mt76x0_init_txpower(struct mt76x02_dev *dev,
+		    struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_channel *chan;
+	struct mt76_rate_power t;
+	s8 tp;
+	int i;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		chan = &sband->channels[i];
+
+		mt76x0_get_tx_power_per_rate(dev, chan, &t);
+		mt76x0_get_power_info(dev, chan, &tp);
+
+		chan->orig_mpwr = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+		chan->max_power = min_t(int, chan->max_reg_power,
+					chan->orig_mpwr);
+	}
+}
+
+int mt76x0_register_device(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	mt76x02_init_device(dev);
+	mt76x02_config_mac_addr_list(dev);
+
+	ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+				   ARRAY_SIZE(mt76x02_rates));
+	if (ret)
+		return ret;
+
+	if (dev->mt76.cap.has_5ghz) {
+		/* overwrite unsupported features */
+		mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
+		mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
+	}
+
+	if (dev->mt76.cap.has_2ghz)
+		mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
+
+	mt76x02_init_debugfs(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_register_device);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 0000000..3dcd962
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+	{ MT_BCN_OFFSET(0),		0xf8f0e8e0 },
+	{ MT_BCN_OFFSET(1),		0x6f77d0c8 },
+	{ MT_LEGACY_BASIC_RATE,		0x0000013f },
+	{ MT_HT_BASIC_RATE,		0x00008003 },
+	{ MT_MAC_SYS_CTRL,		0x00000000 },
+	{ MT_RX_FILTR_CFG,		0x00017f97 },
+	{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+	{ MT_TX_SW_CFG0,		0x00000000 },
+	{ MT_TX_SW_CFG1,		0x00080606 },
+	{ MT_TX_LINK_CFG,		0x00001020 },
+	{ MT_TX_TIMEOUT_CFG,		0x000a2090 },
+	{ MT_MAX_LEN_CFG,		0xa0fff | 0x00001000 },
+	{ MT_LED_CFG,			0x7f031e46 },
+	{ MT_PBF_TX_MAX_PCNT,		0x1fbf1f1f },
+	{ MT_PBF_RX_MAX_PCNT,		0x0000fe9f },
+	{ MT_TX_RETRY_CFG,		0x47d01f0f },
+	{ MT_AUTO_RSP_CFG,		0x00000013 },
+	{ MT_CCK_PROT_CFG,		0x07f40003 },
+	{ MT_OFDM_PROT_CFG,		0x07f42004 },
+	{ MT_PBF_CFG,			0x00f40006 },
+	{ MT_WPDMA_GLO_CFG,		0x00000030 },
+	{ MT_GF20_PROT_CFG,		0x01742004 },
+	{ MT_GF40_PROT_CFG,		0x03f42084 },
+	{ MT_MM20_PROT_CFG,		0x01742004 },
+	{ MT_MM40_PROT_CFG,		0x03f42084 },
+	{ MT_TXOP_CTRL_CFG,		0x0000583f },
+	{ MT_TX_RTS_CFG,		0x00ffff20 },
+	{ MT_EXP_ACK_TIME,		0x002400ca },
+	{ MT_TXOP_HLDR_ET,		0x00000002 },
+	{ MT_XIFS_TIME_CFG,		0x33a41010 },
+	{ MT_PWR_PIN_CFG,		0x00000000 },
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+	{ MT_IOCFG_6,			0xa0040080 },
+	{ MT_PBF_SYS_CTRL,		0x00080c00 },
+	{ MT_PBF_CFG,			0x77723c1f },
+	{ MT_FCE_PSE_CTRL,		0x00000001 },
+	{ MT_AMPDU_MAX_LEN_20M1S,	0xAAA99887 },
+	{ MT_TX_SW_CFG0,		0x00000601 },
+	{ MT_TX_SW_CFG1,		0x00040000 },
+	{ MT_TX_SW_CFG2,		0x00000000 },
+	{ 0xa44,			0x00000000 },
+	{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+	{ MT_TSO_CTRL,			0x00000000 },
+	{ MT_BB_PA_MODE_CFG1,		0x00500055 },
+	{ MT_RF_PA_MODE_CFG1,		0x00500055 },
+	{ MT_TX_ALC_CFG_0,		0x2F2F000C },
+	{ MT_TX0_BB_GAIN_ATTEN,		0x00000000 },
+	{ MT_TX_PWR_CFG_0,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_1,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_2,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_3,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_4,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_7,		0x3A3A3A3A },
+	{ MT_TX_PWR_CFG_8,		0x0000003A },
+	{ MT_TX_PWR_CFG_9,		0x0000003A },
+	{ 0x150C,			0x00000002 },
+	{ 0x1238,			0x001700C8 },
+	{ MT_LDO_CTRL_0,		0x00A647B6 },
+	{ MT_LDO_CTRL_1,		0x6B006464 },
+	{ MT_HT_BASIC_RATE,		0x00004003 },
+	{ MT_HT_CTRL_CFG,		0x000001FF },
+	{ MT_TXOP_HLDR_ET,		0x00000000 },
+	{ MT_PN_PAD_MODE,		0x00000003 },
+	{ MT_TX_PROT_CFG6,		0xe3f42004 },
+	{ MT_TX_PROT_CFG7,		0xe3f42084 },
+	{ MT_TX_PROT_CFG8,		0xe3f42104 },
+	{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+	{ MT_BBP(CORE, 1),	0x00000002 },
+	{ MT_BBP(CORE, 4),	0x00000000 },
+	{ MT_BBP(CORE, 24),	0x00000000 },
+	{ MT_BBP(CORE, 32),	0x4003000a },
+	{ MT_BBP(CORE, 42),	0x00000000 },
+	{ MT_BBP(CORE, 44),	0x00000000 },
+	{ MT_BBP(IBI, 11),	0x0FDE8081 },
+	{ MT_BBP(AGC, 0),	0x00021400 },
+	{ MT_BBP(AGC, 1),	0x00000003 },
+	{ MT_BBP(AGC, 2),	0x003A6464 },
+	{ MT_BBP(AGC, 15),	0x88A28CB8 },
+	{ MT_BBP(AGC, 22),	0x00001E21 },
+	{ MT_BBP(AGC, 23),	0x0000272C },
+	{ MT_BBP(AGC, 24),	0x00002F3A },
+	{ MT_BBP(AGC, 25),	0x8000005A },
+	{ MT_BBP(AGC, 26),	0x007C2005 },
+	{ MT_BBP(AGC, 33),	0x00003238 },
+	{ MT_BBP(AGC, 34),	0x000A0C0C },
+	{ MT_BBP(AGC, 37),	0x2121262C },
+	{ MT_BBP(AGC, 41),	0x38383E45 },
+	{ MT_BBP(AGC, 57),	0x00001010 },
+	{ MT_BBP(AGC, 59),	0xBAA20E96 },
+	{ MT_BBP(AGC, 63),	0x00000001 },
+	{ MT_BBP(TXC, 0),	0x00280403 },
+	{ MT_BBP(TXC, 1),	0x00000000 },
+	{ MT_BBP(RXC, 1),	0x00000012 },
+	{ MT_BBP(RXC, 2),	0x00000011 },
+	{ MT_BBP(RXC, 3),	0x00000005 },
+	{ MT_BBP(RXC, 4),	0x00000000 },
+	{ MT_BBP(RXC, 5),	0xF977C4EC },
+	{ MT_BBP(RXC, 7),	0x00000090 },
+	{ MT_BBP(TXO, 8),	0x00000000 },
+	{ MT_BBP(TXBE, 0),	0x00000000 },
+	{ MT_BBP(TXBE, 4),	0x00000004 },
+	{ MT_BBP(TXBE, 6),	0x00000000 },
+	{ MT_BBP(TXBE, 8),	0x00000014 },
+	{ MT_BBP(TXBE, 9),	0x20000000 },
+	{ MT_BBP(TXBE, 10),	0x00000000 },
+	{ MT_BBP(TXBE, 12),	0x00000000 },
+	{ MT_BBP(TXBE, 13),	0x00000000 },
+	{ MT_BBP(TXBE, 14),	0x00000000 },
+	{ MT_BBP(TXBE, 15),	0x00000000 },
+	{ MT_BBP(TXBE, 16),	0x00000000 },
+	{ MT_BBP(TXBE, 17),	0x00000000 },
+	{ MT_BBP(RXFE, 1),	0x00008800 },
+	{ MT_BBP(RXFE, 3),	0x00000000 },
+	{ MT_BBP(RXFE, 4),	0x00000000 },
+	{ MT_BBP(RXO, 13),	0x00000192 },
+	{ MT_BBP(RXO, 14),	0x00060612 },
+	{ MT_BBP(RXO, 15),	0xC8321B18 },
+	{ MT_BBP(RXO, 16),	0x0000001E },
+	{ MT_BBP(RXO, 17),	0x00000000 },
+	{ MT_BBP(RXO, 18),	0xCC00A993 },
+	{ MT_BBP(RXO, 19),	0xB9CB9CB9 },
+	{ MT_BBP(RXO, 20),	0x26c00057 },
+	{ MT_BBP(RXO, 21),	0x00000001 },
+	{ MT_BBP(RXO, 24),	0x00000006 },
+	{ MT_BBP(RXO, 28),	0x0000003F },
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 4),	0x1FEDA049 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 4),	0x1FECA054 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 6),	0x00000045 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 6),	0x0000000A } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 8),	0x16344EF0 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 8),	0x122C54F2 } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 12),	0x05052879 } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 12),	0x050528F9 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 12),	0x050528F9 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 13),	0x35050004 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 13),	0x2C3A0406 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 14),	0x310F2E3C } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 14),	0x310F2A3F } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 26),	0x007C2005 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 26),	0x007C2005 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 27),	0x000000E1 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 27),	0x000000EC } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 28),	0x00060806 } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 28),	0x00050806 } },
+	{ RF_A_BAND | RF_BW_40,				{ MT_BBP(AGC, 28),	0x00060801 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_80,		{ MT_BBP(AGC, 28),	0x00060806 } },
+
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(RXO, 28),	0x0000008A } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 31),	0x00000E23 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 31),	0x00000E13 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 32),	0x00003218 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 32),	0x0000181C } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 33),	0x00003240 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 33),	0x00003218 } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 35),	0x11111616 } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 35),	0x11111516 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 35),	0x11111111 } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 39),	0x2A2A3036 } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 39),	0x2A2A2C36 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 39),	0x2A2A2A2A } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 43),	0x27273438 } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 43),	0x27272D38 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 43),	0x27271A1A } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 51),	0x17171C1C } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 51),	0xFFFFFFFF } },
+
+	{ RF_G_BAND | RF_BW_20,				{ MT_BBP(AGC, 53),	0x26262A2F } },
+	{ RF_G_BAND | RF_BW_40,				{ MT_BBP(AGC, 53),	0x2626322F } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 53),	0xFFFFFFFF } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 55),	0x40404040 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 55),	0xFFFFFFFF } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(AGC, 58),	0x00001010 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(AGC, 58),	0x00000000 } },
+
+	{ RF_G_BAND | RF_BW_20 | RF_BW_40,		{ MT_BBP(RXFE, 0),	0x3D5000E0 } },
+	{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{ MT_BBP(RXFE, 0),	0x895000E0 } },
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+	{ MT_BBP(CAL, 47), 0x000010F0 },
+	{ MT_BBP(CAL, 48), 0x00008080 },
+	{ MT_BBP(CAL, 49), 0x00000F07 },
+	{ MT_BBP(CAL, 50), 0x00000040 },
+	{ MT_BBP(CAL, 51), 0x00000404 },
+	{ MT_BBP(CAL, 52), 0x00080803 },
+	{ MT_BBP(CAL, 53), 0x00000704 },
+	{ MT_BBP(CAL, 54), 0x00002828 },
+	{ MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 0000000..42a7988
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,633 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+	{ MT_RF(0,  1), 0x01 },
+	{ MT_RF(0,  2), 0x11 },
+	/* R3 ~ R7: VCO Cal */
+	{ MT_RF(0,  3), 0x73 }, /* VCO Freq Cal */
+	{ MT_RF(0,  4), 0x30 }, /* R4 b<7>=1, VCO cal */
+	{ MT_RF(0,  5), 0x00 },
+	{ MT_RF(0,  6), 0x41 },
+	{ MT_RF(0,  7), 0x00 },
+	{ MT_RF(0,  8), 0x00 },
+	{ MT_RF(0,  9), 0x00 },
+	{ MT_RF(0, 10), 0x0C },
+	{ MT_RF(0, 11), 0x00 },
+	{ MT_RF(0, 12), 0x00 },
+	/* BG */
+	{ MT_RF(0, 13), 0x00 },
+	{ MT_RF(0, 14), 0x00 },
+	{ MT_RF(0, 15), 0x00 },
+	/* LDO */
+	{ MT_RF(0, 19), 0x20 },
+	{ MT_RF(0, 20), 0x22 },
+	{ MT_RF(0, 21), 0x12 },
+	{ MT_RF(0, 23), 0x00 },
+	{ MT_RF(0, 24), 0x33 },
+	{ MT_RF(0, 25), 0x00 },
+	/* PLL */
+	{ MT_RF(0, 26), 0x00 },
+	{ MT_RF(0, 27), 0x00 },
+	{ MT_RF(0, 28), 0x00 },
+	{ MT_RF(0, 29), 0x00 },
+	{ MT_RF(0, 30), 0x00 },
+	{ MT_RF(0, 31), 0x00 },
+	{ MT_RF(0, 32), 0x00 },
+	{ MT_RF(0, 33), 0x00 },
+	{ MT_RF(0, 34), 0x00 },
+	{ MT_RF(0, 35), 0x00 },
+	{ MT_RF(0, 36), 0x00 },
+	{ MT_RF(0, 37), 0x00 },
+	/* LO Buffer */
+	{ MT_RF(0, 38), 0x2F },
+	/* Test Ports */
+	{ MT_RF(0, 64), 0x00 },
+	{ MT_RF(0, 65), 0x80 },
+	{ MT_RF(0, 66), 0x01 },
+	{ MT_RF(0, 67), 0x04 },
+	/* ADC-DAC */
+	{ MT_RF(0, 68), 0x00 },
+	{ MT_RF(0, 69), 0x08 },
+	{ MT_RF(0, 70), 0x08 },
+	{ MT_RF(0, 71), 0x40 },
+	{ MT_RF(0, 72), 0xD0 },
+	{ MT_RF(0, 73), 0x93 },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+	/* RX logic operation */
+	{ MT_RF(5,  2), 0x0C }, /* 5G+2G */
+	{ MT_RF(5,  3), 0x00 },
+	/* TX logic operation */
+	{ MT_RF(5,  4), 0x00 },
+	{ MT_RF(5,  5), 0x84 },
+	{ MT_RF(5,  6), 0x02 },
+	/* LDO */
+	{ MT_RF(5,  7), 0x00 },
+	{ MT_RF(5,  8), 0x00 },
+	{ MT_RF(5,  9), 0x00 },
+	/* RX */
+	{ MT_RF(5, 10), 0x51 },
+	{ MT_RF(5, 11), 0x22 },
+	{ MT_RF(5, 12), 0x22 },
+	{ MT_RF(5, 13), 0x0F },
+	{ MT_RF(5, 14), 0x47 },
+	{ MT_RF(5, 15), 0x25 },
+	{ MT_RF(5, 16), 0xC7 },
+	{ MT_RF(5, 17), 0x00 },
+	{ MT_RF(5, 18), 0x00 },
+	{ MT_RF(5, 19), 0x30 },
+	{ MT_RF(5, 20), 0x33 },
+	{ MT_RF(5, 21), 0x02 },
+	{ MT_RF(5, 22), 0x32 },
+	{ MT_RF(5, 23), 0x00 },
+	{ MT_RF(5, 24), 0x25 },
+	{ MT_RF(5, 26), 0x00 },
+	{ MT_RF(5, 27), 0x12 },
+	{ MT_RF(5, 28), 0x0F },
+	{ MT_RF(5, 29), 0x00 },
+	/* LOGEN */
+	{ MT_RF(5, 30), 0x51 },
+	{ MT_RF(5, 31), 0x35 },
+	{ MT_RF(5, 32), 0x31 },
+	{ MT_RF(5, 33), 0x31 },
+	{ MT_RF(5, 34), 0x34 },
+	{ MT_RF(5, 35), 0x03 },
+	{ MT_RF(5, 36), 0x00 },
+	/* TX */
+	{ MT_RF(5, 37), 0xDD },
+	{ MT_RF(5, 38), 0xB3 },
+	{ MT_RF(5, 39), 0x33 },
+	{ MT_RF(5, 40), 0xB1 },
+	{ MT_RF(5, 41), 0x71 },
+	{ MT_RF(5, 42), 0xF2 },
+	{ MT_RF(5, 43), 0x47 },
+	{ MT_RF(5, 44), 0x77 },
+	{ MT_RF(5, 45), 0x0E },
+	{ MT_RF(5, 46), 0x10 },
+	{ MT_RF(5, 47), 0x00 },
+	{ MT_RF(5, 48), 0x53 },
+	{ MT_RF(5, 49), 0x03 },
+	{ MT_RF(5, 50), 0xEF },
+	{ MT_RF(5, 51), 0xC7 },
+	{ MT_RF(5, 52), 0x62 },
+	{ MT_RF(5, 53), 0x62 },
+	{ MT_RF(5, 54), 0x00 },
+	{ MT_RF(5, 55), 0x00 },
+	{ MT_RF(5, 56), 0x0F },
+	{ MT_RF(5, 57), 0x0F },
+	{ MT_RF(5, 58), 0x16 },
+	{ MT_RF(5, 59), 0x16 },
+	{ MT_RF(5, 60), 0x10 },
+	{ MT_RF(5, 61), 0x10 },
+	{ MT_RF(5, 62), 0xD0 },
+	{ MT_RF(5, 63), 0x6C },
+	{ MT_RF(5, 64), 0x58 },
+	{ MT_RF(5, 65), 0x58 },
+	{ MT_RF(5, 66), 0xF2 },
+	{ MT_RF(5, 67), 0xE8 },
+	{ MT_RF(5, 68), 0xF0 },
+	{ MT_RF(5, 69), 0xF0 },
+	{ MT_RF(5, 127), 0x04 },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+	/* RX logic operation */
+	{ MT_RF(6, 2), 0x0C },
+	{ MT_RF(6, 3), 0x00 },
+	/* TX logic operation */
+	{ MT_RF(6, 4), 0x00 },
+	{ MT_RF(6, 5), 0x84 },
+	{ MT_RF(6, 6), 0x02 },
+	/* LDO */
+	{ MT_RF(6, 7), 0x00 },
+	{ MT_RF(6, 8), 0x00 },
+	{ MT_RF(6, 9), 0x00 },
+	/* RX */
+	{ MT_RF(6, 10), 0x00 },
+	{ MT_RF(6, 11), 0x01 },
+	{ MT_RF(6, 13), 0x23 },
+	{ MT_RF(6, 14), 0x00 },
+	{ MT_RF(6, 15), 0x04 },
+	{ MT_RF(6, 16), 0x22 },
+	{ MT_RF(6, 18), 0x08 },
+	{ MT_RF(6, 19), 0x00 },
+	{ MT_RF(6, 20), 0x00 },
+	{ MT_RF(6, 21), 0x00 },
+	{ MT_RF(6, 22), 0xFB },
+	/* LOGEN5G */
+	{ MT_RF(6, 25), 0x76 },
+	{ MT_RF(6, 26), 0x24 },
+	{ MT_RF(6, 27), 0x04 },
+	{ MT_RF(6, 28), 0x00 },
+	{ MT_RF(6, 29), 0x00 },
+	/* TX */
+	{ MT_RF(6, 37), 0xBB },
+	{ MT_RF(6, 38), 0xB3 },
+	{ MT_RF(6, 40), 0x33 },
+	{ MT_RF(6, 41), 0x33 },
+	{ MT_RF(6, 43), 0x03 },
+	{ MT_RF(6, 44), 0xB3 },
+	{ MT_RF(6, 46), 0x17 },
+	{ MT_RF(6, 47), 0x0E },
+	{ MT_RF(6, 48), 0x10 },
+	{ MT_RF(6, 49), 0x07 },
+	{ MT_RF(6, 62), 0x00 },
+	{ MT_RF(6, 63), 0x00 },
+	{ MT_RF(6, 64), 0xF1 },
+	{ MT_RF(6, 65), 0x0F },
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+	/* E3 CR */
+	{ MT_RF(7,  0), 0x47 },
+	{ MT_RF(7,  1), 0x00 },
+	{ MT_RF(7,  2), 0x00 },
+	{ MT_RF(7,  3), 0x00 },
+	{ MT_RF(7,  4), 0x00 },
+	{ MT_RF(7, 10), 0x13 },
+	{ MT_RF(7, 11), 0x0F },
+	{ MT_RF(7, 12), 0x13 },
+	{ MT_RF(7, 13), 0x13 },
+	{ MT_RF(7, 14), 0x13 },
+	{ MT_RF(7, 15), 0x20 },
+	{ MT_RF(7, 16), 0x22 },
+	{ MT_RF(7, 17), 0x7C },
+	{ MT_RF(7, 18), 0x00 },
+	{ MT_RF(7, 19), 0x00 },
+	{ MT_RF(7, 20), 0x00 },
+	{ MT_RF(7, 21), 0xF1 },
+	{ MT_RF(7, 22), 0x11 },
+	{ MT_RF(7, 23), 0xC2 },
+	{ MT_RF(7, 24), 0x41 },
+	{ MT_RF(7, 25), 0x20 },
+	{ MT_RF(7, 26), 0x40 },
+	{ MT_RF(7, 27), 0xD7 },
+	{ MT_RF(7, 28), 0xA2 },
+	{ MT_RF(7, 29), 0x60 },
+	{ MT_RF(7, 30), 0x49 },
+	{ MT_RF(7, 31), 0x20 },
+	{ MT_RF(7, 32), 0x44 },
+	{ MT_RF(7, 33), 0xC1 },
+	{ MT_RF(7, 34), 0x60 },
+	{ MT_RF(7, 35), 0xC0 },
+	{ MT_RF(7, 61), 0x01 },
+	{ MT_RF(7, 72), 0x3C },
+	{ MT_RF(7, 73), 0x34 },
+	{ MT_RF(7, 74), 0x00 },
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+	/* bank, reg	bw/band			value */
+	{ MT_RF(0, 17),	RF_G_BAND | RF_BW_20,	0x00 },
+	{ MT_RF(0, 17),	RF_G_BAND | RF_BW_40,	0x00 },
+	{ MT_RF(0, 17),	RF_A_BAND | RF_BW_20,	0x00 },
+	{ MT_RF(0, 17),	RF_A_BAND | RF_BW_40,	0x00 },
+	{ MT_RF(0, 17),	RF_A_BAND | RF_BW_80,	0x00 },
+	{ MT_RF(7,  6),	RF_G_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7,  6),	RF_G_BAND | RF_BW_40,	0x1C },
+	{ MT_RF(7,  6),	RF_A_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7,  6),	RF_A_BAND | RF_BW_40,	0x20 },
+	{ MT_RF(7,  6),	RF_A_BAND | RF_BW_80,	0x10 },
+	{ MT_RF(7,  7),	RF_G_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7,  7),	RF_G_BAND | RF_BW_40,	0x20 },
+	{ MT_RF(7,  7),	RF_A_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7,  7),	RF_A_BAND | RF_BW_40,	0x20 },
+	{ MT_RF(7,  7),	RF_A_BAND | RF_BW_80,	0x10 },
+	{ MT_RF(7,  8),	RF_G_BAND | RF_BW_20,	0x03 },
+	{ MT_RF(7,  8),	RF_G_BAND | RF_BW_40,	0x01 },
+	{ MT_RF(7,  8),	RF_A_BAND | RF_BW_20,	0x03 },
+	{ MT_RF(7,  8),	RF_A_BAND | RF_BW_40,	0x01 },
+	{ MT_RF(7,  8),	RF_A_BAND | RF_BW_80,	0x00 },
+	{ MT_RF(7, 58),	RF_G_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7, 58),	RF_G_BAND | RF_BW_40,	0x40 },
+	{ MT_RF(7, 58),	RF_A_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7, 58),	RF_A_BAND | RF_BW_40,	0x40 },
+	{ MT_RF(7, 58),	RF_A_BAND | RF_BW_80,	0x10 },
+	{ MT_RF(7, 59),	RF_G_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7, 59),	RF_G_BAND | RF_BW_40,	0x40 },
+	{ MT_RF(7, 59),	RF_A_BAND | RF_BW_20,	0x40 },
+	{ MT_RF(7, 59),	RF_A_BAND | RF_BW_40,	0x40 },
+	{ MT_RF(7, 59),	RF_A_BAND | RF_BW_80,	0x10 },
+	{ MT_RF(7, 60),	RF_G_BAND | RF_BW_20,	0xAA },
+	{ MT_RF(7, 60),	RF_G_BAND | RF_BW_40,	0xAA },
+	{ MT_RF(7, 60),	RF_A_BAND | RF_BW_20,	0xAA },
+	{ MT_RF(7, 60),	RF_A_BAND | RF_BW_40,	0xAA },
+	{ MT_RF(7, 60),	RF_A_BAND | RF_BW_80,	0xAA },
+	{ MT_RF(7, 76),	RF_BW_20,		0x40 },
+	{ MT_RF(7, 76),	RF_BW_40,		0x40 },
+	{ MT_RF(7, 76),	RF_BW_80,		0x10 },
+	{ MT_RF(7, 77),	RF_BW_20,		0x40 },
+	{ MT_RF(7, 77),	RF_BW_40,		0x40 },
+	{ MT_RF(7, 77),	RF_BW_80,		0x10 },
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+	/* bank, reg		bw/band		value */
+	{ MT_RF(0,  16),	RF_G_BAND,	0x20 },
+	{ MT_RF(0,  16),	RF_A_BAND,	0x20 },
+	{ MT_RF(0,  18),	RF_G_BAND,	0x00 },
+	{ MT_RF(0,  18),	RF_A_BAND,	0x00 },
+	{ MT_RF(0,  39),	RF_G_BAND,	0x36 },
+	{ MT_RF(0,  39),	RF_A_BAND_LB,	0x34 },
+	{ MT_RF(0,  39),	RF_A_BAND_MB,	0x33 },
+	{ MT_RF(0,  39),	RF_A_BAND_HB,	0x31 },
+	{ MT_RF(0,  39),	RF_A_BAND_11J,	0x36 },
+	{ MT_RF(6,  12),	RF_A_BAND_LB,	0x44 },
+	{ MT_RF(6,  12),	RF_A_BAND_MB,	0x44 },
+	{ MT_RF(6,  12),	RF_A_BAND_HB,	0x55 },
+	{ MT_RF(6,  12),	RF_A_BAND_11J,	0x44 },
+	{ MT_RF(6,  17),	RF_A_BAND_LB,	0x02 },
+	{ MT_RF(6,  17),	RF_A_BAND_MB,	0x00 },
+	{ MT_RF(6,  17),	RF_A_BAND_HB,	0x00 },
+	{ MT_RF(6,  17),	RF_A_BAND_11J,	0x05 },
+	{ MT_RF(6,  24),	RF_A_BAND_LB,	0xA1 },
+	{ MT_RF(6,  24),	RF_A_BAND_MB,	0x41 },
+	{ MT_RF(6,  24),	RF_A_BAND_HB,	0x21 },
+	{ MT_RF(6,  24),	RF_A_BAND_11J,	0xE1 },
+	{ MT_RF(6,  39),	RF_A_BAND_LB,	0x36 },
+	{ MT_RF(6,  39),	RF_A_BAND_MB,	0x34 },
+	{ MT_RF(6,  39),	RF_A_BAND_HB,	0x32 },
+	{ MT_RF(6,  39),	RF_A_BAND_11J,	0x37 },
+	{ MT_RF(6,  42),	RF_A_BAND_LB,	0xFB },
+	{ MT_RF(6,  42),	RF_A_BAND_MB,	0xF3 },
+	{ MT_RF(6,  42),	RF_A_BAND_HB,	0xEB },
+	{ MT_RF(6,  42),	RF_A_BAND_11J,	0xEB },
+	{ MT_RF(6, 127),	RF_G_BAND,	0x84 },
+	{ MT_RF(6, 127),	RF_A_BAND,	0x04 },
+	{ MT_RF(7,   5),	RF_G_BAND,	0x40 },
+	{ MT_RF(7,   5),	RF_A_BAND,	0x00 },
+	{ MT_RF(7,   9),	RF_G_BAND,	0x00 },
+	{ MT_RF(7,   9),	RF_A_BAND,	0x00 },
+	{ MT_RF(7,  70),	RF_G_BAND,	0x00 },
+	{ MT_RF(7,  70),	RF_A_BAND,	0x6D },
+	{ MT_RF(7,  71),	RF_G_BAND,	0x00 },
+	{ MT_RF(7,  71),	RF_A_BAND,	0xB0 },
+	{ MT_RF(7,  78),	RF_G_BAND,	0x00 },
+	{ MT_RF(7,  78),	RF_A_BAND,	0x55 },
+	{ MT_RF(7,  79),	RF_G_BAND,	0x00 },
+	{ MT_RF(7,  79),	RF_A_BAND,	0x55 },
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+	{   1,	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2412 */
+	{   2, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1 }, /* Freq 2417 */
+	{   3, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2422 */
+	{   4, 	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2427 */
+	{   5, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2432 */
+	{   6, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2437 */
+	{   7, 	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2442 */
+	{   8, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1 }, /* Freq 2447 */
+	{   9, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2452 */
+	{  10, 	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0 }, /* Freq 2457 */
+	{  11, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2462 */
+	{  12, 	RF_G_BAND,			0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2467 */
+	{  13, 	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2472 */
+	{  14, 	RF_G_BAND,			0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2484 */
+	{ 183, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 4915 */
+	{ 184, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4920 */
+	{ 185, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4925 */
+	{ 187, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4935 */
+	{ 188, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4940 */
+	{ 189, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4945 */
+	{ 192, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4960 */
+	{ 196, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4980 */
+	{  36, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5180 */
+	{  37, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5185 */
+	{  38, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5190 */
+	{  39, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5195 */
+	{  40, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5200 */
+	{  41, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5205 */
+	{  42, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5210 */
+	{  43, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5215 */
+	{  44, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5220 */
+	{  45, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5225 */
+	{  46, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5230 */
+	{  47, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5235 */
+	{  48, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5240 */
+	{  49, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5245 */
+	{  50, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5250 */
+	{  51, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5255 */
+	{  52, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5260 */
+	{  53, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5265 */
+	{  54, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5270 */
+	{  55, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5275 */
+	{  56, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5280 */
+	{  57, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5285 */
+	{  58, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5290 */
+	{  59, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5295 */
+	{  60, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5300 */
+	{  61, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5305 */
+	{  62, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5310 */
+	{  63, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5315 */
+	{  64, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5320 */
+	{ 100, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5500 */
+	{ 101, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5505 */
+	{ 102, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5510 */
+	{ 103, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5515 */
+	{ 104, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5520 */
+	{ 105, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5525 */
+	{ 106, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5530 */
+	{ 107, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5535 */
+	{ 108, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5540 */
+	{ 109, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5545 */
+	{ 110, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5550 */
+	{ 111, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5555 */
+	{ 112, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5560 */
+	{ 113, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5565 */
+	{ 114, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5570 */
+	{ 115, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5575 */
+	{ 116, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5580 */
+	{ 117, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5585 */
+	{ 118, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5590 */
+	{ 119, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5595 */
+	{ 120, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5600 */
+	{ 121, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5605 */
+	{ 122, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5610 */
+	{ 123, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5615 */
+	{ 124, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5620 */
+	{ 125, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5625 */
+	{ 126, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5630 */
+	{ 127, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5635 */
+	{ 128, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5640 */
+	{ 129, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5645 */
+	{ 130, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5650 */
+	{ 131, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5655 */
+	{ 132, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5660 */
+	{ 133, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5665 */
+	{ 134, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5670 */
+	{ 135, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5675 */
+	{ 136, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5680 */
+	{ 137, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5685 */
+	{ 138, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5690 */
+	{ 139, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5695 */
+	{ 140, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5700 */
+	{ 141, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5705 */
+	{ 142, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5710 */
+	{ 143, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5715 */
+	{ 144, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5720 */
+	{ 145, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5725 */
+	{ 146, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5730 */
+	{ 147, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5735 */
+	{ 148, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5740 */
+	{ 149, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5745 */
+	{ 150, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5750 */
+	{ 151, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5755 */
+	{ 152, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5760 */
+	{ 153, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5765 */
+	{ 154, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5770 */
+	{ 155, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5775 */
+	{ 156, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5780 */
+	{ 157, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5785 */
+	{ 158, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5790 */
+	{ 159, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5795 */
+	{ 160, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5800 */
+	{ 161, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5805 */
+	{ 162, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5810 */
+	{ 163, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5815 */
+	{ 164, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5820 */
+	{ 165, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5825 */
+	{ 166, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5830 */
+	{ 167, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5835 */
+	{ 168, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5840 */
+	{ 169, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5845 */
+	{ 170, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5850 */
+	{ 171, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5855 */
+	{ 172, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5860 */
+	{ 173, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+	{   1,	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2412 */
+	{   2, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3 }, /* Freq 2417 */
+	{   3, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3 }, /* Freq 2422 */
+	{   4, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3 }, /* Freq 2427 */
+	{   5, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3 }, /* Freq 2432 */
+	{   6, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3 }, /* Freq 2437 */
+	{   7, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3 }, /* Freq 2442 */
+	{   8, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3 }, /* Freq 2447 */
+	{   9, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3 }, /* Freq 2452 */
+	{  10, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3 }, /* Freq 2457 */
+	{  11, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02222, 0x3 }, /* Freq 2462 */
+	{  12, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x07777, 0x3 }, /* Freq 2467 */
+	{  13, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2472 */
+	{  14, 	RF_G_BAND,			0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3 }, /* Freq 2484 */
+	{ 183, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 4915 */
+	{ 184, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 4920 */
+	{ 185, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 4925 */
+	{ 187, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 4935 */
+	{ 188, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 4940 */
+	{ 189, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 4945 */
+	{ 192, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 4960 */
+	{ 196, 	(RF_A_BAND | RF_A_BAND_11J),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 4980 */
+	{  36, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5180 */
+	{  37, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5185 */
+	{  38, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5190 */
+	{  39, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5195 */
+	{  40, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5200 */
+	{  41, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5205 */
+	{  42, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5210 */
+	{  43, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5215 */
+	{  44, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5220 */
+	{  45, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5225 */
+	{  46, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5230 */
+	{  47, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5235 */
+	{  48, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5240 */
+	{  49, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5245 */
+	{  50, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5250 */
+	{  51, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5255 */
+	{  52, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5260 */
+	{  53, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5265 */
+	{  54, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5270 */
+	{  55, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5275 */
+	{  56, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5280 */
+	{  57, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5285 */
+	{  58, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5290 */
+	{  59, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5295 */
+	{  60, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5300 */
+	{  61, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5305 */
+	{  62, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5310 */
+	{  63, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5315 */
+	{  64, 	(RF_A_BAND | RF_A_BAND_LB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5320 */
+	{ 100, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5500 */
+	{ 101, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5505 */
+	{ 102, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5510 */
+	{ 103, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5515 */
+	{ 104, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5520 */
+	{ 105, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5525 */
+	{ 106, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5530 */
+	{ 107, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5535 */
+	{ 108, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5540 */
+	{ 109, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5545 */
+	{ 110, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5550 */
+	{ 111, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5555 */
+	{ 112, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5560 */
+	{ 113, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5565 */
+	{ 114, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5570 */
+	{ 115, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5575 */
+	{ 116, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5580 */
+	{ 117, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5585 */
+	{ 118, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5590 */
+	{ 119, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5595 */
+	{ 120, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5600 */
+	{ 121, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5605 */
+	{ 122, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5610 */
+	{ 123, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5615 */
+	{ 124, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5620 */
+	{ 125, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5625 */
+	{ 126, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5630 */
+	{ 127, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5635 */
+	{ 128, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5640 */
+	{ 129, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5645 */
+	{ 130, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5650 */
+	{ 131, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5655 */
+	{ 132, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5660 */
+	{ 133, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5665 */
+	{ 134, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5670 */
+	{ 135, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5675 */
+	{ 136, 	(RF_A_BAND | RF_A_BAND_MB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5680 */
+	{ 137, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5685 */
+	{ 138, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5690 */
+	{ 139, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5695 */
+	{ 140, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5700 */
+	{ 141, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5705 */
+	{ 142, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5710 */
+	{ 143, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5715 */
+	{ 144, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5720 */
+	{ 145, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5725 */
+	{ 146, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5730 */
+	{ 147, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5735 */
+	{ 148, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5740 */
+	{ 149, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5745 */
+	{ 150, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5750 */
+	{ 151, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5755 */
+	{ 152, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5760 */
+	{ 153, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5765 */
+	{ 154, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5770 */
+	{ 155, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5775 */
+	{ 156, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5780 */
+	{ 157, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5785 */
+	{ 158, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5790 */
+	{ 159, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5795 */
+	{ 160, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5800 */
+	{ 161, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5805 */
+	{ 162, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5810 */
+	{ 163, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5815 */
+	{ 164, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5820 */
+	{ 165, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5825 */
+	{ 166, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5830 */
+	{ 167, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5835 */
+	{ 168, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5840 */
+	{ 169, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5845 */
+	{ 170, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5850 */
+	{ 171, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5855 */
+	{ 172, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5860 */
+	{ 173, 	(RF_A_BAND | RF_A_BAND_HB),	0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5865 */
+};
+
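+/* Editor's note (assumption-free summary of the code below): channels listed
+ * here are programmed through the SDM frequency plan above instead of the
+ * integer plan; see mt76x0_phy_set_chan_rf_params().
+ */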
+static const u8 mt76x0_sdm_channel[] = {
+	183, 185,  43,  45,
+	54,   55,  57,  58,
+	102, 103, 105, 106,
+	115, 117, 126, 127,
+	129, 130, 139, 141,
+	150, 151, 153, 154,
+	163, 165
+};
+
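+/* RF bank 6 overrides applied per A-band sub-band (LB/MB/HB/11J) when an
+ * external PA is in use; consumed by mt76x0_phy_set_chan_rf_params().
+ */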
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+	{ MT_RF(6, 45),	RF_A_BAND_LB,	0x63 },
+	{ MT_RF(6, 45),	RF_A_BAND_MB,	0x43 },
+	{ MT_RF(6, 45),	RF_A_BAND_HB,	0x33 },
+	{ MT_RF(6, 45),	RF_A_BAND_11J,	0x73 },
+	{ MT_RF(6, 50),	RF_A_BAND_LB,	0x02 },
+	{ MT_RF(6, 50),	RF_A_BAND_MB,	0x02 },
+	{ MT_RF(6, 50),	RF_A_BAND_HB,	0x02 },
+	{ MT_RF(6, 50),	RF_A_BAND_11J,	0x02 },
+	{ MT_RF(6, 51),	RF_A_BAND_LB,	0x02 },
+	{ MT_RF(6, 51),	RF_A_BAND_MB,	0x02 },
+	{ MT_RF(6, 51),	RF_A_BAND_HB,	0x02 },
+	{ MT_RF(6, 51),	RF_A_BAND_11J,	0x02 },
+	{ MT_RF(6, 52),	RF_A_BAND_LB,	0x08 },
+	{ MT_RF(6, 52),	RF_A_BAND_MB,	0x08 },
+	{ MT_RF(6, 52),	RF_A_BAND_HB,	0x08 },
+	{ MT_RF(6, 52),	RF_A_BAND_11J,	0x08 },
+	{ MT_RF(6, 53),	RF_A_BAND_LB,	0x08 },
+	{ MT_RF(6, 53),	RF_A_BAND_MB,	0x08 },
+	{ MT_RF(6, 53),	RF_A_BAND_HB,	0x08 },
+	{ MT_RF(6, 53),	RF_A_BAND_11J,	0x08 },
+	{ MT_RF(6, 54),	RF_A_BAND_LB,	0x0A },
+	{ MT_RF(6, 54),	RF_A_BAND_MB,	0x0A },
+	{ MT_RF(6, 54),	RF_A_BAND_HB,	0x0A },
+	{ MT_RF(6, 54),	RF_A_BAND_11J,	0x0A },
+	{ MT_RF(6, 55),	RF_A_BAND_LB,	0x0A },
+	{ MT_RF(6, 55),	RF_A_BAND_MB,	0x0A },
+	{ MT_RF(6, 55),	RF_A_BAND_HB,	0x0A },
+	{ MT_RF(6, 55),	RF_A_BAND_11J,	0x0A },
+	{ MT_RF(6, 56),	RF_A_BAND_LB,	0x05 },
+	{ MT_RF(6, 56),	RF_A_BAND_MB,	0x05 },
+	{ MT_RF(6, 56),	RF_A_BAND_HB,	0x05 },
+	{ MT_RF(6, 56),	RF_A_BAND_11J,	0x05 },
+	{ MT_RF(6, 57),	RF_A_BAND_LB,	0x05 },
+	{ MT_RF(6, 57),	RF_A_BAND_MB,	0x05 },
+	{ MT_RF(6, 57),	RF_A_BAND_HB,	0x05 },
+	{ MT_RF(6, 57),	RF_A_BAND_11J,	0x05 },
+	{ MT_RF(6, 58),	RF_A_BAND_LB,	0x05 },
+	{ MT_RF(6, 58),	RF_A_BAND_MB,	0x03 },
+	{ MT_RF(6, 58),	RF_A_BAND_HB,	0x02 },
+	{ MT_RF(6, 58),	RF_A_BAND_11J,	0x07 },
+	{ MT_RF(6, 59),	RF_A_BAND_LB,	0x05 },
+	{ MT_RF(6, 59),	RF_A_BAND_MB,	0x03 },
+	{ MT_RF(6, 59),	RF_A_BAND_HB,	0x02 },
+	{ MT_RF(6, 59),	RF_A_BAND_11J,	0x07 },
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 0000000..efb7ca9
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt76x0.h"
+
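+/* Channel switch: quiesce the calibration work, the pre-TBTT interrupt and
+ * (on MMIO devices) the DFS tasklet before reprogramming the PHY, then
+ * restart them and kick the TX queues.
+ */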
+static void
+mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	mt76x02_pre_tbtt_enable(dev, false);
+	if (mt76_is_mmio(dev))
+		tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+
+	mt76_set_channel(&dev->mt76);
+	mt76x0_phy_set_channel(dev, chandef);
+
+	/* channel cycle counters read-and-clear */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	mt76x02_edcca_init(dev);
+
+	if (mt76_is_mmio(dev)) {
+		mt76x02_dfs_init_params(dev);
+		tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+	}
+	mt76x02_pre_tbtt_enable(dev, true);
+
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		mt76x0_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
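+		/* mt76 tracks TX power in 0.5 dBm units, hence the *2 */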
+		dev->mt76.txpower_conf = hw->conf.power_level * 2;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+			mt76x0_phy_set_txpower(dev);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x0_config);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 0000000..0ef29f1
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+#include "../mt76x02_mcu.h"
+
+struct mt76x02_dev;
+
+#define MT_MCU_IVB_SIZE			0x40
+#define MT_MCU_DLM_OFFSET		0x80000
+
+/* We use the same space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP		0x40000000
+ */
+#define MT_MCU_MEMMAP_RF		0x80000000
+
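+/* Calibration opcodes sent to the MCU firmware */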
+enum mcu_calibrate {
+	MCU_CAL_R = 1,
+	MCU_CAL_RXDCOC,
+	MCU_CAL_LC,
+	MCU_CAL_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_BW,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQ,
+	MCU_CAL_TXDCOC,
+	MCU_CAL_RX_GROUP_DELAY,
+	MCU_CAL_TX_GROUP_DELAY,
+	MCU_CAL_VCO,
+	MCU_CAL_NO_SIGNAL = 0xfe,
+	MCU_CAL_FULL = 0xff,
+};
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+int mt76x0u_mcu_init(struct mt76x02_dev *dev);
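+/* The MCU sets MT_MCU_COM_REG0 to 1 once its firmware has booted */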
+static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
+{
+	return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 0000000..26517e0
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76x02.h"
+#include "eeprom.h"
+
+#define MT7610E_FIRMWARE		"mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE		"mediatek/mt7650e.bin"
+
+#define MT7610U_FIRMWARE		"mediatek/mt7610u.bin"
+
+#define MT_USB_AGGR_SIZE_LIMIT		21 /* in units of 1024B */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* in units of 33ns */
+
+static inline bool is_mt7610e(struct mt76x02_dev *dev)
+{
+	if (!mt76_is_mmio(dev))
+		return false;
+
+	return mt76_chip(&dev->mt76) == 0x7610;
+}
+
+static inline bool is_mt7630(struct mt76x02_dev *dev)
+{
+	return mt76_chip(&dev->mt76) == 0x7630;
+}
+
+/* Init */
+int mt76x0_init_hardware(struct mt76x02_dev *dev);
+int mt76x0_register_device(struct mt76x02_dev *dev);
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x02_dev *dev);
+void mt76x0_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x02_dev *dev);
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev);
+void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
new file mode 100644
index 0000000..7705e55
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+static int mt76x0e_start(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	mt76x02_mac_start(dev);
+	mt76x0_phy_calibrate(dev, true);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+				     MT_MAC_WORK_INTERVAL);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
+
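+/* Stop MAC/DMA: wait for the TX DMA engine to go idle, disable it, stop the
+ * MAC, then do the same for RX DMA.
+ */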
+static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
+		       0, 1000))
+		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
+
+	mt76x0_mac_stop(dev);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+		       0, 1000))
+		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+}
+
+static void mt76x0e_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76x0e_stop_hw(dev);
+}
+
+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	if (is_mt7630(dev))
+		return -EOPNOTSUPP;
+
+	return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
+static void
+mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	      u32 queues, bool drop)
+{
+}
+
+static const struct ieee80211_ops mt76x0e_ops = {
+	.tx = mt76x02_tx,
+	.start = mt76x0e_start,
+	.stop = mt76x0e_stop,
+	.add_interface = mt76x02_add_interface,
+	.remove_interface = mt76x02_remove_interface,
+	.config = mt76x0_config,
+	.configure_filter = mt76x02_configure_filter,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
+	.set_key = mt76x0e_set_key,
+	.conf_tx = mt76x02_conf_tx,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
+	.ampdu_action = mt76x02_ampdu_action,
+	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.get_survey = mt76_get_survey,
+	.get_txpower = mt76_get_txpower,
+	.flush = mt76x0e_flush,
+	.set_tim = mt76_set_tim,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.set_coverage_class = mt76x02_set_coverage_class,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
+};
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+	int err;
+
+	mt76x0_chip_onoff(dev, true, false);
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	mt76x02_dma_disable(dev);
+	err = mt76x0e_mcu_init(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76x02_dma_init(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76x0_init_hardware(dev);
+	if (err < 0)
+		return err;
+
+	mt76x02e_init_beacon_config(dev);
+
+	if (mt76_chip(&dev->mt76) == 0x7610) {
+		u16 val;
+
+		mt76_clear(dev, MT_COEXCFG0, BIT(0));
+
+		val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+		if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
+			mt76_set(dev, MT_XO_CTRL7, 0xc03);
+	}
+
+	mt76_clear(dev, 0x110, BIT(9));
+	mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY |
+		MT_CH_CCA_RC_EN |
+		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+	err = mt76x0_register_device(dev);
+	if (err < 0)
+		return err;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	return 0;
+}
+
+static int
+mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.txwi_size = sizeof(struct mt76x02_txwi),
+		.tx_aligned4_skbs = true,
+		.update_survey = mt76x02_update_channel,
+		.tx_prepare_skb = mt76x02_tx_prepare_skb,
+		.tx_complete_skb = mt76x02_tx_complete_skb,
+		.rx_skb = mt76x02_queue_rx_skb,
+		.rx_poll_complete = mt76x02_rx_poll_complete,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
+	};
+	struct mt76x02_dev *dev;
+	struct mt76_dev *mdev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
+				 &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+	mutex_init(&dev->phy_mutex);
+
+	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+
+	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+
+	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt76x0e_register_device(dev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void mt76x0e_cleanup(struct mt76x02_dev *dev)
+{
+	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	mt76x0_chip_onoff(dev, false, false);
+	mt76x0e_stop_hw(dev);
+	mt76x02_dma_cleanup(dev);
+	mt76x02_mcu_cleanup(dev);
+}
+
+static void
+mt76x0e_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+	mt76_unregister_device(mdev);
+	mt76x0e_cleanup(dev);
+	mt76_free_device(mdev);
+}
+
+static const struct pci_device_id mt76x0e_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7630) },
+	{ PCI_DEVICE(0x14c3, 0x7650) },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76x0e_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76x0e_device_table,
+	.probe		= mt76x0e_probe,
+	.remove		= mt76x0e_remove,
+};
+
+module_pci_driver(mt76x0e_driver);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
new file mode 100644
index 0000000..038187b
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
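+/* On the combo chips the 64-byte interrupt vector block (IVB) is uploaded
+ * separately from the rest of the ILM image (see mt76x0e_load_firmware()).
+ */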
+#define MT_MCU_IVB_ADDR		(MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
+
+static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
+{
+	bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
+	u32 val, ilm_len, dlm_len, offset = 0;
+	const struct mt76x02_fw_header *hdr;
+	const struct firmware *fw;
+	const char *firmware;
+	const u8 *fw_payload;
+	int len, err;
+
+	if (is_combo_chip)
+		firmware = MT7650E_FIRMWARE;
+	else
+		firmware = MT7610E_FIRMWARE;
+
+	err = request_firmware(&fw, firmware, dev->mt76.dev);
+	if (err)
+		return err;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		err = -EIO;
+		goto out;
+	}
+
+	hdr = (const struct mt76x02_fw_header *)fw->data;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len) {
+		err = -EIO;
+		goto out;
+	}
+
+	fw_payload = fw->data + sizeof(*hdr);
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	dev_dbg(dev->mt76.dev,
+		"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+		(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+		le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+	if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"Could not get hardware semaphore for loading fw\n");
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* upload ILM. */
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+	ilm_len = le32_to_cpu(hdr->ilm_len);
+	if (is_combo_chip) {
+		ilm_len -= MT_MCU_IVB_SIZE;
+		offset = MT_MCU_IVB_SIZE;
+	}
+	dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len);
+	mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset,
+		     ilm_len);
+
+	/* upload IVB. */
+	if (is_combo_chip) {
+		dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n",
+			MT_MCU_IVB_SIZE);
+		mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE);
+	}
+
+	/* upload DLM. */
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+	dlm_len = le32_to_cpu(hdr->dlm_len);
+	dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+	mt76_wr_copy(dev, MT_MCU_ILM_ADDR,
+		     fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len);
+
+	/* trigger firmware */
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+	if (is_combo_chip)
+		mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3);
+	else
+		mt76_wr(dev, MT_MCU_RESET_CTL, 0x300);
+
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+		dev_err(dev->mt76.dev, "Firmware failed to start\n");
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt76x02_set_ethtool_fwver(dev, hdr);
+	dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+	if (is_combo_chip)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1);
+	release_firmware(fw);
+
+	return err;
+}
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev)
+{
+	static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
+		.mcu_send_msg = mt76x02_mcu_msg_send,
+	};
+	int err;
+
+	dev->mt76.mcu_ops = &mt76x0e_mcu_ops;
+
+	err = mt76x0e_load_firmware(dev);
+	if (err < 0)
+		return err;
+
+	set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 0000000..711a352
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+#include "../mt76x02_phy.h"
+
+static int
+mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
+{
+	int ret = 0;
+	u8 bank, reg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -ENODEV;
+
+	bank = MT_RF_BANK(offset);
+	reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
+		return -EINVAL;
+
+	mutex_lock(&dev->phy_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt76_wr(dev, MT_RF_CSR_CFG,
+		FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+		FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+		MT_RF_CSR_CFG_WR |
+		MT_RF_CSR_CFG_KICK);
+
+out:
+	mutex_unlock(&dev->phy_mutex);
+
+	if (ret < 0)
+		dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+			bank, reg, ret);
+
+	return ret;
+}
+
+static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
+{
+	int ret = -ETIMEDOUT;
+	u32 val;
+	u8 bank, reg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -ENODEV;
+
+	bank = MT_RF_BANK(offset);
+	reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
+		return -EINVAL;
+
+	mutex_lock(&dev->phy_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	mt76_wr(dev, MT_RF_CSR_CFG,
+		FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+		MT_RF_CSR_CFG_KICK);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	val = mt76_rr(dev, MT_RF_CSR_CFG);
+	if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+	    FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank)
+		ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+
+out:
+	mutex_unlock(&dev->phy_mutex);
+
+	if (ret < 0)
+		dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+			bank, reg, ret);
+
+	return ret;
+}
+
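+/* RF register access: USB devices go through the MCU register-pair
+ * interface, MMIO devices use the RF CSR window directly.
+ */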
+static int
+mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
+{
+	if (mt76_is_usb(dev)) {
+		struct mt76_reg_pair pair = {
+			.reg = offset,
+			.value = val,
+		};
+
+		WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+				       &dev->mt76.state));
+		return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+	} else {
+		return mt76x0_rf_csr_wr(dev, offset, val);
+	}
+}
+
+static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
+{
+	int ret;
+	u32 val;
+
+	if (mt76_is_usb(dev)) {
+		struct mt76_reg_pair pair = {
+			.reg = offset,
+		};
+
+		WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+				       &dev->mt76.state));
+		ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+		val = pair.value;
+	} else {
+		ret = val = mt76x0_rf_csr_rr(dev, offset);
+	}
+
+	return (ret < 0) ? ret : val;
+}
+
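+/* Read-modify-write an RF register: the bits in @mask are replaced by @val,
+ * all other bits are preserved. Returns the value written or a negative
+ * error code.
+ */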
+static int
+mt76x0_rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt76x0_rf_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+
+	val |= ret & ~mask;
+
+	ret = mt76x0_rf_wr(dev, offset, val);
+	return ret ? ret : val;
+}
+
+static int
+mt76x0_rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
+{
+	return mt76x0_rf_rmw(dev, offset, 0, val);
+}
+
+static int
+mt76x0_rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
+{
+	return mt76x0_rf_rmw(dev, offset, mask, 0);
+}
+
+static void
+mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev,
+			const struct mt76_reg_pair *data,
+			int n)
+{
+	while (n-- > 0) {
+		mt76x0_rf_csr_wr(dev, data->reg, data->value);
+		data++;
+	}
+}
+
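+/* Write a table of RF register/value pairs, using CSR accesses on MMIO
+ * devices and the MCU register-pair interface on USB devices.
+ */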
+#define RF_RANDOM_WRITE(dev, tab) do {					\
+	if (mt76_is_mmio(dev))						\
+		mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab));	\
+	else								\
+		mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
+} while (0)
+
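+/* Poll the BBP version register until it reads back something other than
+ * 0x0 or 0xffffffff, i.e. until the baseband processor is ready.
+ */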
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev)
+{
+	int i = 20;
+	u32 val;
+
+	do {
+		val = mt76_rr(dev, MT_BBP(CORE, 0));
+		if (val && ~val)
+			break;
+	} while (--i);
+
+	if (!i) {
+		dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+		return -EIO;
+	}
+
+	dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
+	return 0;
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+		mt76x0_rf_wr(dev, MT_RF(5, 0), 0x45);
+		mt76x0_rf_wr(dev, MT_RF(6, 0), 0x44);
+
+		mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+		break;
+	case NL80211_BAND_5GHZ:
+		RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+		mt76x0_rf_wr(dev, MT_RF(5, 0), 0x44);
+		mt76x0_rf_wr(dev, MT_RF(6, 0), 0x45);
+
+		mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel,
+			      u16 rf_bw_band)
+{
+	const struct mt76x0_freq_item *freq_item;
+	u16 rf_band = rf_bw_band & 0xff00;
+	u16 rf_bw = rf_bw_band & 0x00ff;
+	enum nl80211_band band;
+	bool b_sdm = false;
+	u32 mac_reg;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+		if (channel == mt76x0_sdm_channel[i]) {
+			b_sdm = true;
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+		if (channel == mt76x0_frequency_plan[i].channel) {
+			rf_band = mt76x0_frequency_plan[i].band;
+
+			if (b_sdm)
+				freq_item = &mt76x0_sdm_frequency_plan[i];
+			else
+				freq_item = &mt76x0_frequency_plan[i];
+
+			mt76x0_rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+			mt76x0_rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+			mt76x0_rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+			mt76x0_rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+			mt76x0_rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+			mt76x0_rf_rmw(dev, MT_RF(0, 32), 0xe0,
+				      freq_item->pllR32_b7b5);
+
+			/* R32<4:0> pll_den: (Denominator - 8) */
+			mt76x0_rf_rmw(dev, MT_RF(0, 32), MT_RF_PLL_DEN_MASK,
+				      freq_item->pllR32_b4b0);
+
+			/* R31<7:5> */
+			mt76x0_rf_rmw(dev, MT_RF(0, 31), 0xe0,
+				      freq_item->pllR31_b7b5);
+
+			/* R31<4:0> pll_k (Numerator) */
+			mt76x0_rf_rmw(dev, MT_RF(0, 31), MT_RF_PLL_K_MASK,
+				      freq_item->pllR31_b4b0);
+
+			/* R30<7> sdm_reset_n */
+			if (b_sdm) {
+				mt76x0_rf_clear(dev, MT_RF(0, 30),
+						MT_RF_SDM_RESET_MASK);
+				mt76x0_rf_set(dev, MT_RF(0, 30),
+					      MT_RF_SDM_RESET_MASK);
+			} else {
+				mt76x0_rf_rmw(dev, MT_RF(0, 30),
+					      MT_RF_SDM_RESET_MASK,
+					      freq_item->pllR30_b7);
+			}
+
+			/* R30<6:2> sdmmash_prbs,sin */
+			mt76x0_rf_rmw(dev, MT_RF(0, 30),
+				      MT_RF_SDM_MASH_PRBS_MASK,
+				      freq_item->pllR30_b6b2);
+
+			/* R30<1> sdm_bp */
+			mt76x0_rf_rmw(dev, MT_RF(0, 30), MT_RF_SDM_BP_MASK,
+				      freq_item->pllR30_b1 << 1);
+
+			/* R30<0> R29<7:0> (hex) pll_n */
+			mt76x0_rf_wr(dev, MT_RF(0, 29),
+				     freq_item->pll_n & 0xff);
+
+			mt76x0_rf_rmw(dev, MT_RF(0, 30), 0x1,
+				      (freq_item->pll_n >> 8) & 0x1);
+
+			/* R28<7:6> isi_iso */
+			mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_ISI_ISO_MASK,
+				      freq_item->pllR28_b7b6);
+
+			/* R28<5:4> pfd_dly */
+			mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_PFD_DLY_MASK,
+				      freq_item->pllR28_b5b4);
+
+			/* R28<3:2> clksel option */
+			mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_CLK_SEL_MASK,
+				      freq_item->pllR28_b3b2);
+
+			/* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+			mt76x0_rf_wr(dev, MT_RF(0, 26),
+				     freq_item->pll_sdm_k & 0xff);
+			mt76x0_rf_wr(dev, MT_RF(0, 27),
+				     (freq_item->pll_sdm_k >> 8) & 0xff);
+
+			mt76x0_rf_rmw(dev, MT_RF(0, 28), 0x3,
+				      (freq_item->pll_sdm_k >> 16) & 0x3);
+
+			/* R24<1:0> xo_div */
+			mt76x0_rf_rmw(dev, MT_RF(0, 24), MT_RF_XO_DIV_MASK,
+				      freq_item->pllR24_b1b0);
+
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+		if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+			mt76x0_rf_wr(dev,
+				     mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+				     mt76x0_rf_bw_switch_tab[i].value);
+		} else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+			   (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+			mt76x0_rf_wr(dev,
+				     mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+				     mt76x0_rf_bw_switch_tab[i].value);
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+		if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+			mt76x0_rf_wr(dev,
+				     mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+				     mt76x0_rf_band_switch_tab[i].value);
+		}
+	}
+
+	mt76_clear(dev, MT_RF_MISC, 0xc);
+
+	band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	if (mt76x02_ext_pa_enabled(dev, band)) {
+		/* MT_RF_MISC (offset: 0x0518)
+		 * [2]1'b1: enable external A band PA
+		 *    1'b0: disable external A band PA
+		 * [3]1'b1: enable external G band PA
+		 *    1'b0: disable external G band PA
+		 */
+		if (rf_band & RF_A_BAND)
+			mt76_set(dev, MT_RF_MISC, BIT(2));
+		else
+			mt76_set(dev, MT_RF_MISC, BIT(3));
+
+		/* External PA */
+		for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+			if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+				mt76x0_rf_wr(dev,
+					mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+					mt76x0_rf_ext_pa_tab[i].value);
+	}
+
+	if (rf_band & RF_G_BAND) {
+		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+		/* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+		mac_reg &= 0x896400FF;
+		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+	} else {
+		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+		/* Set Atten mode = 0
+		 * For Ext A band, Disable Tx Inc dcoc Cal.
+		 */
+		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+		mac_reg &= 0x890400FF;
+		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+	}
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+		const struct mt76_reg_pair *pair = &item->reg_pair;
+
+		if ((rf_bw_band & item->bw_band) != rf_bw_band)
+			continue;
+
+		if (pair->reg == MT_BBP(AGC, 8)) {
+			u32 val = pair->value;
+			u8 gain;
+
+			gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+			gain -= dev->cal.rx.lna_gain * 2;
+			val &= ~MT_BBP_AGC_GAIN;
+			val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+			mt76_wr(dev, pair->reg, val);
+		} else {
+			mt76_wr(dev, pair->reg, pair->value);
+		}
+	}
+}
+
+static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
+{
+	u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
+	u16 ee_cfg1 = mt76x02_eeprom_get(dev, MT_EE_CFG1_INIT);
+	u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	u32 wlan, coex3;
+	bool ant_div;
+
+	wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+	coex3 = mt76_rr(dev, MT_COEXCFG3);
+
+	ee_ant &= ~(BIT(14) | BIT(12));
+	wlan  &= ~(BIT(6) | BIT(5));
+	coex3 &= ~GENMASK(5, 2);
+
+	if (ee_ant & MT_EE_ANTENNA_DUAL) {
+		/* dual antenna mode */
+		ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
+			  (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
+		if (ant_div)
+			ee_ant |= BIT(12);
+		else
+			coex3 |= BIT(4);
+		coex3 |= BIT(3);
+		if (dev->mt76.cap.has_2ghz)
+			wlan |= BIT(6);
+	} else {
+		/* single antenna mode */
+		if (dev->mt76.cap.has_5ghz) {
+			coex3 |= BIT(3) | BIT(4);
+		} else {
+			wlan |= BIT(6);
+			coex3 |= BIT(1);
+		}
+	}
+
+	if (is_mt7630(dev))
+		ee_ant |= BIT(14) | BIT(11);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
+	mt76_rmw(dev, MT_CMB_CTRL, GENMASK(15, 0), ee_ant);
+	mt76_rmw(dev, MT_CSR_EE_CFG1, GENMASK(15, 0), ee_cfg1);
+	mt76_clear(dev, MT_COEXCFG0, BIT(2));
+	mt76_wr(dev, MT_COEXCFG3, coex3);
+}
+
+static void
+mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
+{
+	enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
+	int bw;
+
+	switch (width) {
+	default:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		bw = BW_20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		bw = BW_40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		bw = BW_80;
+		break;
+	case NL80211_CHAN_WIDTH_10:
+		bw = BW_10;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+	case NL80211_CHAN_WIDTH_160:
+	case NL80211_CHAN_WIDTH_5:
+		/* TODO error */
+		return;
+	}
+
+	mt76x02_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	u32 val;
+
+	if (chan->band == NL80211_BAND_5GHZ)
+		mt76x0_rf_clear(dev, MT_RF(0, 67), 0xf);
+
+	/* bypass ADDA control */
+	mt76_wr(dev, MT_RF_SETTING_0, 0x60002237);
+	mt76_wr(dev, MT_RF_BYPASS_0, 0xffffffff);
+
+	/* bbp sw reset */
+	mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+	usleep_range(500, 1000);
+	mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+	val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+	mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+	/* enable TX with DAC0 input */
+	mt76_wr(dev, MT_BBP(TXBE, 6), BIT(31));
+
+	mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200);
+	dev->cal.tssi_dc = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+	/* stop bypass ADDA */
+	mt76_wr(dev, MT_RF_BYPASS_0, 0);
+	/* stop TX */
+	mt76_wr(dev, MT_BBP(TXBE, 6), 0);
+	/* bbp sw reset */
+	mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+	usleep_range(500, 1000);
+	mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+	if (chan->band == NL80211_BAND_5GHZ)
+		mt76x0_rf_rmw(dev, MT_RF(0, 67), 0xf, 0x4);
+}
+
+static int
+mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
+			      u8 *info)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	u32 val;
+
+	val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+	mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+	if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
+		mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
+		return -ETIMEDOUT;
+	}
+
+	*ltssi = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+	if (chan->band == NL80211_BAND_5GHZ)
+		*ltssi += 128;
+
+	/* set packet info#1 mode */
+	mt76_wr(dev, MT_BBP(CORE, 34), 0x80041);
+	info[0] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+	/* set packet info#2 mode */
+	mt76_wr(dev, MT_BBP(CORE, 34), 0x80042);
+	info[1] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+	/* set packet info#3 mode */
+	mt76_wr(dev, MT_BBP(CORE, 34), 0x80043);
+	info[2] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+	return 0;
+}
+
+static u8 mt76x0_phy_get_rf_pa_mode(struct mt76x02_dev *dev,
+				    int index, u8 tx_rate)
+{
+	u32 val, reg;
+
+	reg = (index == 1) ? MT_RF_PA_MODE_CFG1 : MT_RF_PA_MODE_CFG0;
+	val = mt76_rr(dev, reg);
+	return (val & (3 << (tx_rate * 2))) >> (tx_rate * 2);
+}
+
+static int
+mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
+			    u8 *info, s8 *target_power,
+			    s8 *target_pa_power)
+{
+	u8 tx_rate, cur_power;
+
+	cur_power = mt76_rr(dev, MT_TX_ALC_CFG_0) & MT_TX_ALC_CFG_0_CH_INIT_0;
+	switch (tx_mode) {
+	case 0:
+		/* cck rates */
+		tx_rate = (info[0] & 0x60) >> 5;
+		if (tx_rate > 3)
+			return -EINVAL;
+
+		*target_power = cur_power + dev->mt76.rate_power.cck[tx_rate];
+		*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate);
+		break;
+	case 1: {
+		u8 index;
+
+		/* ofdm rates */
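+		/* The switch below maps the legacy OFDM rate code reported in
+		 * the TSSI packet info (0xb, 0xf, 0xa, ... - presumably the
+		 * 802.11a/g SIGNAL field encoding for 6..54 Mbit/s) onto the
+		 * rate_power.ofdm[] index.
+		 */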
+		tx_rate = (info[0] & 0xf0) >> 4;
+		switch (tx_rate) {
+		case 0xb:
+			index = 0;
+			break;
+		case 0xf:
+			index = 1;
+			break;
+		case 0xa:
+			index = 2;
+			break;
+		case 0xe:
+			index = 3;
+			break;
+		case 0x9:
+			index = 4;
+			break;
+		case 0xd:
+			index = 5;
+			break;
+		case 0x8:
+			index = 6;
+			break;
+		case 0xc:
+			index = 7;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		*target_power = cur_power + dev->mt76.rate_power.ofdm[index];
+		*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4);
+		break;
+	}
+	case 4:
+		/* vht rates */
+		tx_rate = info[1] & 0xf;
+		if (tx_rate > 9)
+			return -EINVAL;
+
+		*target_power = cur_power + dev->mt76.rate_power.vht[tx_rate];
+		*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+		break;
+	default:
+		/* ht rates */
+		tx_rate = info[1] & 0x7f;
+		if (tx_rate > 9)
+			return -EINVAL;
+
+		*target_power = cur_power + dev->mt76.rate_power.ht[tx_rate];
+		*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+		break;
+	}
+
+	return 0;
+}
+
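+/* Linear-to-dB helper used by the TSSI compensation: the input is normalized
+ * to a mantissa/exponent pair, a piecewise-linear fit approximates the
+ * fractional part of log2(), and the final shift-and-add sequence scales the
+ * result by roughly 6.02 (20 * log10(2)) into the fixed-point dB units the
+ * caller expects. This is a best-effort reading of the code, not vendor
+ * documentation.
+ */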
+static s16 mt76x0_phy_lin2db(u16 val)
+{
+	u32 mantissa = val << 4;
+	int ret, data;
+	s16 exp = -4;
+
+	while (mantissa < BIT(15)) {
+		mantissa <<= 1;
+		if (--exp < -20)
+			return -10000;
+	}
+	while (mantissa > 0xffff) {
+		mantissa >>= 1;
+		if (++exp > 20)
+			return -10000;
+	}
+
+	/* s(15,0) */
+	if (mantissa <= 47104)
+		data = mantissa + (mantissa >> 3) + (mantissa >> 4) - 38400;
+	else
+		data = mantissa - (mantissa >> 3) - (mantissa >> 6) - 23040;
+	data = max_t(int, 0, data);
+
+	ret = ((15 + exp) << 15) + data;
+	ret = (ret << 2) + (ret << 1) + (ret >> 6) + (ret >> 7);
+	return ret >> 10;
+}
+
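+/* The delta-power computation below keeps everything in a fixed-point scale
+ * of 8192 units per dB (see the "x * 8192" constants); target_power appears
+ * to be in 0.5 dB steps, hence the << 12. The result is rounded (+/- 2048
+ * before the >> 12) to the nearest temperature compensation code written to
+ * MT_TX_ALC_CFG_1. Inferred from the code, not from vendor documentation.
+ */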
+static int
+mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
+			   s8 target_power, s8 target_pa_power,
+			   s16 ltssi)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	int tssi_target = target_power << 12, tssi_slope;
+	int tssi_offset, tssi_db, ret;
+	u32 data;
+	u16 val;
+
+	if (chan->band == NL80211_BAND_5GHZ) {
+		u8 bound[7];
+		int i, err;
+
+		err = mt76x02_eeprom_copy(dev, MT_EE_TSSI_BOUND1, bound,
+					  sizeof(bound));
+		if (err < 0)
+			return err;
+
+		for (i = 0; i < ARRAY_SIZE(bound); i++) {
+			if (chan->hw_value <= bound[i] || !bound[i])
+				break;
+		}
+		val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_5G + i * 2);
+
+		tssi_offset = val >> 8;
+		if ((tssi_offset >= 64 && tssi_offset <= 127) ||
+		    (tssi_offset & BIT(7)))
+			tssi_offset -= BIT(8);
+	} else {
+		val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_2G);
+
+		tssi_offset = val >> 8;
+		if (tssi_offset & BIT(7))
+			tssi_offset -= BIT(8);
+	}
+	tssi_slope = val & 0xff;
+
+	switch (target_pa_power) {
+	case 1:
+		if (chan->band == NL80211_BAND_2GHZ)
+			tssi_target += 29491; /* 3.6 * 8192 */
+		/* fall through */
+	case 0:
+		break;
+	default:
+		tssi_target += 4424; /* 0.54 * 8192 */
+		break;
+	}
+
+	if (!tx_mode) {
+		data = mt76_rr(dev, MT_BBP(CORE, 1));
+		if (is_mt7630(dev) && mt76_is_mmio(dev)) {
+			int offset;
+
+			/* 2.3 * 8192 or 1.5 * 8192 */
+			offset = (data & BIT(5)) ? 18841 : 12288;
+			tssi_target += offset;
+		} else if (data & BIT(5)) {
+			/* 0.8 * 8192 */
+			tssi_target += 6554;
+		}
+	}
+
+	data = mt76_rr(dev, MT_BBP(TXBE, 4));
+	switch (data & 0x3) {
+	case 1:
+		tssi_target -= 49152; /* -6db * 8192 */
+		break;
+	case 2:
+		tssi_target -= 98304; /* -12db * 8192 */
+		break;
+	case 3:
+		tssi_target += 49152; /* 6db * 8192 */
+		break;
+	default:
+		break;
+	}
+
+	tssi_db = mt76x0_phy_lin2db(ltssi - dev->cal.tssi_dc) * tssi_slope;
+	if (chan->band == NL80211_BAND_5GHZ) {
+		tssi_db += ((tssi_offset - 50) << 10); /* offset s4.3 */
+		tssi_target -= tssi_db;
+		if (ltssi > 254 && tssi_target > 0) {
+			/* upper saturate */
+			tssi_target = 0;
+		}
+	} else {
+		tssi_db += (tssi_offset << 9); /* offset s3.4 */
+		tssi_target -= tssi_db;
+		/* upper-lower saturate */
+		if ((ltssi > 126 && tssi_target > 0) ||
+		    ((ltssi - dev->cal.tssi_dc) < 1 && tssi_target < 0)) {
+			tssi_target = 0;
+		}
+	}
+
+	if ((dev->cal.tssi_target ^ tssi_target) < 0 &&
+	    dev->cal.tssi_target > -4096 && dev->cal.tssi_target < 4096 &&
+	    tssi_target > -4096 && tssi_target < 4096) {
+		if ((tssi_target < 0 &&
+		     tssi_target + dev->cal.tssi_target > 0) ||
+		    (tssi_target > 0 &&
+		     tssi_target + dev->cal.tssi_target <= 0))
+			tssi_target = 0;
+		else
+			dev->cal.tssi_target = tssi_target;
+	} else {
+		dev->cal.tssi_target = tssi_target;
+	}
+
+	/* round the compensation value to the nearest compensation code */
+	if (tssi_target > 0)
+		tssi_target += 2048;
+	else
+		tssi_target -= 2048;
+	tssi_target >>= 12;
+
+	ret = mt76_get_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP);
+	if (ret & BIT(5))
+		ret -= BIT(6);
+	ret += tssi_target;
+
+	ret = min_t(int, 31, ret);
+	return max_t(int, -32, ret);
+}
+
+static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
+{
+	s8 target_power, target_pa_power;
+	u8 tssi_info[3], tx_mode;
+	s16 ltssi;
+	s8 val;
+
+	if (mt76x0_phy_tssi_adc_calibrate(dev, &ltssi, tssi_info) < 0)
+		return;
+
+	tx_mode = tssi_info[0] & 0x7;
+	if (mt76x0_phy_get_target_power(dev, tx_mode, tssi_info,
+					&target_power, &target_pa_power) < 0)
+		return;
+
+	val = mt76x0_phy_get_delta_power(dev, tx_mode, target_power,
+					 target_pa_power, ltssi);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, val);
+}
+
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
+{
+	struct mt76_rate_power *t = &dev->mt76.rate_power;
+	s8 info;
+
+	mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
+	mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
+
+	mt76x02_add_rate_power_offset(t, info);
+	mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
+	dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+	mt76x02_add_rate_power_offset(t, -info);
+
+	dev->target_power = info;
+	mt76x02_phy_set_txpower(dev, info, info);
+}
+
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+	u32 val, tx_alc, reg_val;
+
+	if (is_mt7630(dev))
+		return;
+
+	if (power_on) {
+		mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value);
+		usleep_range(10, 20);
+
+		if (mt76x0_tssi_enabled(dev)) {
+			mt76_wr(dev, MT_MAC_SYS_CTRL,
+				MT_MAC_SYS_CTRL_ENABLE_RX);
+			mt76x0_phy_tssi_dc_calibrate(dev);
+			mt76_wr(dev, MT_MAC_SYS_CTRL,
+				MT_MAC_SYS_CTRL_ENABLE_TX |
+				MT_MAC_SYS_CTRL_ENABLE_RX);
+		}
+	}
+
+	tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+	mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+	usleep_range(500, 700);
+
+	reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+	mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+	if (is_5ghz) {
+		if (chan->hw_value < 100)
+			val = 0x701;
+		else if (chan->hw_value < 140)
+			val = 0x801;
+		else
+			val = 0x901;
+	} else {
+		val = 0x600;
+	}
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val);
+	msleep(350);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+	usleep_range(15000, 20000);
+
+	mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+	mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
+
+void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+			    struct cfg80211_chan_def *chandef)
+{
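+	/* ext_cca_chan[] remaps the MT_EXT_CCA_CFG CCA0..CCA3 fields and the
+	 * CCA mask according to the position of the primary 20 MHz channel
+	 * inside the 40/80 MHz group (selected via ch_group_index below).
+	 * Descriptive comment only, derived from how the table is used.
+	 */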
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	int ch_group_index, freq, freq1;
+	u8 channel;
+	u32 val;
+	u16 rf_bw_band;
+
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+	channel = chandef->chan->hw_value;
+	rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		if (freq1 > freq)
+			ch_group_index = 0;
+		else
+			ch_group_index = 1;
+		channel += 2 - ch_group_index * 4;
+		rf_bw_band |= RF_BW_40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		channel += 6 - ch_group_index * 4;
+		rf_bw_band |= RF_BW_80;
+		break;
+	default:
+		ch_group_index = 0;
+		rf_bw_band |= RF_BW_20;
+		break;
+	}
+
+	if (mt76_is_usb(dev)) {
+		mt76x0_phy_bbp_set_bw(dev, chandef->width);
+	} else {
+		if (chandef->width == NL80211_CHAN_WIDTH_80 ||
+		    chandef->width == NL80211_CHAN_WIDTH_40)
+			val = 0x201;
+		else
+			val = 0x601;
+		mt76_wr(dev, MT_TX_SW_CFG0, val);
+	}
+	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+	mt76x02_phy_set_band(dev, chandef->chan->band,
+			     ch_group_index & 1);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	mt76x0_phy_set_band(dev, chandef->chan->band);
+	mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+	/* set Japan Tx filter at channel 14 */
+	if (channel == 14)
+		mt76_set(dev, MT_BBP(CORE, 1), 0x20);
+	else
+		mt76_clear(dev, MT_BBP(CORE, 1), 0x20);
+
+	mt76x0_read_rx_gain(dev);
+	mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+
+	/* enable vco */
+	mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7));
+	if (scan)
+		return;
+
+	mt76x02_init_agc_gain(dev);
+	mt76x0_phy_calibrate(dev, false);
+	mt76x0_phy_set_txpower(dev);
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
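+/* Periodic temperature tracking: the sensor is sampled through RF bank 7/0
+ * registers and BBP CORE 34/35, converted with the (35 * delta) / 10 + 25
+ * formula below (apparently degrees Celsius), and larger drifts trigger a VCO
+ * recalibration (> 20) or a full PHY recalibration (> 30). Summary comment
+ * only, inferred from the code.
+ */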
+static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
+{
+	u8 rf_b7_73, rf_b0_66, rf_b0_67;
+	s8 val;
+
+	rf_b7_73 = mt76x0_rf_rr(dev, MT_RF(7, 73));
+	rf_b0_66 = mt76x0_rf_rr(dev, MT_RF(0, 66));
+	rf_b0_67 = mt76x0_rf_rr(dev, MT_RF(0, 67));
+
+	mt76x0_rf_wr(dev, MT_RF(7, 73), 0x02);
+	mt76x0_rf_wr(dev, MT_RF(0, 66), 0x23);
+	mt76x0_rf_wr(dev, MT_RF(0, 67), 0x01);
+
+	mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+	if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
+		mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
+		goto done;
+	}
+
+	val = mt76_rr(dev, MT_BBP(CORE, 35));
+	val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
+
+	if (abs(val - dev->cal.temp_vco) > 20) {
+		mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
+				      dev->mt76.chandef.chan->hw_value);
+		dev->cal.temp_vco = val;
+	}
+	if (abs(val - dev->cal.temp) > 30) {
+		mt76x0_phy_calibrate(dev, false);
+		dev->cal.temp = val;
+	}
+
+done:
+	mt76x0_rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+	mt76x0_rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+	mt76x0_rf_wr(dev, MT_RF(0, 67), rf_b0_67);
+}
+
+static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+	u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+
+	mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
+
+	if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+	    !is_mt7630(dev))
+		mt76x02_phy_dfs_adjust_agc(dev);
+}
+
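+/* VGA gain tracking: low_gain counts how many of the two RSSI thresholds the
+ * minimum average RSSI exceeds (0..2); a flip of its upper bit forces a gain
+ * re-init, otherwise the current gain is only nudged by
+ * mt76x02_phy_adjust_vga_gain() (presumably based on false CCA counts).
+ * Inferred from the code below.
+ */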
+static void
+mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+	bool gain_change;
+	u8 gain_delta;
+	int low_gain;
+
+	dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+	if (!dev->cal.avg_rssi_all)
+		dev->cal.avg_rssi_all = -75;
+
+	low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+		(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+	gain_change = dev->cal.low_gain < 0 ||
+		      (dev->cal.low_gain & 2) ^ (low_gain & 2);
+	dev->cal.low_gain = low_gain;
+
+	if (!gain_change) {
+		if (mt76x02_phy_adjust_vga_gain(dev))
+			mt76x0_phy_set_gain_val(dev);
+		return;
+	}
+
+	dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
+	gain_delta = (low_gain == 2) ? 10 : 0;
+
+	dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
+	mt76x0_phy_set_gain_val(dev);
+
+	/* clear false CCA counters */
+	mt76_rr(dev, MT_RX_STAT_1);
+}
+
+static void mt76x0_phy_calibration_work(struct work_struct *work)
+{
+	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+					       cal_work.work);
+
+	mt76x0_phy_update_channel_gain(dev);
+	if (mt76x0_tssi_enabled(dev))
+		mt76x0_phy_tssi_calibrate(dev);
+	else
+		mt76x0_phy_temp_sensor(dev);
+
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     4 * MT_CALIBRATE_INTERVAL);
+}
+
+static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
+				      const struct mt76_reg_pair *rp, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		u32 reg = rp[i].reg;
+		u8 val = rp[i].value;
+
+		switch (reg) {
+		case MT_RF(0, 3):
+			if (mt76_is_mmio(dev)) {
+				if (is_mt7630(dev))
+					val = 0x70;
+				else
+					val = 0x63;
+			} else {
+				val = 0x73;
+			}
+			break;
+		case MT_RF(0, 21):
+			if (is_mt7610e(dev))
+				val = 0x10;
+			else
+				val = 0x12;
+			break;
+		case MT_RF(5, 2):
+			if (is_mt7630(dev))
+				val = 0x1d;
+			else if (is_mt7610e(dev))
+				val = 0x00;
+			else
+				val = 0x0c;
+			break;
+		default:
+			break;
+		}
+		mt76x0_rf_wr(dev, reg, val);
+	}
+}
+
+static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
+{
+	int i;
+	u8 val;
+
+	mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab,
+				  ARRAY_SIZE(mt76x0_rf_central_tab));
+	mt76x0_rf_patch_reg_array(dev, mt76x0_rf_2g_channel_0_tab,
+				  ARRAY_SIZE(mt76x0_rf_2g_channel_0_tab));
+	RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+	RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+		const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+		if (item->bw_band == RF_BW_20)
+			mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
+		else if (((RF_G_BAND | RF_BW_20) & item->bw_band) ==
+			  (RF_G_BAND | RF_BW_20))
+			mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+		if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+			mt76x0_rf_wr(dev,
+				     mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+				     mt76x0_rf_band_switch_tab[i].value);
+		}
+	}
+
+	/* Frequency calibration
+	 * E1: B0.R22<6:0>: xo_cxo<6:0>
+	 * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+	 */
+	mt76x0_rf_wr(dev, MT_RF(0, 22),
+		     min_t(u8, dev->cal.rx.freq_offset, 0xbf));
+	val = mt76x0_rf_rr(dev, MT_RF(0, 22));
+
+	/* DAC reset procedure during power-up:
+	 * - set B0.R73<7>
+	 * - clear B0.R73<7>
+	 * - set B0.R73<7>
+	 */
+	mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
+	mt76x0_rf_clear(dev, MT_RF(0, 73), BIT(7));
+	mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
+
+	/* vcocal_en: initiate VCO calibration (reset after completion) */
+	mt76x0_rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+void mt76x0_phy_init(struct mt76x02_dev *dev)
+{
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
+
+	mt76x0_phy_ant_select(dev);
+	mt76x0_phy_rf_init(dev);
+	mt76x02_phy_set_rxpath(dev);
+	mt76x02_phy_set_txdac(dev);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 0000000..441d655
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND	0x0100
+#define RF_A_BAND	0x0200
+#define RF_A_BAND_LB	0x0400
+#define RF_A_BAND_MB	0x0800
+#define RF_A_BAND_HB	0x1000
+#define RF_A_BAND_11J	0x2000
+
+#define RF_BW_20        1
+#define RF_BW_40        2
+#define RF_BW_10        4
+#define RF_BW_80        8
+
+#define MT_RF(bank, reg)		((bank) << 16 | (reg))
+#define MT_RF_BANK(offset)		((offset) >> 16)
+#define MT_RF_REG(offset)		((offset) & 0xff)
+
+#define MT_RF_VCO_BP_CLOSE_LOOP		BIT(3)
+#define MT_RF_VCO_BP_CLOSE_LOOP_MASK	GENMASK(3, 0)
+#define MT_RF_VCO_CAL_MASK		GENMASK(2, 0)
+#define MT_RF_START_TIME		0x3
+#define MT_RF_START_TIME_MASK		GENMASK(2, 0)
+#define MT_RF_SETTLE_TIME_MASK		GENMASK(6, 4)
+
+#define MT_RF_PLL_DEN_MASK		GENMASK(4, 0)
+#define MT_RF_PLL_K_MASK		GENMASK(4, 0)
+#define MT_RF_SDM_RESET_MASK		BIT(7)
+#define MT_RF_SDM_MASH_PRBS_MASK	GENMASK(6, 2)
+#define MT_RF_SDM_BP_MASK		BIT(1)
+#define MT_RF_ISI_ISO_MASK		GENMASK(7, 6)
+#define MT_RF_PFD_DLY_MASK		GENMASK(5, 4)
+#define MT_RF_CLK_SEL_MASK		GENMASK(3, 2)
+#define MT_RF_XO_DIV_MASK		GENMASK(1, 0)
+
+struct mt76x0_bbp_switch_item {
+	u16 bw_band;
+	struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+	u32 rf_bank_reg;
+	u16 bw_band;
+	u8 value;
+};
+
+struct mt76x0_freq_item {
+	u8 channel;
+	u32 band;
+	u8 pllR37;
+	u8 pllR36;
+	u8 pllR35;
+	u8 pllR34;
+	u8 pllR33;
+	u8 pllR32_b7b5;
+	u8 pllR32_b4b0; /* PLL_DEN (denominator - 8) */
+	u8 pllR31_b7b5;
+	u8 pllR31_b4b0; /* PLL_K (numerator) */
+	u8 pllR30_b7;	/* sdm_reset_n */
+	u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+	u8 pllR30_b1;	/* sdm_bp */
+	u16 pll_n;	/* R30<0>, R29<7:0> (hex) */
+	u8 pllR28_b7b6; /* isi,iso */
+	u8 pllR28_b5b4;	/* pfd_dly */
+	u8 pllR28_b3b2;	/* clksel option */
+	u32 pll_sdm_k;	/* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+	u8 pllR24_b1b0;	/* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+	s8 mcs_power;
+	u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+	struct mt76x0_rate_pwr_item cck[4];
+	struct mt76x0_rate_pwr_item ofdm[8];
+	struct mt76x0_rate_pwr_item ht[8];
+	struct mt76x0_rate_pwr_item vht[10];
+	struct mt76x0_rate_pwr_item stbc[8];
+	struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 0000000..65d404e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+static struct usb_device_id mt76x0_device_table[] = {
+	{ USB_DEVICE(0x148F, 0x7610) },	/* MT7610U */
+	{ USB_DEVICE(0x13B1, 0x003E) },	/* Linksys AE6000 */
+	{ USB_DEVICE(0x0E8D, 0x7610) },	/* Sabrent NTWLAC */
+	{ USB_DEVICE(0x7392, 0xa711) },	/* Edimax 7711mac */
+	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax / Elecom  */
+	{ USB_DEVICE(0x148f, 0x761a) },	/* TP-Link TL-WDN5200 */
+	{ USB_DEVICE(0x148f, 0x760a) },	/* TP-Link unknown */
+	{ USB_DEVICE(0x0b05, 0x17d1) },	/* Asus USB-AC51 */
+	{ USB_DEVICE(0x0b05, 0x17db) },	/* Asus USB-AC50 */
+	{ USB_DEVICE(0x0df6, 0x0075) },	/* Sitecom WLA-3100 */
+	{ USB_DEVICE(0x2019, 0xab31) },	/* Planex GW-450D */
+	{ USB_DEVICE(0x2001, 0x3d02) },	/* D-LINK DWA-171 rev B1 */
+	{ USB_DEVICE(0x0586, 0x3425) },	/* Zyxel NWD6505 */
+	{ USB_DEVICE(0x07b8, 0x7610) },	/* AboCom AU7212 */
+	{ USB_DEVICE(0x04bb, 0x0951) },	/* I-O DATA WN-AC433UK */
+	{ USB_DEVICE(0x057c, 0x8502) },	/* AVM FRITZ!WLAN USB Stick AC 430 */
+	{ USB_DEVICE(0x293c, 0x5702) },	/* Comcast Xfinity KXW02AAA  */
+	{ USB_DEVICE(0x20f4, 0x806b) },	/* TRENDnet TEW-806UBH  */
+	{ USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+	{ USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac  Stick */
+	{ USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */
+	/* TP-LINK Archer T1U */
+	{ USB_DEVICE(0x2357, 0x0105), .driver_info = 1, },
+	/* MT7630U */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)},
+	/* MT7650U */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)},
+	{ 0, }
+};
+
+static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+	val |= MT_USB_DMA_CFG_RX_BULK_EN |
+	       MT_USB_DMA_CFG_TX_BULK_EN;
+
+	/* disable AGGR_BULK_RX in order to receive one
+	 * frame in each rx urb and avoid copies
+	 */
+	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+	val = mt76_rr(dev, MT_COM_REG0);
+	if (val & 1)
+		dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static void mt76x0u_cleanup(struct mt76x02_dev *dev)
+{
+	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	mt76x0_chip_onoff(dev, false, false);
+	mt76u_queues_deinit(&dev->mt76);
+}
+
+static void mt76x0u_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+	mt76u_stop_tx(&dev->mt76);
+	mt76x02u_exit_beacon_config(dev);
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return;
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+
+	mt76x0_mac_stop(dev);
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+}
+
+static int mt76x0u_start(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	int ret;
+
+	ret = mt76x0_mac_start(dev);
+	if (ret)
+		return ret;
+
+	mt76x0_phy_calibrate(dev, true);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
+				     MT_MAC_WORK_INTERVAL);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	return 0;
+}
+
+static const struct ieee80211_ops mt76x0u_ops = {
+	.tx = mt76x02_tx,
+	.start = mt76x0u_start,
+	.stop = mt76x0u_stop,
+	.add_interface = mt76x02_add_interface,
+	.remove_interface = mt76x02_remove_interface,
+	.config = mt76x0_config,
+	.configure_filter = mt76x02_configure_filter,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
+	.set_key = mt76x02_set_key,
+	.conf_tx = mt76x02_conf_tx,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
+	.ampdu_action = mt76x02_ampdu_action,
+	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.get_txpower = mt76_get_txpower,
+	.get_survey = mt76_get_survey,
+	.set_tim = mt76_set_tim,
+	.release_buffered_frames = mt76_release_buffered_frames,
+};
+
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
+{
+	int err;
+
+	mt76x0_chip_onoff(dev, true, reset);
+
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	err = mt76x0u_mcu_init(dev);
+	if (err < 0)
+		return err;
+
+	mt76x0_init_usb_dma(dev);
+	err = mt76x0_init_hardware(dev);
+	if (err < 0)
+		return err;
+
+	mt76x02u_init_beacon_config(dev);
+
+	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG,
+		FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+		FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY);
+
+	return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->mt76.hw;
+	int err;
+
+	err = mt76u_alloc_queues(&dev->mt76);
+	if (err < 0)
+		goto out_err;
+
+	err = mt76x0u_init_hardware(dev, true);
+	if (err < 0)
+		goto out_err;
+
+	err = mt76x0_register_device(dev);
+	if (err < 0)
+		goto out_err;
+
+	/* check hw sg support in order to enable AMSDU */
+	if (dev->mt76.usb.sg_en)
+		hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
+	else
+		hw->max_tx_fragments = 1;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	return 0;
+
+out_err:
+	mt76x0u_cleanup(dev);
+	return err;
+}
+
+static int mt76x0u_probe(struct usb_interface *usb_intf,
+			 const struct usb_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.update_survey = mt76x02_update_channel,
+		.tx_prepare_skb = mt76x02u_tx_prepare_skb,
+		.tx_complete_skb = mt76x02u_tx_complete_skb,
+		.tx_status_data = mt76x02_tx_status_data,
+		.rx_skb = mt76x02_queue_rx_skb,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
+	};
+	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+	struct mt76x02_dev *dev;
+	struct mt76_dev *mdev;
+	u32 mac_rev;
+	int ret;
+
+	mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
+				 &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+	mutex_init(&dev->phy_mutex);
+
+	/* Quirk for Archer T1U */
+	if (id->driver_info)
+		dev->no_2ghz = true;
+
+	usb_dev = usb_get_dev(usb_dev);
+	usb_reset_device(usb_dev);
+
+	usb_set_intfdata(usb_intf, dev);
+
+	mt76x02u_init_mcu(mdev);
+	ret = mt76u_init(mdev, usb_intf);
+	if (ret)
+		goto err;
+
+	/* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+	mt76x0_chip_onoff(dev, false, false);
+
+	if (!mt76x02_wait_for_mac(mdev)) {
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+	mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+	dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+		 mdev->rev, mac_rev);
+	if (!is_mt76x0(dev)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* Note: vendor driver skips this check for MT76X0U */
+	if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+		dev_warn(mdev->dev, "Warning: eFUSE not present\n");
+
+	ret = mt76x0u_register_device(dev);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+err:
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	ieee80211_free_hw(mdev->hw);
+	return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+	struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+	bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	if (!initialized)
+		return;
+
+	ieee80211_unregister_hw(dev->mt76.hw);
+	mt76x0u_cleanup(dev);
+
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
+					 pm_message_t state)
+{
+	struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+
+	mt76u_stop_rx(&dev->mt76);
+	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+	mt76x0_chip_onoff(dev, false, false);
+
+	return 0;
+}
+
+static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
+{
+	struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+	int ret;
+
+	ret = mt76u_resume_rx(&dev->mt76);
+	if (ret < 0)
+		goto err;
+
+	ret = mt76x0u_init_hardware(dev, false);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	mt76x0u_cleanup(dev);
+	return ret;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76x0_device_table,
+	.probe		= mt76x0u_probe,
+	.disconnect	= mt76x0_disconnect,
+	.suspend	= mt76x0_suspend,
+	.resume		= mt76x0_resume,
+	.reset_resume	= mt76x0_resume,
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
new file mode 100644
index 0000000..888a930
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x38f8
+#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
+
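+/* Firmware upload sequence, as implemented below: the ILM image minus its
+ * interrupt vector block (IVB) is sent first, then the DLM, and finally the
+ * IVB itself is pushed through a vendor control request so the MCU can start;
+ * MT_MCU_COM_REG0 bit 0 is then polled to confirm it is running. This is a
+ * reading of the code, not a statement of the vendor protocol.
+ */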
+static int
+mt76x0u_upload_firmware(struct mt76x02_dev *dev,
+			const struct mt76x02_fw_header *hdr)
+{
+	u8 *fw_payload = (u8 *)(hdr + 1);
+	u32 ilm_len, dlm_len;
+	void *ivb;
+	int err;
+
+	ivb = kmemdup(fw_payload, MT_MCU_IVB_SIZE, GFP_KERNEL);
+	if (!ivb)
+		return -ENOMEM;
+
+	ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
+	dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
+		ilm_len, MT_MCU_IVB_SIZE);
+	err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
+					ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+					MT_MCU_IVB_SIZE);
+	if (err)
+		goto out;
+
+	dlm_len = le32_to_cpu(hdr->dlm_len);
+	dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+	err = mt76x02u_mcu_fw_send_data(dev,
+					fw_payload + le32_to_cpu(hdr->ilm_len),
+					dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+					MT_MCU_DLM_OFFSET);
+	if (err)
+		goto out;
+
+	err = mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+				   USB_DIR_OUT | USB_TYPE_VENDOR,
+				   0x12, 0, ivb, MT_MCU_IVB_SIZE);
+	if (err < 0)
+		goto out;
+
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+		dev_err(dev->mt76.dev, "Firmware failed to start\n");
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+	kfree(ivb);
+
+	return err;
+}
+
+static int mt76x0_get_firmware(struct mt76x02_dev *dev,
+			       const struct firmware **fw)
+{
+	int err;
+
+	/* try to load the mt7610e firmware if available,
+	 * otherwise fall back to the mt7610u one
+	 */
+	err = firmware_request_nowarn(fw, MT7610E_FIRMWARE, dev->mt76.dev);
+	if (err) {
+		dev_info(dev->mt76.dev, "%s not found, switching to %s\n",
+			 MT7610E_FIRMWARE, MT7610U_FIRMWARE);
+		return request_firmware(fw, MT7610U_FIRMWARE,
+					dev->mt76.dev);
+	}
+	return 0;
+}
+
+static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76x02_fw_header *hdr;
+	int len, ret;
+	u32 val;
+
+	mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+				      MT_USB_DMA_CFG_TX_BULK_EN));
+
+	if (mt76x0_firmware_running(dev))
+		return 0;
+
+	ret = mt76x0_get_firmware(dev, &fw);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto err_inv_fw;
+
+	hdr = (const struct mt76x02_fw_header *)fw->data;
+
+	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+		goto err_inv_fw;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto err_inv_fw;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_dbg(dev->mt76.dev,
+		"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+		(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+		le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, 0x1004, 0x2c);
+
+	mt76_set(dev, MT_USB_DMA_CFG,
+		 (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
+		 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+	mt76x02u_mcu_fw_reset(dev);
+	usleep_range(5000, 6000);
+
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+	val = mt76_rr(dev, MT_USB_DMA_CFG);
+	val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+	mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+	ret = mt76x0u_upload_firmware(dev, hdr);
+	release_firmware(fw);
+
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	return ret;
+
+err_inv_fw:
+	dev_err(dev->mt76.dev, "Invalid firmware image\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+int mt76x0u_mcu_init(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	ret = mt76x0u_load_firmware(dev);
+	if (ret < 0)
+		return ret;
+
+	set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02.h
new file mode 100644
index 0000000..0075fba
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76x02_H
+#define __MT76x02_H
+
+#include <linux/kfifo.h>
+
+#include "mt76.h"
+#include "mt76x02_regs.h"
+#include "mt76x02_mac.h"
+#include "mt76x02_dfs.h"
+#include "mt76x02_dma.h"
+
+#define MT_CALIBRATE_INTERVAL	HZ
+#define MT_MAC_WORK_INTERVAL	(HZ / 10)
+
+#define MT_WATCHDOG_TIME	(HZ / 10)
+#define MT_TX_HANG_TH		10
+
+#define MT_MAX_CHAINS		2
+struct mt76x02_rx_freq_cal {
+	s8 high_gain[MT_MAX_CHAINS];
+	s8 rssi_offset[MT_MAX_CHAINS];
+	s8 lna_gain;
+	u32 mcu_gain;
+	s16 temp_offset;
+	u8 freq_offset;
+};
+
+struct mt76x02_calibration {
+	struct mt76x02_rx_freq_cal rx;
+
+	u8 agc_gain_init[MT_MAX_CHAINS];
+	u8 agc_gain_cur[MT_MAX_CHAINS];
+
+	u16 false_cca;
+	s8 avg_rssi_all;
+	s8 agc_gain_adjust;
+	s8 agc_lowest_gain;
+	s8 low_gain;
+
+	s8 temp_vco;
+	s8 temp;
+
+	bool init_cal_done;
+	bool tssi_cal_done;
+	bool tssi_comp_pending;
+	bool dpd_cal_done;
+	bool channel_cal_done;
+	bool gain_init_done;
+
+	int tssi_target;
+	s8 tssi_dc;
+};
+
+struct mt76x02_beacon_ops {
+	unsigned int nslots;
+	unsigned int slot_size;
+	void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en);
+	void (*beacon_enable)(struct mt76x02_dev *dev, bool en);
+};
+
+#define mt76x02_beacon_enable(dev, enable)	\
+	(dev)->beacon_ops->beacon_enable(dev, enable)
+#define mt76x02_pre_tbtt_enable(dev, enable)	\
+	(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
+
+struct mt76x02_dev {
+	struct mt76_dev mt76; /* must be first */
+
+	struct mac_address macaddr_list[8];
+
+	struct mutex phy_mutex;
+
+	u16 vif_mask;
+
+	u8 txdone_seq;
+	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+	spinlock_t txstatus_fifo_lock;
+
+	struct sk_buff *rx_head;
+
+	struct delayed_work cal_work;
+	struct delayed_work wdt_work;
+
+	struct hrtimer pre_tbtt_timer;
+	struct work_struct pre_tbtt_work;
+
+	const struct mt76x02_beacon_ops *beacon_ops;
+
+	u32 aggr_stats[32];
+
+	struct sk_buff *beacons[8];
+	u8 beacon_data_mask;
+
+	u8 tbtt_count;
+
+	u32 tx_hang_reset;
+	u8 tx_hang_check;
+	u8 mcu_timeout;
+
+	struct mt76x02_calibration cal;
+
+	s8 target_power;
+	s8 target_power_delta[2];
+	bool enable_tpc;
+
+	bool no_2ghz;
+
+	u8 coverage_class;
+	u8 slottime;
+
+	struct mt76x02_dfs_pattern_detector dfs_pd;
+
+	/* edcca monitor */
+	unsigned long ed_trigger_timeout;
+	bool ed_tx_blocked;
+	bool ed_monitor;
+	u8 ed_monitor_enabled;
+	u8 ed_monitor_learning;
+	u8 ed_trigger;
+	u8 ed_silent;
+	ktime_t ed_time;
+};
+
+extern struct ieee80211_rate mt76x02_rates[12];
+
+void mt76x02_init_device(struct mt76x02_dev *dev);
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+			      unsigned int changed_flags,
+			      unsigned int *total_flags, u64 multicast);
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta);
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta);
+
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
+
+int mt76x02_add_interface(struct ieee80211_hw *hw,
+			  struct ieee80211_vif *vif);
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_ampdu_params *params);
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		    struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		    struct ieee80211_key_conf *key);
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta);
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+				const struct ieee80211_tx_rate *rate);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
+			    s8 max_txpwr_adj);
+void mt76x02_wdt_work(struct work_struct *work);
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+				s16 coverage_class);
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			  struct sk_buff *skb);
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb);
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta,
+			   struct mt76_tx_info *tx_info);
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif);
+void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_bss_conf *info, u32 changed);
+
+struct beacon_bc_data {
+	struct mt76x02_dev *dev;
+	struct sk_buff_head q;
+	struct sk_buff *tail[8];
+};
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
+void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+				 struct beacon_bc_data *data,
+				 int max_nframes);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev);
+
+void mt76x02_init_debugfs(struct mt76x02_dev *dev);
+
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+	return mt76_chip(&dev->mt76) == 0x7610 ||
+	       mt76_chip(&dev->mt76) == 0x7630 ||
+	       mt76_chip(&dev->mt76) == 0x7650;
+}
+
+static inline bool is_mt76x2(struct mt76x02_dev *dev)
+{
+	return mt76_chip(&dev->mt76) == 0x7612 ||
+	       mt76_chip(&dev->mt76) == 0x7632 ||
+	       mt76_chip(&dev->mt76) == 0x7662 ||
+	       mt76_chip(&dev->mt76) == 0x7602;
+}
+
+static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
+{
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+static inline bool
+mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
+{
+	return __mt76_poll_msec(dev, MT_MAC_STATUS,
+				MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+				0, 100);
+}
+
+static inline struct mt76x02_sta *
+mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
+{
+	struct mt76_wcid *wcid;
+
+	if (idx >= ARRAY_SIZE(dev->wcid))
+		return NULL;
+
+	wcid = rcu_dereference(dev->wcid[idx]);
+	if (!wcid)
+		return NULL;
+
+	return container_of(wcid, struct mt76x02_sta, wcid);
+}
+
+static inline struct mt76_wcid *
+mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
+{
+	if (!sta)
+		return NULL;
+
+	if (unicast)
+		return &sta->wcid;
+	else
+		return &sta->vif->group_wcid;
+}
+
+#endif /* __MT76x02_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
new file mode 100644
index 0000000..4209209
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x02.h"
+
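+/* Beacon slot offsets are programmed in units of 64 bytes, packed four per
+ * MT_BCN_OFFSET register (8 bits each), so nslots/slot_size fully describe
+ * the per-chip beacon memory layout. Descriptive comment only; the packing
+ * follows directly from the loop below.
+ */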
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+{
+	u32 regs[4] = {};
+	u16 val;
+	int i;
+
+	for (i = 0; i < dev->beacon_ops->nslots; i++) {
+		val = i * dev->beacon_ops->slot_size;
+		regs[i / 4] |= (val / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int
+mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+	int beacon_len = dev->beacon_ops->slot_size;
+	struct mt76x02_txwi txwi;
+
+	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+		return -ENOSPC;
+
+	mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+
+	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+	offset += sizeof(txwi);
+
+	mt76_wr_copy(dev, offset, skb->data, skb->len);
+	return 0;
+}
+
+static int
+__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
+			 struct sk_buff *skb)
+{
+	int beacon_len = dev->beacon_ops->slot_size;
+	int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
+	int ret = 0;
+	int i;
+
+	/* Prevent corrupt transmissions during update */
+	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+	if (skb) {
+		ret = mt76x02_write_beacon(dev, beacon_addr, skb);
+		if (!ret)
+			dev->beacon_data_mask |= BIT(bcn_idx);
+	} else {
+		dev->beacon_data_mask &= ~BIT(bcn_idx);
+		for (i = 0; i < beacon_len; i += 4)
+			mt76_wr(dev, beacon_addr + i, 0);
+	}
+
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+	return ret;
+}
+
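+/* Beacons are kept packed in consecutive hardware slots: when the beacon for
+ * vif_idx is added or removed, that slot is rewritten and every following vif
+ * with a beacon is written into its (possibly shifted) slot, after which any
+ * stale trailing slots are cleared. Descriptive comment based on the loop
+ * below.
+ */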
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+			   struct sk_buff *skb)
+{
+	bool force_update = false;
+	int bcn_idx = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (vif_idx == i) {
+			force_update = !!dev->beacons[i] ^ !!skb;
+			dev_kfree_skb(dev->beacons[i]);
+			dev->beacons[i] = skb;
+			__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
+		} else if (force_update && dev->beacons[i]) {
+			__mt76x02_mac_set_beacon(dev, bcn_idx,
+						 dev->beacons[i]);
+		}
+
+		bcn_idx += !!dev->beacons[i];
+	}
+
+	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (!(dev->beacon_data_mask & BIT(i)))
+			break;
+
+		__mt76x02_mac_set_beacon(dev, i, NULL);
+	}
+
+	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+		       bcn_idx - 1);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+				   struct ieee80211_vif *vif, bool enable)
+{
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	u8 old_mask = dev->mt76.beacon_mask;
+
+	mt76x02_pre_tbtt_enable(dev, false);
+
+	if (!dev->mt76.beacon_mask)
+		dev->tbtt_count = 0;
+
+	if (enable) {
+		dev->mt76.beacon_mask |= BIT(mvif->idx);
+	} else {
+		dev->mt76.beacon_mask &= ~BIT(mvif->idx);
+		mt76x02_mac_set_beacon(dev, mvif->idx, NULL);
+	}
+
+	if (!!old_mask == !!dev->mt76.beacon_mask)
+		goto out;
+
+	if (dev->mt76.beacon_mask)
+		mt76_set(dev, MT_BEACON_TIME_CFG,
+			 MT_BEACON_TIME_CFG_BEACON_TX |
+			 MT_BEACON_TIME_CFG_TBTT_EN |
+			 MT_BEACON_TIME_CFG_TIMER_EN);
+	else
+		mt76_clear(dev, MT_BEACON_TIME_CFG,
+			   MT_BEACON_TIME_CFG_BEACON_TX |
+			   MT_BEACON_TIME_CFG_TBTT_EN |
+			   MT_BEACON_TIME_CFG_TIMER_EN);
+	mt76x02_beacon_enable(dev, !!dev->mt76.beacon_mask);
+
+out:
+	mt76x02_pre_tbtt_enable(dev, true);
+}
+
+void
+mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+	u32 timer_val = dev->mt76.beacon_int << 4;
+
+	dev->tbtt_count++;
+
+	/*
+	 * The beacon timer drifts by 1us every tick; the timer is configured
+	 * in 1/16 TU (64us) units.
+	 */
+	if (dev->tbtt_count < 63)
+		return;
+
+	/*
+	 * The updated beacon interval takes effect after two TBTTs, because
+	 * at this point the original interval has already been loaded into
+	 * the next TBTT_TIMER value.
+	 */
+	if (dev->tbtt_count == 63)
+		timer_val -= 1;
+
+	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+		       MT_BEACON_TIME_CFG_INTVAL, timer_val);
+
+	if (dev->tbtt_count >= 64)
+		dev->tbtt_count = 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
+
+void
+mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct sk_buff *skb = NULL;
+
+	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
+
+static void
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct beacon_bc_data *data = priv;
+	struct mt76x02_dev *dev = data->dev;
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+
+	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	info->control.vif = vif;
+	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+	mt76_skb_set_moredata(skb, true);
+	__skb_queue_tail(&data->q, skb);
+	data->tail[mvif->idx] = skb;
+}
+
+void
+mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+			    struct beacon_bc_data *data,
+			    int max_nframes)
+{
+	int i, nframes;
+
+	data->dev = dev;
+	__skb_queue_head_init(&data->q);
+
+	do {
+		nframes = skb_queue_len(&data->q);
+		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+			IEEE80211_IFACE_ITER_RESUME_ALL,
+			mt76x02_add_buffered_bc, data);
+	} while (nframes != skb_queue_len(&data->q) &&
+		 skb_queue_len(&data->q) < max_nframes);
+
+	if (!skb_queue_len(&data->q))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
+		if (!data->tail[i])
+			continue;
+		mt76_skb_set_moredata(data->tail[i], false);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
+{
+	int i;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+					     MT_BEACON_TIME_CFG_TBTT_EN |
+					     MT_BEACON_TIME_CFG_BEACON_TX));
+	mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+
+	for (i = 0; i < 8; i++)
+		mt76x02_mac_set_beacon(dev, i, NULL);
+
+	mt76x02_set_beacon_offsets(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
+
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
new file mode 100644
index 0000000..0cb2a7b
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x02.h"
+
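+/* "ampdu_stat" dumps dev->aggr_stats[] as a histogram of aggregate (A-MPDU)
+ * sizes from 1 to 32 frames, eight buckets per row. Comment added for
+ * clarity.
+ */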
+static int
+mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt76x02_dev *dev = file->private;
+	int i, j;
+
+	for (i = 0; i < 4; i++) {
+		seq_puts(file, "Length: ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", i * 8 + j + 1);
+		seq_puts(file, "\n");
+		seq_puts(file, "Count:  ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+		seq_puts(file, "\n");
+		seq_puts(file, "--------");
+		for (j = 0; j < 8; j++)
+			seq_puts(file, "-----------");
+		seq_puts(file, "\n");
+	}
+
+	return 0;
+}
+
+static int
+mt76x02_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x02_ampdu_stat_read, inode->i_private);
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+	struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+	seq_printf(file, "Target power: %d\n", dev->target_power);
+
+	mt76_seq_puts_array(file, "Delta", dev->target_power_delta,
+			    ARRAY_SIZE(dev->target_power_delta));
+	return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt76x02_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt76x02_dfs_stat_read(struct seq_file *file, void *data)
+{
+	struct mt76x02_dev *dev = file->private;
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	int i;
+
+	seq_printf(file, "allocated sequences:\t%d\n",
+		   dfs_pd->seq_stats.seq_pool_len);
+	seq_printf(file, "used sequences:\t\t%d\n",
+		   dfs_pd->seq_stats.seq_len);
+	seq_puts(file, "\n");
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		seq_printf(file, "engine: %d\n", i);
+		seq_printf(file, "  hw pattern detected:\t%d\n",
+			   dfs_pd->stats[i].hw_pattern);
+		seq_printf(file, "  hw pulse discarded:\t%d\n",
+			   dfs_pd->stats[i].hw_pulse_discarded);
+		seq_printf(file, "  sw pattern detected:\t%d\n",
+			   dfs_pd->stats[i].sw_pattern);
+	}
+
+	return 0;
+}
+
+static int
+mt76x02_dfs_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x02_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+	.open = mt76x02_dfs_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int read_agc(struct seq_file *file, void *data)
+{
+	struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+	seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+	seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+	seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+	seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+	return 0;
+}
+
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+	struct mt76x02_dev *dev = data;
+	enum nl80211_dfs_regions region = dev->mt76.region;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	dev->ed_monitor_enabled = !!val;
+	dev->ed_monitor = dev->ed_monitor_enabled &&
+			  region == NL80211_DFS_ETSI;
+	mt76x02_edcca_init(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+	struct mt76x02_dev *dev = data;
+
+	*val = dev->ed_monitor_enabled;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+			 "%lld\n");
+
+void mt76x02_init_debugfs(struct mt76x02_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = mt76_register_debugfs(&dev->mt76);
+	if (!dir)
+		return;
+
+	debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
+	debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
+
+	debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
+	debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+				    read_txpower);
+
+	debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
+
+	debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
new file mode 100644
index 0000000..5dec33e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x02.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh,		\
+		   w_tolerance, tl, th, t_tolerance,	\
+		   bl, bh, event_exp, power_jmp)	\
+{							\
+	.mode = m,					\
+	.avg_len = len,					\
+	.e_low = el,					\
+	.e_high = eh,					\
+	.w_low = wl,					\
+	.w_high = wh,					\
+	.w_margin = w_tolerance,			\
+	.t_low = tl,					\
+	.t_high = th,					\
+	.t_margin = t_tolerance,			\
+	.b_low = bl,					\
+	.b_high = bh,					\
+	.event_expiration = event_exp,			\
+	.pwr_jmp = power_jmp				\
+}
+
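+/* Per-region radar pulse thresholds: each table holds one block of
+ * MT_DFS_NUM_ENGINES entries for every channel bandwidth (20/40/80 MHz),
+ * indexed by mt76x02_dfs_set_bbp_params() below.
+ */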
+static const struct mt76x02_radar_specs etsi_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x02_radar_specs fcc_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0xfe808, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 }
+};
+
+static void
+mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable)
+{
+	u32 data;
+
+	data = (1 << 1) | enable;
+	mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev,
+				     struct mt76x02_dfs_sequence *seq)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	list_add(&seq->head, &dfs_pd->seq_pool);
+
+	dfs_pd->seq_stats.seq_pool_len++;
+	dfs_pd->seq_stats.seq_len--;
+}
+
+static struct mt76x02_dfs_sequence *
+mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_sequence *seq;
+
+	if (list_empty(&dfs_pd->seq_pool)) {
+		seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+	} else {
+		seq = list_first_entry(&dfs_pd->seq_pool,
+				       struct mt76x02_dfs_sequence,
+				       head);
+		list_del(&seq->head);
+		dfs_pd->seq_stats.seq_pool_len--;
+	}
+	if (seq)
+		dfs_pd->seq_stats.seq_len++;
+
+	return seq;
+}
+
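+/* Return n if 'val' is within 'margin' of n * frac, 0 if it is not close
+ * to an integer multiple; used to test whether the gap between two radar
+ * events matches a candidate PRI.
+ */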
+static int mt76x02_dfs_get_multiple(int val, int frac, int margin)
+{
+	int remainder, factor;
+
+	if (!frac)
+		return 0;
+
+	if (abs(val - frac) <= margin)
+		return 1;
+
+	factor = val / frac;
+	remainder = val % frac;
+
+	if (remainder > margin) {
+		if ((frac - remainder) <= margin)
+			factor++;
+		else
+			factor = 0;
+	}
+	return factor;
+}
+
+static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_sequence *seq, *tmp_seq;
+	int i;
+
+	/* reset hw detector */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+	/* reset sw detector */
+	for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+		dfs_pd->event_rb[i].h_rb = 0;
+		dfs_pd->event_rb[i].t_rb = 0;
+	}
+
+	list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+		list_del_init(&seq->head);
+		mt76x02_dfs_seq_pool_put(dev, seq);
+	}
+}
+
+static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev)
+{
+	bool ret = false;
+	u32 current_ts, delta_ts;
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+	delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+	dfs_pd->chirp_pulse_ts = current_ts;
+
+	/* 12 sec */
+	if (delta_ts <= (12 * (1 << 20))) {
+		if (++dfs_pd->chirp_pulse_cnt > 8)
+			ret = true;
+	} else {
+		dfs_pd->chirp_pulse_cnt = 1;
+	}
+
+	return ret;
+}
+
+static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+				     struct mt76x02_dfs_hw_pulse *pulse)
+{
+	u32 data;
+
+	/* select channel */
+	data = (MT_DFS_CH_EN << 16) | pulse->engine;
+	mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+	/* reported period */
+	pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+	/* reported width */
+	pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+	pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+	/* reported burst number */
+	pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
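+/* Validate the period and width reported by a hardware engine against the
+ * pulse shapes allowed in the current DFS region; engine 3 is used for
+ * chirp (long pulse) radar in the FCC and JP domains.
+ */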
+static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+				       struct mt76x02_dfs_hw_pulse *pulse)
+{
+	bool ret = false;
+
+	if (!pulse->period || !pulse->w1)
+		return false;
+
+	switch (dev->mt76.region) {
+	case NL80211_DFS_FCC:
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x02_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 10200) &&
+			       pulse->period <= 61600);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       pulse->period <= 61600);
+		else
+			ret = (pulse->period >= 3500 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_ETSI:
+		if (pulse->engine >= 3)
+			break;
+
+		ret = (pulse->period >= 4900 &&
+		       (pulse->period <= 10200 ||
+			pulse->period >= 12400) &&
+		       pulse->period <= 100100);
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350) {
+			/* JPW53 */
+			if (pulse->w1 <= 130)
+				ret = (pulse->period >= 28360 &&
+				       (pulse->period <= 28700 ||
+					pulse->period >= 76900) &&
+				       pulse->period <= 76940);
+			break;
+		}
+
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x02_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 10100 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else
+			ret = (pulse->period >= 3900 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return false;
+	}
+
+	return ret;
+}
+
+static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev,
+				    struct mt76x02_dfs_event *event)
+{
+	u32 data;
+
+	/* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
+	 * 2nd: DFS_R37[21:0]: pulse time
+	 * 3rd: DFS_R37[11:0]: pulse width
+	 * 3rd: DFS_R37[25:16]: phase
+	 * 4th: DFS_R37[12:0]: current pwr
+	 * 4th: DFS_R37[21:16]: pwr stable counter
+	 *
+	 * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
+	 */
+	data = mt76_rr(dev, MT_BBP(DFS, 37));
+	if (!MT_DFS_CHECK_EVENT(data))
+		return false;
+
+	event->engine = MT_DFS_EVENT_ENGINE(data);
+	data = mt76_rr(dev, MT_BBP(DFS, 37));
+	event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+	data = mt76_rr(dev, MT_BBP(DFS, 37));
+	event->width = MT_DFS_EVENT_WIDTH(data);
+
+	return true;
+}
+
+static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev,
+				    struct mt76x02_dfs_event *event)
+{
+	if (event->engine == 2) {
+		struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+		struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+		u16 last_event_idx;
+		u32 delta_ts;
+
+		last_event_idx = mt76_decr(event_buff->t_rb,
+					   MT_DFS_EVENT_BUFLEN);
+		delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+		if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+		    event_buff->data[last_event_idx].width >= 200)
+			return false;
+	}
+	return true;
+}
+
+static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev,
+				    struct mt76x02_dfs_event *event)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_event_rb *event_buff;
+
+	/* add radar event to ring buffer */
+	event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+					: &dfs_pd->event_rb[0];
+	event_buff->data[event_buff->t_rb] = *event;
+	event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+	event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+	if (event_buff->t_rb == event_buff->h_rb)
+		event_buff->h_rb = mt76_incr(event_buff->h_rb,
+					     MT_DFS_EVENT_BUFLEN);
+}
+
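+/* Walk the event ring buffer backwards from the newest entry and try to
+ * pair the new event with older ones whose spacing looks like a valid PRI;
+ * pairings longer than 'cur_len' are stored as new candidate sequences
+ * taken from the preallocated pool.
+ */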
+static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
+				       struct mt76x02_dfs_event *event,
+				       u16 cur_len)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_sw_detector_params *sw_params;
+	u32 width_delta, with_sum, factor, cur_pri;
+	struct mt76x02_dfs_sequence seq, *seq_p;
+	struct mt76x02_dfs_event_rb *event_rb;
+	struct mt76x02_dfs_event *cur_event;
+	int i, j, end, pri;
+
+	event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+				      : &dfs_pd->event_rb[0];
+
+	i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+	end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
+	while (i != end) {
+		cur_event = &event_rb->data[i];
+		with_sum = event->width + cur_event->width;
+
+		sw_params = &dfs_pd->sw_dpd_params;
+		switch (dev->mt76.region) {
+		case NL80211_DFS_FCC:
+		case NL80211_DFS_JP:
+			if (with_sum < 600)
+				width_delta = 8;
+			else
+				width_delta = with_sum >> 3;
+			break;
+		case NL80211_DFS_ETSI:
+			if (event->engine == 2)
+				width_delta = with_sum >> 6;
+			else if (with_sum < 620)
+				width_delta = 24;
+			else
+				width_delta = 8;
+			break;
+		case NL80211_DFS_UNSET:
+		default:
+			return -EINVAL;
+		}
+
+		pri = event->ts - cur_event->ts;
+		if (abs(event->width - cur_event->width) > width_delta ||
+		    pri < sw_params->min_pri)
+			goto next;
+
+		if (pri > sw_params->max_pri)
+			break;
+
+		seq.pri = event->ts - cur_event->ts;
+		seq.first_ts = cur_event->ts;
+		seq.last_ts = event->ts;
+		seq.engine = event->engine;
+		seq.count = 2;
+
+		j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+		while (j != end) {
+			cur_event = &event_rb->data[j];
+			cur_pri = event->ts - cur_event->ts;
+			factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri,
+						sw_params->pri_margin);
+			if (factor > 0) {
+				seq.first_ts = cur_event->ts;
+				seq.count++;
+			}
+
+			j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+		}
+		if (seq.count <= cur_len)
+			goto next;
+
+		seq_p = mt76x02_dfs_seq_pool_get(dev);
+		if (!seq_p)
+			return -ENOMEM;
+
+		*seq_p = seq;
+		INIT_LIST_HEAD(&seq_p->head);
+		list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+		i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+	}
+	return 0;
+}
+
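+/* Fold a new event into the existing candidate sequences: expired
+ * sequences are returned to the pool, matching ones are extended, and the
+ * length of the longest updated sequence is returned.
+ */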
+static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+					     struct mt76x02_dfs_event *event)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_sw_detector_params *sw_params;
+	struct mt76x02_dfs_sequence *seq, *tmp_seq;
+	u16 max_seq_len = 0;
+	u32 factor, pri;
+
+	sw_params = &dfs_pd->sw_dpd_params;
+	list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+		if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+			list_del_init(&seq->head);
+			mt76x02_dfs_seq_pool_put(dev, seq);
+			continue;
+		}
+
+		if (event->engine != seq->engine)
+			continue;
+
+		pri = event->ts - seq->last_ts;
+		factor = mt76x02_dfs_get_multiple(pri, seq->pri,
+						  sw_params->pri_margin);
+		if (factor > 0) {
+			seq->last_ts = event->ts;
+			seq->count++;
+			max_seq_len = max_t(u16, max_seq_len, seq->count);
+		}
+	}
+	return max_seq_len;
+}
+
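+/* A radar pattern is reported as soon as any software sequence has grown
+ * beyond MT_DFS_SEQUENCE_TH events.
+ */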
+static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_sequence *seq;
+
+	if (list_empty(&dfs_pd->sequences))
+		return false;
+
+	list_for_each_entry(seq, &dfs_pd->sequences, head) {
+		if (seq->count > MT_DFS_SEQUENCE_TH) {
+			dfs_pd->stats[seq->engine].sw_pattern++;
+			return true;
+		}
+	}
+	return false;
+}
+
+static void mt76x02_dfs_add_events(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_event event;
+	int i, seq_len;
+
+	/* disable debug mode */
+	mt76x02_dfs_set_capture_mode_ctrl(dev, false);
+	for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+		if (!mt76x02_dfs_fetch_event(dev, &event))
+			break;
+
+		if (dfs_pd->last_event_ts > event.ts)
+			mt76x02_dfs_detector_reset(dev);
+		dfs_pd->last_event_ts = event.ts;
+
+		if (!mt76x02_dfs_check_event(dev, &event))
+			continue;
+
+		seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event);
+		mt76x02_dfs_create_sequence(dev, &event, seq_len);
+
+		mt76x02_dfs_queue_event(dev, &event);
+	}
+	mt76x02_dfs_set_capture_mode_ctrl(dev, true);
+}
+
+static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	struct mt76x02_dfs_event_rb *event_buff;
+	struct mt76x02_dfs_event *event;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+		event_buff = &dfs_pd->event_rb[i];
+
+		while (event_buff->h_rb != event_buff->t_rb) {
+			event = &event_buff->data[event_buff->h_rb];
+
+			/* sorted list */
+			if (time_is_after_jiffies(event->fetch_ts +
+						  MT_DFS_EVENT_WINDOW))
+				break;
+			event_buff->h_rb = mt76_incr(event_buff->h_rb,
+						     MT_DFS_EVENT_BUFLEN);
+		}
+	}
+}
+
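+/* GP timer tasklet: run the software pattern detector at most once every
+ * MT_DFS_SW_TIMEOUT, then poll the hardware engines and validate any pulse
+ * they report before signalling a radar event to mac80211.
+ */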
+static void mt76x02_dfs_tasklet(unsigned long arg)
+{
+	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	u32 engine_mask;
+	int i;
+
+	if (test_bit(MT76_SCANNING, &dev->mt76.state))
+		goto out;
+
+	if (time_is_before_jiffies(dfs_pd->last_sw_check +
+				   MT_DFS_SW_TIMEOUT)) {
+		bool radar_detected;
+
+		dfs_pd->last_sw_check = jiffies;
+
+		mt76x02_dfs_add_events(dev);
+		radar_detected = mt76x02_dfs_check_detection(dev);
+		if (radar_detected) {
+			/* sw detector rx radar pattern */
+			ieee80211_radar_detected(dev->mt76.hw);
+			mt76x02_dfs_detector_reset(dev);
+
+			return;
+		}
+		mt76x02_dfs_check_event_window(dev);
+	}
+
+	engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+	if (!(engine_mask & 0xf))
+		goto out;
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		struct mt76x02_dfs_hw_pulse pulse;
+
+		if (!(engine_mask & (1 << i)))
+			continue;
+
+		pulse.engine = i;
+		mt76x02_dfs_get_hw_pulse(dev, &pulse);
+
+		if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) {
+			dfs_pd->stats[i].hw_pulse_discarded++;
+			continue;
+		}
+
+		/* hw detector rx radar pattern */
+		dfs_pd->stats[i].hw_pattern++;
+		ieee80211_radar_detected(dev->mt76.hw);
+		mt76x02_dfs_detector_reset(dev);
+
+		return;
+	}
+
+	/* reset hw detector */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+	mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	switch (dev->mt76.region) {
+	case NL80211_DFS_FCC:
+		dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+		dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+		dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+		break;
+	case NL80211_DFS_ETSI:
+		dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+		dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+		dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+		break;
+	case NL80211_DFS_JP:
+		dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+		dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+		dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		break;
+	}
+}
+
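+/* Program the per-engine radar detection thresholds; 'shift' selects the
+ * block of MT_DFS_NUM_ENGINES entries matching the current channel
+ * bandwidth within the regulatory-specific table.
+ */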
+static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
+{
+	const struct mt76x02_radar_specs *radar_specs;
+	u8 i, shift;
+	u32 data;
+
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_40:
+		shift = MT_DFS_NUM_ENGINES;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		shift = 2 * MT_DFS_NUM_ENGINES;
+		break;
+	default:
+		shift = 0;
+		break;
+	}
+
+	switch (dev->mt76.region) {
+	case NL80211_DFS_FCC:
+		radar_specs = &fcc_radar_specs[shift];
+		break;
+	case NL80211_DFS_ETSI:
+		radar_specs = &etsi_radar_specs[shift];
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350)
+			radar_specs = &jp_w53_radar_specs[shift];
+		else
+			radar_specs = &jp_w56_radar_specs[shift];
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return;
+	}
+
+	data = (MT_DFS_VGA_MASK << 16) |
+	       (MT_DFS_PWR_GAIN_OFFSET << 12) |
+	       (MT_DFS_PWR_DOWN_TIME << 8) |
+	       (MT_DFS_SYM_ROUND << 4) |
+	       (MT_DFS_DELTA_DELAY & 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+	data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+	mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		/* configure engine */
+		mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+		/* detection mode + avg_len */
+		data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+		       (radar_specs[i].mode & 0xf);
+		mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+		/* dfs energy */
+		data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+		       (radar_specs[i].e_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+		/* dfs period */
+		mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+		mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+		/* dfs burst */
+		mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+		mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+		/* dfs width */
+		data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+		       (radar_specs[i].w_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+		/* dfs margins */
+		data = (radar_specs[i].w_margin << 16) |
+		       radar_specs[i].t_margin;
+		mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+		/* dfs event expiration */
+		mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+		/* dfs pwr adj */
+		mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+	}
+
+	/* reset status */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+	/* enable detection */
+	mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+	mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
+}
+
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev)
+{
+	u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+	agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+	agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+	val_r8 = (agc_r8 & 0x00007e00) >> 9;
+	val_r4 = agc_r4 & ~0x1f000000;
+	val_r4 += (((val_r8 + 1) >> 1) << 24);
+	mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+	dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+	dfs_r31 += val_r8;
+	dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+	dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+	mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+	if (is_mt76x2(dev)) {
+		mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+	} else {
+		/* disable hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 0), 0);
+		/* enable hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
+
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+	    dev->mt76.region != NL80211_DFS_UNSET) {
+		mt76x02_dfs_init_sw_detector(dev);
+		mt76x02_dfs_set_bbp_params(dev);
+		/* enable debug mode */
+		mt76x02_dfs_set_capture_mode_ctrl(dev, true);
+
+		mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+	} else {
+		/* disable hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 0), 0);
+		/* clear detector status */
+		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+		if (mt76_chip(&dev->mt76) == 0x7610 ||
+		    mt76_chip(&dev->mt76) == 0x7630)
+			mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
+		else
+			mt76_wr(dev, MT_BBP(IBI, 11), 0);
+
+		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params);
+
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	INIT_LIST_HEAD(&dfs_pd->sequences);
+	INIT_LIST_HEAD(&dfs_pd->seq_pool);
+	dev->mt76.region = NL80211_DFS_UNSET;
+	dfs_pd->last_sw_check = jiffies;
+	tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
+		     (unsigned long)dev);
+}
+
+static void
+mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
+		       enum nl80211_dfs_regions region)
+{
+	struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	mutex_lock(&dev->mt76.mutex);
+	if (dev->mt76.region != region) {
+		tasklet_disable(&dfs_pd->dfs_tasklet);
+
+		dev->ed_monitor = dev->ed_monitor_enabled &&
+				  region == NL80211_DFS_ETSI;
+		mt76x02_edcca_init(dev);
+
+		dev->mt76.region = region;
+		mt76x02_dfs_init_params(dev);
+		tasklet_enable(&dfs_pd->dfs_tasklet);
+	}
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+			   struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct mt76x02_dev *dev = hw->priv;
+
+	mt76x02_dfs_set_domain(dev, request->dfs_region);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
new file mode 100644
index 0000000..491010a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_DFS_H
+#define __MT76x02_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL		(10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES		4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND		0
+#define MT_DFS_DELTA_DELAY		2
+#define MT_DFS_VGA_MASK			0
+#define MT_DFS_PWR_GAIN_OFFSET		3
+#define MT_DFS_PWR_DOWN_TIME		0xf
+#define MT_DFS_RX_PE_MASK		0xff
+#define MT_DFS_PKT_END_MASK		0
+#define MT_DFS_CH_EN			0xf
+
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP		64
+#define MT_DFS_SW_TIMEOUT		(HZ / 20)
+#define MT_DFS_EVENT_WINDOW		(HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW		(200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN	2000
+#define MT_DFS_PRI_MARGIN		4
+#define MT_DFS_SEQUENCE_TH		6
+
+#define MT_DFS_FCC_MAX_PRI		((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI		(3000 - 2)
+#define MT_DFS_JP_MAX_PRI		((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI		(28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI		(133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI		(4500 - 20)
+
+struct mt76x02_radar_specs {
+	u8 mode;
+	u16 avg_len;
+	u16 e_low;
+	u16 e_high;
+	u16 w_low;
+	u16 w_high;
+	u16 w_margin;
+	u32 t_low;
+	u32 t_high;
+	u16 t_margin;
+	u32 b_low;
+	u32 b_high;
+	u32 event_expiration;
+	u16 pwr_jmp;
+};
+
+#define MT_DFS_CHECK_EVENT(x)		((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x)		(((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x)	((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x)		((x) & GENMASK(11, 0))
+struct mt76x02_dfs_event {
+	unsigned long fetch_ts;
+	u32 ts;
+	u16 width;
+	u8 engine;
+};
+
+#define MT_DFS_EVENT_BUFLEN		256
+struct mt76x02_dfs_event_rb {
+	struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN];
+	int h_rb, t_rb;
+};
+
+struct mt76x02_dfs_sequence {
+	struct list_head head;
+	u32 first_ts;
+	u32 last_ts;
+	u32 pri;
+	u16 count;
+	u8 engine;
+};
+
+struct mt76x02_dfs_hw_pulse {
+	u8 engine;
+	u32 period;
+	u32 w1;
+	u32 w2;
+	u32 burst;
+};
+
+struct mt76x02_dfs_sw_detector_params {
+	u32 min_pri;
+	u32 max_pri;
+	u32 pri_margin;
+};
+
+struct mt76x02_dfs_engine_stats {
+	u32 hw_pattern;
+	u32 hw_pulse_discarded;
+	u32 sw_pattern;
+};
+
+struct mt76x02_dfs_seq_stats {
+	u32 seq_pool_len;
+	u32 seq_len;
+};
+
+struct mt76x02_dfs_pattern_detector {
+	u8 chirp_pulse_cnt;
+	u32 chirp_pulse_ts;
+
+	struct mt76x02_dfs_sw_detector_params sw_dpd_params;
+	struct mt76x02_dfs_event_rb event_rb[2];
+
+	struct list_head sequences;
+	struct list_head seq_pool;
+	struct mt76x02_dfs_seq_stats seq_stats;
+
+	unsigned long last_sw_check;
+	u32 last_event_ts;
+
+	struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+	struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+			   struct regulatory_request *request);
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev);
+#endif /* __MT76x02_DFS_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
new file mode 100644
index 0000000..4aff4f8
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_DMA_H
+#define __MT76x02_DMA_H
+
+#include "mt76x02.h"
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN			GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD		BIT(16)
+#define MT_TXD_INFO_TX_BURST		BIT(17)
+#define MT_TXD_INFO_80211		BIT(19)
+#define MT_TXD_INFO_TSO			BIT(20)
+#define MT_TXD_INFO_CSO			BIT(21)
+#define MT_TXD_INFO_WIV			BIT(24)
+#define MT_TXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT		GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN		GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN		BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE		GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR	BIT(24)
+#define MT_RX_FCE_INFO_QSEL		GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT		GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE		GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN			GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ		GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE		GENMASK(26, 20)
+#define MT_MCU_MSG_PORT			GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE			GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD		BIT(30)
+
+#define MT_RX_HEADROOM			32
+#define MT76X02_RX_RING_SIZE		256
+
+enum dma_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
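+/* Wait for the TX and RX WPDMA busy flags to clear */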
+static inline bool
+mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
+{
+	return __mt76_poll(dev, MT_WPDMA_GLO_CFG,
+			   MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			   MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+			   0, timeout);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev);
+void mt76x02_dma_disable(struct mt76x02_dev *dev);
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_DMA_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
new file mode 100644
index 0000000..c0227b2
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include "mt76x02_eeprom.h"
+
+static int
+mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
+		   enum mt76x02_eeprom_modes mode)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
+	val |= MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	udelay(2);
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+			enum mt76x02_eeprom_field field,
+			void *dest, int len)
+{
+	if (field + len > dev->mt76.eeprom.size)
+		return -1;
+
+	memcpy(dest, dev->mt76.eeprom.data + field, len);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_copy);
+
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+			   int len, enum mt76x02_eeprom_modes mode)
+{
+	int ret, i;
+
+	for (i = 0; i + 16 <= len; i += 16) {
+		ret = mt76x02_efuse_read(dev, base + i, buf + i, mode);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
+
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
+{
+	u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+	case BOARD_TYPE_5GHZ:
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	case BOARD_TYPE_2GHZ:
+		dev->mt76.cap.has_2ghz = true;
+		break;
+	default:
+		dev->mt76.cap.has_2ghz = true;
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
+
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
+{
+	u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	if (band == NL80211_BAND_5GHZ)
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+	else
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
+
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+			 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
+{
+	u16 val;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_LNA_GAIN);
+	*lna_2g = val & 0xff;
+	lna_5g[0] = val >> 8;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+	lna_5g[1] = val >> 8;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+	lna_5g[2] = val >> 8;
+
+	if (!mt76x02_field_valid(lna_5g[1]))
+		lna_5g[1] = lna_5g[0];
+
+	if (!mt76x02_field_valid(lna_5g[2]))
+		lna_5g[2] = lna_5g[0];
+
+	if (band == NL80211_BAND_2GHZ)
+		*rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+	else
+		*rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
+
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+			s8 *lna_2g, s8 *lna_5g,
+			struct ieee80211_channel *chan)
+{
+	u8 lna;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		lna = *lna_2g;
+	else if (chan->hw_value <= 64)
+		lna = lna_5g[0];
+	else if (chan->hw_value <= 128)
+		lna = lna_5g[1];
+	else
+		lna = lna_5g[2];
+
+	return lna != 0xff ? lna : 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_lna_gain);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
new file mode 100644
index 0000000..99941a4
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_EEPROM_H
+#define __MT76x02_EEPROM_H
+
+#include "mt76x02.h"
+
+enum mt76x02_eeprom_field {
+	MT_EE_CHIP_ID =				0x000,
+	MT_EE_VERSION =				0x002,
+	MT_EE_MAC_ADDR =			0x004,
+	MT_EE_PCI_ID =				0x00A,
+	MT_EE_ANTENNA =				0x022,
+	MT_EE_CFG1_INIT =			0x024,
+	MT_EE_NIC_CONF_0 =			0x034,
+	MT_EE_NIC_CONF_1 =			0x036,
+	MT_EE_COUNTRY_REGION_5GHZ =		0x038,
+	MT_EE_COUNTRY_REGION_2GHZ =		0x039,
+	MT_EE_FREQ_OFFSET =			0x03a,
+	MT_EE_NIC_CONF_2 =			0x042,
+
+	MT_EE_XTAL_TRIM_1 =			0x03a,
+	MT_EE_XTAL_TRIM_2 =			0x09e,
+
+	MT_EE_LNA_GAIN =			0x044,
+	MT_EE_RSSI_OFFSET_2G_0 =		0x046,
+	MT_EE_RSSI_OFFSET_2G_1 =		0x048,
+	MT_EE_LNA_GAIN_5GHZ_1 =			0x049,
+	MT_EE_RSSI_OFFSET_5G_0 =		0x04a,
+	MT_EE_RSSI_OFFSET_5G_1 =		0x04c,
+	MT_EE_LNA_GAIN_5GHZ_2 =			0x04d,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x050,
+	MT_EE_TX_POWER_DELTA_BW80 =		0x052,
+
+	MT_EE_TX_POWER_EXT_PA_5G =		0x054,
+
+	MT_EE_TX_POWER_0_START_2G =		0x056,
+	MT_EE_TX_POWER_1_START_2G =		0x05c,
+
+	/* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G		5
+#define MT_TX_POWER_GROUPS_5G			6
+	MT_EE_TX_POWER_0_START_5G =		0x062,
+	MT_EE_TSSI_SLOPE_2G =			0x06e,
+
+	MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =	0x074,
+	MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =	0x076,
+
+	MT_EE_TX_POWER_1_START_5G =		0x080,
+
+	MT_EE_TX_POWER_CCK =			0x0a0,
+	MT_EE_TX_POWER_OFDM_2G_6M =		0x0a2,
+	MT_EE_TX_POWER_OFDM_2G_24M =		0x0a4,
+	MT_EE_TX_POWER_OFDM_5G_6M =		0x0b2,
+	MT_EE_TX_POWER_OFDM_5G_24M =		0x0b4,
+	MT_EE_TX_POWER_HT_MCS0 =		0x0a6,
+	MT_EE_TX_POWER_HT_MCS4 =		0x0a8,
+	MT_EE_TX_POWER_HT_MCS8 =		0x0aa,
+	MT_EE_TX_POWER_HT_MCS12 =		0x0ac,
+	MT_EE_TX_POWER_VHT_MCS0 =		0x0ba,
+	MT_EE_TX_POWER_VHT_MCS4 =		0x0bc,
+	MT_EE_TX_POWER_VHT_MCS8 =		0x0be,
+
+	MT_EE_2G_TARGET_POWER =			0x0d0,
+	MT_EE_TEMP_OFFSET =			0x0d1,
+	MT_EE_5G_TARGET_POWER =			0x0d2,
+	MT_EE_TSSI_BOUND1 =			0x0d4,
+	MT_EE_TSSI_BOUND2 =			0x0d6,
+	MT_EE_TSSI_BOUND3 =			0x0d8,
+	MT_EE_TSSI_BOUND4 =			0x0da,
+	MT_EE_FREQ_OFFSET_COMPENSATION =	0x0db,
+	MT_EE_TSSI_BOUND5 =			0x0dc,
+	MT_EE_TX_POWER_BYRATE_BASE =		0x0de,
+
+	MT_EE_TSSI_SLOPE_5G =			0x0f0,
+	MT_EE_RF_TEMP_COMP_SLOPE_5G =		0x0f2,
+	MT_EE_RF_TEMP_COMP_SLOPE_2G =		0x0f4,
+
+	MT_EE_RF_2G_TSSI_OFF_TXPOWER =		0x0f6,
+	MT_EE_RF_2G_RX_HIGH_GAIN =		0x0f8,
+	MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =	0x0fa,
+	MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =	0x0fc,
+	MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =	0x0fe,
+
+	MT_EE_BT_RCAL_RESULT =			0x138,
+	MT_EE_BT_VCDL_CALIBRATION =		0x13c,
+	MT_EE_BT_PMUCFG =			0x13e,
+
+	MT_EE_USAGE_MAP_START =			0x1e0,
+	MT_EE_USAGE_MAP_END =			0x1fc,
+
+	__MT_EE_MAX
+};
+
+#define MT_EE_ANTENNA_DUAL			BIT(15)
+
+#define MT_EE_NIC_CONF_0_RX_PATH		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE		GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_PA_INT_2G		BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G		BIT(9)
+#define MT_EE_NIC_CONF_0_PA_IO_CURRENT		BIT(10)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL		BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_ANT_OPT		BIT(3)
+#define MT_EE_NIC_CONF_2_ANT_DIV		BIT(4)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+
+#define MT_EFUSE_USAGE_MAP_SIZE			(MT_EE_USAGE_MAP_END - \
+						 MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x02_eeprom_modes {
+	MT_EE_READ,
+	MT_EE_PHYSICAL_READ,
+};
+
+enum mt76x02_board_type {
+	BOARD_TYPE_2GHZ = 1,
+	BOARD_TYPE_5GHZ = 2,
+};
+
+static inline bool mt76x02_field_valid(u8 val)
+{
+	return val != 0 && val != 0xff;
+}
+
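+/* EEPROM power fields use a sign-magnitude style encoding: the top bit of
+ * the field selects the sign (set means positive here) and the remaining
+ * bits carry the magnitude. The _optional variant treats the bit above the
+ * field as an enable flag.
+ */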
+static inline int
+mt76x02_sign_extend(u32 val, unsigned int size)
+{
+	bool sign = val & BIT(size - 1);
+
+	val &= BIT(size - 1) - 1;
+
+	return sign ? val : -val;
+}
+
+static inline int
+mt76x02_sign_extend_optional(u32 val, unsigned int size)
+{
+	bool enable = val & BIT(size);
+
+	return enable ? mt76x02_sign_extend(val, size) : 0;
+}
+
+static inline s8 mt76x02_rate_power_val(u8 val)
+{
+	if (!mt76x02_field_valid(val))
+		return 0;
+
+	return mt76x02_sign_extend_optional(val, 7);
+}
+
+static inline int
+mt76x02_eeprom_get(struct mt76x02_dev *dev,
+		   enum mt76x02_eeprom_field field)
+{
+	if ((field & 1) || field >= __MT_EE_MAX)
+		return -1;
+
+	return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+			   int len, enum mt76x02_eeprom_modes mode);
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+			 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+			s8 *lna_2g, s8 *lna_5g,
+			struct ieee80211_channel *chan);
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+			enum mt76x02_eeprom_field field,
+			void *dest, int len);
+
+#endif /* __MT76x02_EEPROM_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
new file mode 100644
index 0000000..5c12cd7
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -0,0 +1,1126 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
+
+static enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+				 u8 key_idx, struct ieee80211_key_conf *key)
+{
+	enum mt76x02_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76x02_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+		     sizeof(key_data));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
+
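+/* Rebuild the packet number from the per-WCID IV/EIV registers so that
+ * mac80211's tx_pn counter matches what the hardware last used (TKIP and
+ * CCMP only).
+ */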
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76x02_cipher_type cipher;
+	u8 key_data[32];
+	u32 iv, eiv;
+	u64 pn;
+
+	cipher = mt76x02_mac_get_key_info(key, key_data);
+	iv = mt76_rr(dev, MT_WCID_IV(idx));
+	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+	pn = (u64)eiv << 16;
+	if (cipher == MT_CIPHER_TKIP) {
+		pn |= (iv >> 16) & 0xff;
+		pn |= (iv & 0xff) << 8;
+	} else if (cipher >= MT_CIPHER_AES_CCMP) {
+		pn |= iv & 0xffff;
+	} else {
+		return;
+	}
+
+	atomic64_set(&key->tx_pn, pn);
+}
+
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+			     struct ieee80211_key_conf *key)
+{
+	enum mt76x02_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+	u64 pn;
+
+	cipher = mt76x02_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+		pn = atomic64_read(&key->tx_pn);
+
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP) {
+			iv_data[3] |= 0x20;
+			put_unaligned_le32(pn >> 16, &iv_data[4]);
+		}
+
+		if (cipher == MT_CIPHER_TKIP) {
+			iv_data[0] = (pn >> 8) & 0xff;
+			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+			iv_data[2] = pn & 0xff;
+		} else if (cipher >= MT_CIPHER_AES_CCMP) {
+			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+		}
+	}
+
+	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	return 0;
+}
+
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
+			    u8 vif_idx, u8 *mac)
+{
+	struct mt76_wcid_addr addr = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	if (idx >= 128)
+		return;
+
+	if (mac)
+		memcpy(addr.macaddr, mac, ETH_ALEN);
+
+	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
+
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
+{
+	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+	u32 bit = MT_WCID_DROP_MASK(idx);
+
+	/* prevent unnecessary writes */
+	if ((val & bit) != (bit * drop))
+		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
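+/* Pack PHY type, rate index, bandwidth and short-GI into the 16-bit
+ * hardware rate value used by the TXWI and WCID rate registers; the number
+ * of spatial streams is returned through 'nss_val'.
+ */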
+static u16
+mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
+			const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u8 phy, rate_idx, nss, bw = 0;
+	u16 rateval;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 4);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		nss = 1;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return rateval;
+}
+
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+			       const struct ieee80211_tx_rate *rate)
+{
+	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+	u16 rateval;
+	u32 tx_info;
+	s8 nss;
+
+	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
+		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
+		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
+		  MT_WCID_TX_INFO_SET;
+	wcid->tx_info = tx_info;
+}
+
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
+{
+	if (enable)
+		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+	else
+		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+				struct mt76x02_tx_status *stat)
+{
+	u32 stat1, stat2;
+
+	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+	if (!stat->valid)
+		return false;
+
+	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+	trace_mac_txstat_fetch(dev, stat);
+
+	return true;
+}
+
+static int
+mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+			    enum nl80211_band band)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case MT_PHY_BW_80:
+		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+	return 0;
+}
+
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+			    struct sk_buff *skb, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta, int len)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	u32 wcid_tx_info;
+	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+	u16 txwi_flags = 0, rateval;
+	u8 nss;
+	s8 txpwr_adj, max_txpwr_adj;
+	u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
+
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
+	    ieee80211_has_protected(hdr->frame_control)) {
+		wcid = NULL;
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+	}
+
+	if (wcid)
+		txwi->wcid = wcid->idx;
+	else
+		txwi->wcid = 0xff;
+
+	if (wcid && wcid->sw_iv && key) {
+		u64 pn = atomic64_inc_return(&key->tx_pn);
+
+		ccmp_pn[0] = pn;
+		ccmp_pn[1] = pn >> 8;
+		ccmp_pn[2] = 0;
+		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+		ccmp_pn[4] = pn >> 16;
+		ccmp_pn[5] = pn >> 24;
+		ccmp_pn[6] = pn >> 32;
+		ccmp_pn[7] = pn >> 40;
+		txwi->iv = *((__le32 *)&ccmp_pn[0]);
+		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
+	}
+
+	if (wcid && (rate->idx < 0 || !rate->count)) {
+		wcid_tx_info = wcid->tx_info;
+		rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
+		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
+					  wcid_tx_info);
+		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
+	} else {
+		rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+	}
+	txwi->rate = cpu_to_le16(rateval);
+
+	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+					     max_txpwr_adj);
+	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
+		txwi->txstream = 0x13;
+	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
+		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+		txwi->txstream = 0x93;
+
+	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+		txwi_flags |= MT_TXWI_FLAGS_MMPS;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size - 1);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+				    sta->ht_cap.ampdu_density);
+	}
+
+	if (ieee80211_is_probe_resp(hdr->frame_control) ||
+	    ieee80211_is_beacon(hdr->frame_control))
+		txwi_flags |= MT_TXWI_FLAGS_TS;
+
+	txwi->flags |= cpu_to_le16(txwi_flags);
+	txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
+
+static void
+mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
+{
+	u8 mcs, nss;
+
+	if (!idx)
+		return;
+
+	rates += idx - 1;
+	rates[1] = rates[0];
+	switch (phy) {
+	case MT_PHY_TYPE_VHT:
+		mcs = ieee80211_rate_get_vht_mcs(rates);
+		nss = ieee80211_rate_get_vht_nss(rates);
+
+		if (mcs == 0)
+			nss = max_t(int, nss - 1, 1);
+		else
+			mcs--;
+
+		ieee80211_rate_set_vht(rates + 1, mcs, nss);
+		break;
+	case MT_PHY_TYPE_HT_GF:
+	case MT_PHY_TYPE_HT:
+		/* MCS 8 falls back to MCS 0 */
+		if (rates[0].idx == 8) {
+			rates[1].idx = 0;
+			break;
+		}
+		/* fall through */
+	default:
+		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
+		break;
+	}
+}
+
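+/* Convert a TX status FIFO entry into mac80211 rate/retry entries,
+ * deriving the intermediate fallback rates, and fill in the A-MPDU
+ * accounting.
+ */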
+static void
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
+			   struct ieee80211_tx_info *info,
+			   struct mt76x02_tx_status *st, int n_frames)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	struct ieee80211_tx_rate last_rate;
+	u16 first_rate;
+	int retry = st->retry;
+	int phy;
+	int i;
+
+	if (!n_frames)
+		return;
+
+	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
+
+	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
+		first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
+		first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
+
+		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
+					    dev->mt76.chandef.chan->band);
+	} else if (rate[0].idx < 0) {
+		if (!msta)
+			return;
+
+		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
+					    dev->mt76.chandef.chan->band);
+	}
+
+	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
+				    dev->mt76.chandef.chan->band);
+
+	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+		retry--;
+		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
+			info->status.rates[i] = last_rate;
+			info->status.rates[i].count = max_t(int, retry, 1);
+			break;
+		}
+
+		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
+		if (info->status.rates[i].idx == last_rate.idx)
+			break;
+	}
+
+	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
+		info->status.rates[i + 1].idx = -1;
+		info->status.rates[i + 1].count = 0;
+	}
+
+	info->status.ampdu_len = n_frames;
+	info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
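+/* Identical status reports for aggregated frames are coalesced per station
+ * and only pushed to mac80211 once the rate/retry values change or 32
+ * frames have accumulated.
+ */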
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+			    struct mt76x02_tx_status *stat, u8 *update)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_tx_status status = {
+		.info = &info
+	};
+	struct mt76_wcid *wcid = NULL;
+	struct mt76x02_sta *msta = NULL;
+	struct mt76_dev *mdev = &dev->mt76;
+	struct sk_buff_head list;
+
+	if (stat->pktid == MT_PACKET_ID_NO_ACK)
+		return;
+
+	rcu_read_lock();
+
+	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
+		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
+
+	if (wcid && wcid->sta) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+		status.sta = container_of(priv, struct ieee80211_sta,
+					  drv_priv);
+	}
+
+	mt76_tx_status_lock(mdev, &list);
+
+	if (wcid) {
+		if (mt76_is_skb_pktid(stat->pktid))
+			status.skb = mt76_tx_status_skb_get(mdev, wcid,
+							    stat->pktid, &list);
+		if (status.skb)
+			status.info = IEEE80211_SKB_CB(status.skb);
+	}
+
+	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
+		mt76_tx_status_unlock(mdev, &list);
+		rcu_read_unlock();
+		return;
+	}
+
+	if (msta && stat->aggr && !status.skb) {
+		u32 stat_val, stat_cache;
+
+		stat_val = stat->rate;
+		stat_val |= ((u32)stat->retry) << 16;
+		stat_cache = msta->status.rate;
+		stat_cache |= ((u32)msta->status.retry) << 16;
+
+		if (*update == 0 && stat_val == stat_cache &&
+		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+			msta->n_frames++;
+			mt76_tx_status_unlock(mdev, &list);
+			rcu_read_unlock();
+			return;
+		}
+
+		mt76x02_mac_fill_tx_status(dev, msta, status.info,
+					   &msta->status, msta->n_frames);
+
+		msta->status = *stat;
+		msta->n_frames = 1;
+		*update = 0;
+	} else {
+		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
+		*update = 1;
+	}
+
+	if (status.skb)
+		mt76_tx_status_skb_done(mdev, status.skb, &list);
+	mt76_tx_status_unlock(mdev, &list);
+
+	if (!status.skb)
+		ieee80211_tx_status_ext(mt76_hw(dev), &status);
+	rcu_read_unlock();
+}
+
+static int
+mt76x02_mac_process_rate(struct mt76x02_dev *dev,
+			 struct mt76_rx_status *status,
+			 u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (idx >= 8)
+			idx = 0;
+
+		if (status->band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (idx >= 4)
+			idx = 0;
+
+		status->rate_idx = idx;
+		return 0;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT: {
+		u8 n_rxstream = dev->mt76.chainmask & 0xf;
+
+		status->encoding = RX_ENC_VHT;
+		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+		status->nss = min_t(u8, n_rxstream,
+				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+	if (rate & MT_RXWI_RATE_LDPC)
+		status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		status->bw = RATE_INFO_BW_40;
+		break;
+	case MT_PHY_BW_80:
+		status->bw = RATE_INFO_BW_80;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
+{
+	static const u8 null_addr[ETH_ALEN] = {};
+	int i;
+
+	ether_addr_copy(dev->mt76.macaddr, addr);
+
+	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+		eth_random_addr(dev->mt76.macaddr);
+		dev_info(dev->mt76.dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->mt76.macaddr);
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1,
+		get_unaligned_le16(dev->mt76.macaddr + 4) |
+		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+	mt76_wr(dev, MT_MAC_BSSID_DW0,
+		get_unaligned_le32(dev->mt76.macaddr));
+	mt76_wr(dev, MT_MAC_BSSID_DW1,
+		get_unaligned_le16(dev->mt76.macaddr + 4) |
+		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
+		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+	for (i = 0; i < 16; i++)
+		mt76x02_mac_set_bssid(dev, i, null_addr);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
+
+static int
+mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
+{
+	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
+
+	rssi += cal->rssi_offset[chain];
+	rssi -= cal->lna_gain;
+
+	return rssi;
+}
+
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+			   void *rxi)
+{
+	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+	struct mt76x02_rxwi *rxwi = rxi;
+	struct mt76x02_sta *sta;
+	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+	u32 ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+	int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+	s8 signal;
+	u8 pn_len;
+	u8 wcid;
+	int len;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return -EINVAL;
+
+	if (rxinfo & MT_RXINFO_L2PAD)
+		pad_len += 2;
+
+	if (rxinfo & MT_RXINFO_DECRYPT) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED;
+		status->flag |= RX_FLAG_MIC_STRIPPED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+	}
+
+	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
+	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+	if (pn_len) {
+		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+		u8 *data = skb->data + offset;
+
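+		/* record the CCMP packet number (byte-swapped here) before the IV is stripped, so replay checks can still be performed */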
+		status->iv[0] = data[7];
+		status->iv[1] = data[6];
+		status->iv[2] = data[5];
+		status->iv[3] = data[4];
+		status->iv[4] = data[1];
+		status->iv[5] = data[0];
+
+		/*
+		 * Driver CCMP validation can't deal with fragments.
+		 * Let mac80211 take care of it.
+		 */
+		if (rxinfo & MT_RXINFO_FRAG) {
+			status->flag &= ~RX_FLAG_IV_STRIPPED;
+		} else {
+			pad_len += pn_len << 2;
+			len -= pn_len << 2;
+		}
+	}
+
+	mt76x02_remove_hdr_pad(skb, pad_len);
+
+	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+		status->aggr = true;
+
+	if (WARN_ON_ONCE(len > skb->len))
+		return -EINVAL;
+
+	pskb_trim(skb, len);
+
+	status->chains = BIT(0);
+	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
+	status->chain_signal[0] = signal;
+	if (nstreams > 1) {
+		status->chains |= BIT(1);
+		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
+							       rxwi->rssi[1],
+							       1);
+		signal = max_t(s8, signal, status->chain_signal[1]);
+	}
+	status->signal = signal;
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+
+	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+	return mt76x02_mac_process_rate(dev, status, rate);
+}
+
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
+{
+	struct mt76x02_tx_status stat = {};
+	u8 update = 1;
+	bool ret;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return;
+
+	trace_mac_txstat_poll(dev);
+
+	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+		if (!spin_trylock(&dev->txstatus_fifo_lock))
+			break;
+
+		ret = mt76x02_mac_load_tx_status(dev, &stat);
+		spin_unlock(&dev->txstatus_fifo_lock);
+
+		if (!ret)
+			break;
+
+		if (!irq) {
+			mt76x02_send_tx_status(dev, &stat, &update);
+			continue;
+		}
+
+		kfifo_put(&dev->txstatus_fifo, stat);
+	}
+}
+
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			     struct mt76_queue_entry *e)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76x02_txwi *txwi;
+	u8 *txwi_ptr;
+
+	if (!e->txwi) {
+		dev_kfree_skb_any(e->skb);
+		return;
+	}
+
+	mt76x02_mac_poll_tx_status(dev, false);
+
+	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+	txwi = (struct mt76x02_txwi *)txwi_ptr;
+	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+
+	mt76_tx_complete_skb(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
+
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
+{
+	u32 data = 0;
+
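+	/* a threshold of ~0 means RTS/CTS is disabled; leave the protection control bits cleared in that case */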
+	if (val != ~0)
+		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+		       MT_PROT_CFG_RTS_THRESH;
+
+	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+	mt76_rmw(dev, MT_CCK_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+	mt76_rmw(dev, MT_OFDM_PROT_CFG,
+		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
+
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+				   int ht_mode)
+{
+	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+	u32 prot[6];
+	u32 vht_prot[3];
+	int i;
+	u16 rts_thr;
+
+	for (i = 0; i < ARRAY_SIZE(prot); i++) {
+		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
+		prot[i] &= ~MT_PROT_CFG_CTRL;
+		if (i >= 2)
+			prot[i] &= ~MT_PROT_CFG_RATE;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
+		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
+		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
+	}
+
+	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
+
+	if (rts_thr != 0xffff)
+		prot[0] |= MT_PROT_CTRL_RTS_CTS;
+
+	if (legacy_prot) {
+		prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+		prot[2] |= MT_PROT_RATE_CCK_11;
+		prot[3] |= MT_PROT_RATE_CCK_11;
+		prot[4] |= MT_PROT_RATE_CCK_11;
+		prot[5] |= MT_PROT_RATE_CCK_11;
+
+		vht_prot[0] |= MT_PROT_RATE_CCK_11;
+		vht_prot[1] |= MT_PROT_RATE_CCK_11;
+		vht_prot[2] |= MT_PROT_RATE_CCK_11;
+	} else {
+		if (rts_thr != 0xffff)
+			prot[1] |= MT_PROT_CTRL_RTS_CTS;
+
+		prot[2] |= MT_PROT_RATE_OFDM_24;
+		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+		prot[4] |= MT_PROT_RATE_OFDM_24;
+		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+
+		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
+		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
+		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
+	}
+
+	switch (mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		prot[2] |= MT_PROT_CTRL_RTS_CTS;
+		prot[3] |= MT_PROT_CTRL_RTS_CTS;
+		prot[4] |= MT_PROT_CTRL_RTS_CTS;
+		prot[5] |= MT_PROT_CTRL_RTS_CTS;
+		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
+		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+		break;
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		prot[3] |= MT_PROT_CTRL_RTS_CTS;
+		prot[5] |= MT_PROT_CTRL_RTS_CTS;
+		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+		break;
+	}
+
+	if (non_gf) {
+		prot[4] |= MT_PROT_CTRL_RTS_CTS;
+		prot[5] |= MT_PROT_CTRL_RTS_CTS;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(prot); i++)
+		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
+		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
+}
+
+void mt76x02_update_channel(struct mt76_dev *mdev)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76_channel_state *state;
+	u32 active, busy;
+
+	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+	busy = mt76_rr(dev, MT_CH_BUSY);
+	active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+	spin_lock_bh(&dev->mt76.cc_lock);
+	state->cc_busy += busy;
+	state->cc_active += active;
+	spin_unlock_bh(&dev->mt76.cc_lock);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_channel);
+
+static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
+{
+	u32 val = mt76_rr(dev, 0x10f4);
+
+	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+		return;
+
+	dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+
+	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+	udelay(10);
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
+{
+	if (enable) {
+		u32 data;
+
+		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+		/* enable pa-lna */
+		data = mt76_rr(dev, MT_TX_PIN_CFG);
+		data |= MT_TX_PIN_CFG_TXANT |
+			MT_TX_PIN_CFG_RXANT |
+			MT_TX_PIN_RFTR_EN |
+			MT_TX_PIN_TRSW_EN;
+		mt76_wr(dev, MT_TX_PIN_CFG, data);
+	} else {
+		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+		/* disable pa-lna */
+		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
+		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
+	}
+	dev->ed_tx_blocked = !enable;
+}
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev)
+{
+	dev->ed_trigger = 0;
+	dev->ed_silent = 0;
+
+	if (dev->ed_monitor) {
+		struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
+
+		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
+			 ed_th << 8 | ed_th);
+		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+	} else {
+		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+		if (is_mt76x2(dev)) {
+			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+			mt76_set(dev, MT_TXOP_HLDR_ET,
+				 MT_TXOP_HLDR_TX40M_BLK_EN);
+		} else {
+			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
+			mt76_clear(dev, MT_TXOP_HLDR_ET,
+				   MT_TXOP_HLDR_TX40M_BLK_EN);
+		}
+	}
+	mt76x02_edcca_tx_enable(dev, true);
+	dev->ed_monitor_learning = true;
+
+	/* clear previous CCA timer value */
+	mt76_rr(dev, MT_ED_CCA_TIMER);
+	dev->ed_time = ktime_get_boottime();
+}
+EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
+
+#define MT_EDCCA_TH		92
+#define MT_EDCCA_BLOCK_TH	2
+#define MT_EDCCA_LEARN_TH	50
+#define MT_EDCCA_LEARN_CCA	180
+#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)
+
+static void mt76x02_edcca_check(struct mt76x02_dev *dev)
+{
+	ktime_t cur_time;
+	u32 active, val, busy;
+
+	cur_time = ktime_get_boottime();
+	val = mt76_rr(dev, MT_ED_CCA_TIMER);
+
+	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+	dev->ed_time = cur_time;
+
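+	/* the CCA timer accumulates energy-detect busy time in microseconds and is cleared on read, so this yields the busy percentage since the last poll */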
+	busy = (val * 100) / active;
+	busy = min_t(u32, busy, 100);
+
+	if (busy > MT_EDCCA_TH) {
+		dev->ed_trigger++;
+		dev->ed_silent = 0;
+	} else {
+		dev->ed_silent++;
+		dev->ed_trigger = 0;
+	}
+
+	if (dev->cal.agc_lowest_gain &&
+	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+		dev->ed_monitor_learning = false;
+		dev->ed_trigger_timeout = jiffies + MT_EDCCA_LEARN_TIMEOUT;
+	} else if (!dev->ed_monitor_learning &&
+		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
+		dev->ed_monitor_learning = true;
+		mt76x02_edcca_tx_enable(dev, true);
+	}
+
+	if (dev->ed_monitor_learning)
+		return;
+
+	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
+		mt76x02_edcca_tx_enable(dev, false);
+	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
+		mt76x02_edcca_tx_enable(dev, true);
+}
+
+void mt76x02_mac_work(struct work_struct *work)
+{
+	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+					       mt76.mac_work.work);
+	int i, idx;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt76x02_update_channel(&dev->mt76);
+	for (i = 0, idx = 0; i < 16; i++) {
+		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+		dev->aggr_stats[idx++] += val & 0xffff;
+		dev->aggr_stats[idx++] += val >> 16;
+	}
+
+	if (!dev->mt76.beacon_mask)
+		mt76x02_check_mac_err(dev);
+
+	if (dev->ed_monitor)
+		mt76x02_edcca_check(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	mt76_tx_status_check(&dev->mt76, NULL, false);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT_MAC_WORK_INTERVAL);
+}
+
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
+{
+	idx &= 7;
+	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+		       get_unaligned_le16(addr + 4));
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
new file mode 100644
index 0000000..efa4ef9
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ */
+
+#ifndef __MT76X02_MAC_H
+#define __MT76X02_MAC_H
+
+struct mt76x02_dev;
+
+struct mt76x02_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+#define MT_VIF_WCID(_n)		(254 - ((_n) & 7))
+#define MT_MAX_VIFS		8
+
+struct mt76x02_vif {
+	struct mt76_wcid group_wcid; /* must be first */
+	u8 idx;
+};
+
+struct mt76x02_sta {
+	struct mt76_wcid wcid; /* must be first */
+
+	struct mt76x02_vif *vif;
+	struct mt76x02_tx_status status;
+	int n_frames;
+};
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_UNICAST		BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0		BIT(22)
+#define MT_RXINFO_SW_FTYPE1		BIT(23)
+#define MT_RXINFO_PROBE_RESP		BIT(24)
+#define MT_RXINFO_BEACON		BIT(25)
+#define MT_RXINFO_DISASSOC		BIT(26)
+#define MT_RXINFO_DEAUTH		BIT(27)
+#define MT_RXINFO_ACTION		BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF			BIT(31)
+
+#define MT_RXWI_TID			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX		GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC		BIT(6)
+#define MT_RXWI_RATE_BW			GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI		BIT(9)
+#define MT_RXWI_RATE_STBC		BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM		BIT(11)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX		GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS		GENMASK(5, 4)
+
+struct mt76x02_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 tid_sn;
+	__le16 rate;
+
+	u8 rssi[4];
+
+	__le32 bbp_rxinfo[4];
+};
+
+#define MT_TX_PWR_ADJ			GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+	MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS		BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG		BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW		GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+struct mt76x02_txwi {
+	__le16 flags;
+	__le16 rate;
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+	__le32 iv;
+	__le32 eiv;
+	u8 aid;
+	u8 txstream;
+	u8 ctl2;
+	u8 pktid;
+} __packed __aligned(4);
+
+static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
+{
+	const u32 MAC_CSR0 = 0x1000;
+	int i;
+
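+	/* poll MAC_CSR0 for up to a few seconds; reading 0 or ~0 means the MAC is not responding yet */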
+	for (i = 0; i < 500; i++) {
+		if (test_bit(MT76_REMOVED, &dev->state))
+			return false;
+
+		switch (dev->bus->rr(dev, MAC_CSR0)) {
+		case 0:
+		case ~0:
+			break;
+		default:
+			return true;
+		}
+		usleep_range(5000, 10000);
+	}
+	return false;
+}
+
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+				 u8 key_idx, struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+			     struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+			      struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
+			    u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+			       const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+				struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+			    struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+			   void *rxi);
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+				   int ht_mode);
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val);
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+			    struct sk_buff *skb, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta, int len);
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			     struct mt76_queue_entry *e);
+void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_mac_work(struct work_struct *work);
+
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+			   struct sk_buff *skb);
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+				   struct ieee80211_vif *vif, bool enable);
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev);
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
new file mode 100644
index 0000000..4be7a24
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x02_mcu.h"
+
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+			 int len, bool wait_resp)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	unsigned long expires = jiffies + HZ;
+	struct sk_buff *skb;
+	u32 tx_info;
+	int ret;
+	u8 seq;
+
+	skb = mt76x02_mcu_msg_alloc(data, len);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&mdev->mmio.mcu.mutex);
+
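+	/* the 4-bit sequence number skips zero so a response can always be matched against a non-zero seq */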
+	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+	if (!seq)
+		seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+
+	tx_info = MT_MCU_MSG_TYPE_CMD |
+		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+	ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
+	if (ret)
+		goto out;
+
+	while (wait_resp) {
+		u32 *rxfce;
+		bool check_seq = false;
+
+		skb = mt76_mcu_get_response(&dev->mt76, expires);
+		if (!skb) {
+			dev_err(mdev->dev,
+				"MCU message %d (seq %d) timed out\n", cmd,
+				seq);
+			ret = -ETIMEDOUT;
+			dev->mcu_timeout = 1;
+			break;
+		}
+
+		rxfce = (u32 *)skb->cb;
+
+		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+			check_seq = true;
+
+		dev_kfree_skb(skb);
+		if (check_seq)
+			break;
+	}
+
+out:
+	mutex_unlock(&mdev->mmio.mcu.mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
+
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+				u32 val)
+{
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+	bool wait = false;
+
+	if (func != Q_SELECT)
+		wait = true;
+
+	return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
+
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
+{
+	struct {
+		__le32 mode;
+		__le32 level;
+	} __packed __aligned(4) msg = {
+		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+		.level = cpu_to_le32(0),
+	};
+
+	return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg),
+				 false);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
+
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
+{
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(type),
+		.value = cpu_to_le32(param),
+	};
+	bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
+	int ret;
+
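+	/* on MT76x2E the MCU signals calibration completion via bit 31 of MT_MCU_COM_REG0: clear it before the command and poll for it afterwards */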
+	if (is_mt76x2e)
+		mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+
+	ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+				true);
+	if (ret)
+		return ret;
+
+	if (is_mt76x2e &&
+	    WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+				    BIT(31), BIT(31), 100)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
+{
+	struct sk_buff *skb;
+
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+	usleep_range(20000, 30000);
+
+	while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
+		dev_kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
+
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+			       const struct mt76x02_fw_header *h)
+{
+	u16 bld = le16_to_cpu(h->build_ver);
+	u16 ver = le16_to_cpu(h->fw_ver);
+
+	snprintf(dev->mt76.hw->wiphy->fw_version,
+		 sizeof(dev->mt76.hw->wiphy->fw_version),
+		 "%d.%d.%02d-b%x",
+		 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
new file mode 100644
index 0000000..c81a965
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_MCU_H
+#define __MT76x02_MCU_H
+
+#include "mt76x02.h"
+
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+
+#define MT_INBAND_PACKET_MAX_LEN	192
+#define MT_MCU_MEMMAP_WLAN		0x410000
+
+#define MT_MCU_PCIE_REMAP_BASE4		0x074C
+
+#define MT_MCU_SEMAPHORE_00		0x07B0
+#define MT_MCU_SEMAPHORE_01		0x07B4
+#define MT_MCU_SEMAPHORE_02		0x07B8
+#define MT_MCU_SEMAPHORE_03		0x07BC
+
+#define MT_MCU_ILM_ADDR			0x80000
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	BW_SETTING = 2,
+	USB2_SW_DISCONNECT = 2,
+	USB3_SW_DISCONNECT = 3,
+	LOG_FW_DEBUG_MSG = 4,
+	GET_FW_VERSION = 5,
+};
+
+struct mt76x02_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76x02_patch_header {
+	char build_time[16];
+	char platform[4];
+	char hw_version[4];
+	char patch_version[4];
+	u8 pad[2];
+};
+
+static inline struct sk_buff *
+mt76x02_mcu_msg_alloc(const void *data, int len)
+{
+	return mt76_mcu_msg_alloc(data, 0, len, 0);
+}
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+			 int len, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+				u32 val);
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on);
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+			       const struct mt76x02_fw_header *h);
+
+#endif /* __MT76x02_MCU_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
new file mode 100644
index 0000000..dc77307
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+
+#include "mt76x02.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_trace.h"
+
+static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
+{
+	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+	struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
+	struct beacon_bc_data data = {};
+	struct sk_buff *skb;
+	int i;
+
+	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+		return;
+
+	mt76x02_resync_beacon_timer(dev);
+
+	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		mt76x02_update_beacon_iter, dev);
+
+	mt76_csa_check(&dev->mt76);
+
+	if (dev->mt76.csa_complete)
+		return;
+
+	mt76x02_enqueue_buffered_bc(dev, &data, 8);
+
+	if (!skb_queue_len(&data.q))
+		return;
+
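+	/* clear the more-data flag on the last buffered frame queued for each interface */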
+	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+		if (!data.tail[i])
+			continue;
+
+		mt76_skb_set_moredata(data.tail[i], false);
+	}
+
+	spin_lock_bh(&q->lock);
+	while ((skb = __skb_dequeue(&data.q)) != NULL) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_vif *vif = info->control.vif;
+		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
+				  NULL);
+	}
+	spin_unlock_bh(&q->lock);
+}
+
+static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+	if (en)
+		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+	else
+		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+}
+
+static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+	if (en)
+		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+	else
+		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
+{
+	static const struct mt76x02_beacon_ops beacon_ops = {
+		.nslots = 8,
+		.slot_size = 1024,
+		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
+		.beacon_enable = mt76x02e_beacon_enable,
+	};
+
+	dev->beacon_ops = &beacon_ops;
+
+	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+		       8 << 4);
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+		       MT_DFS_GP_INTERVAL);
+	mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+	mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
+
+static int
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
+		      int idx, int n_desc)
+{
+	struct mt76_queue *hwq;
+	int err;
+
+	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+	if (!hwq)
+		return -ENOMEM;
+
+	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+	if (err < 0)
+		return err;
+
+	INIT_LIST_HEAD(&q->swq);
+	q->q = hwq;
+
+	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+	return 0;
+}
+
+static int
+mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+		      int idx, int n_desc, int bufsize)
+{
+	int err;
+
+	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+			       MT_RX_RING_BASE);
+	if (err < 0)
+		return err;
+
+	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+	return 0;
+}
+
+static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
+{
+	struct mt76x02_tx_status stat;
+	u8 update = 1;
+
+	while (kfifo_get(&dev->txstatus_fifo, &stat))
+		mt76x02_send_tx_status(dev, &stat, &update);
+}
+
+static void mt76x02_tx_tasklet(unsigned long data)
+{
+	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+
+	mt76x02_mac_poll_tx_status(dev, false);
+	mt76x02_process_tx_status_fifo(dev);
+
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
+					       mt76.tx_napi);
+	int i;
+
+	mt76x02_mac_poll_tx_status(dev, false);
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	if (napi_complete_done(napi, 0))
+		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	return 0;
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev)
+{
+	struct mt76_txwi_cache __maybe_unused *t;
+	int i, ret, fifo_size;
+	struct mt76_queue *q;
+	void *status_fifo;
+
+	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
+
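+	/* kfifo requires a power-of-two buffer size; reserve room for 32 pending tx status entries */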
+	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
+	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+	if (!status_fifo)
+		return -ENOMEM;
+
+	tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
+		     (unsigned long)dev);
+	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
+		     (unsigned long)dev);
+
+	spin_lock_init(&dev->txstatus_fifo_lock);
+	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+	mt76_dma_attach(&dev->mt76);
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
+					    mt76_ac_to_hwq(i),
+					    MT_TX_RING_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
+	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
+				    MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76_init_queues(dev);
+	if (ret)
+		return ret;
+
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+			  mt76x02_poll_tx, NAPI_POLL_WEIGHT);
+	napi_enable(&dev->mt76.tx_napi);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_init);
+
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+	struct mt76x02_dev *dev;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
+
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
+{
+	struct mt76x02_dev *dev = dev_instance;
+	u32 intr;
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return IRQ_NONE;
+
+	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+
+	intr &= dev->mt76.mmio.irqmask;
+
+	if (intr & MT_INT_RX_DONE(0)) {
+		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+		napi_schedule(&dev->mt76.napi[0]);
+	}
+
+	if (intr & MT_INT_RX_DONE(1)) {
+		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+		napi_schedule(&dev->mt76.napi[1]);
+	}
+
+	if (intr & MT_INT_PRE_TBTT)
+		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
+
+	/* send buffered multicast frames now */
+	if (intr & MT_INT_TBTT) {
+		if (dev->mt76.csa_complete)
+			mt76_csa_finish(&dev->mt76);
+		else
+			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
+	}
+
+	if (intr & MT_INT_TX_STAT)
+		mt76x02_mac_poll_tx_status(dev, true);
+
+	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
+		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		napi_schedule(&dev->mt76.tx_napi);
+	}
+
+	if (intr & MT_INT_GPTIMER) {
+		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
+
+static void mt76x02_dma_enable(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+	usleep_range(50, 100);
+
+	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
+	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
+	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
+{
+	tasklet_kill(&dev->mt76.tx_tasklet);
+	mt76_dma_cleanup(&dev->mt76);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
+
+void mt76x02_dma_disable(struct mt76x02_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev)
+{
+	mt76x02_dma_enable(dev);
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+	mt76x02_irq_enable(dev,
+			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+			   MT_INT_TX_STAT);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_start);
+
+static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
+{
+	u32 dma_idx, prev_dma_idx;
+	struct mt76_queue *q;
+	int i;
+
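+	/* a queue counts as hung if it still has frames queued but its DMA index has not advanced since the previous watchdog pass */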
+	for (i = 0; i < 4; i++) {
+		q = dev->mt76.q_tx[i].q;
+
+		if (!q->queued)
+			continue;
+
+		prev_dma_idx = dev->mt76.tx_dma_idx[i];
+		dma_idx = readl(&q->regs->dma_idx);
+		dev->mt76.tx_dma_idx[i] = dma_idx;
+
+		if (prev_dma_idx == dma_idx)
+			break;
+	}
+
+	return i < 4;
+}
+
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct ieee80211_key_conf *key, void *data)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	struct mt76_wcid *wcid;
+
+	if (!sta)
+		return;
+
+	wcid = (struct mt76_wcid *)sta->drv_priv;
+
+	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+		return;
+
+	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+	int i;
+
+	lockdep_assert_held(&dev->mt76.mutex);
+
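+	/* tear down all driver station and key state so ieee80211_restart_hw() can rebuild it from scratch */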
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+	rcu_read_lock();
+	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+	rcu_read_unlock();
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+		struct ieee80211_sta *sta;
+		struct ieee80211_vif *vif;
+		struct mt76x02_sta *msta;
+		struct mt76_wcid *wcid;
+		void *priv;
+
+		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+					lockdep_is_held(&dev->mt76.mutex));
+		if (!wcid)
+			continue;
+
+		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+		sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+		priv = msta->vif;
+		vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+		__mt76_sta_remove(&dev->mt76, vif, sta);
+		memset(msta, 0, sizeof(*msta));
+	}
+
+	dev->vif_mask = 0;
+	dev->mt76.beacon_mask = 0;
+}
+
+static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
+{
+	u32 mask = dev->mt76.mmio.irqmask;
+	bool restart = dev->mt76.mcu_ops->mcu_restart;
+	int i;
+
+	ieee80211_stop_queues(dev->mt76.hw);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	tasklet_disable(&dev->mt76.tx_tasklet);
+	napi_disable(&dev->mt76.tx_napi);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
+		napi_disable(&dev->mt76.napi[i]);
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (restart)
+		mt76x02_reset_state(dev);
+
+	if (dev->mt76.beacon_mask)
+		mt76_clear(dev, MT_BEACON_TIME_CFG,
+			   MT_BEACON_TIME_CFG_BEACON_TX |
+			   MT_BEACON_TIME_CFG_TBTT_EN);
+
+	mt76x02_irq_disable(dev, mask);
+
+	/* perform device reset */
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+	usleep_range(5000, 10000);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);
+
+	/* let fw reset DMA */
+	mt76_set(dev, 0x734, 0x3);
+
+	if (restart)
+		mt76_mcu_restart(dev);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+		mt76_queue_tx_cleanup(dev, i, true);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+		mt76_queue_rx_reset(dev, i);
+
+	mt76x02_mac_start(dev);
+
+	if (dev->ed_monitor)
+		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+	if (dev->mt76.beacon_mask && !restart)
+		mt76_set(dev, MT_BEACON_TIME_CFG,
+			 MT_BEACON_TIME_CFG_BEACON_TX |
+			 MT_BEACON_TIME_CFG_TBTT_EN);
+
+	mt76x02_irq_enable(dev, mask);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	clear_bit(MT76_RESET, &dev->mt76.state);
+
+	tasklet_enable(&dev->mt76.tx_tasklet);
+	napi_enable(&dev->mt76.tx_napi);
+	napi_schedule(&dev->mt76.tx_napi);
+
+	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
+		napi_enable(&dev->mt76.napi[i]);
+		napi_schedule(&dev->mt76.napi[i]);
+	}
+
+	if (restart) {
+		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+		ieee80211_restart_hw(dev->mt76.hw);
+	} else {
+		ieee80211_wake_queues(dev->mt76.hw);
+		mt76_txq_schedule_all(&dev->mt76);
+	}
+}
+
+static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
+{
+	if (mt76x02_tx_hang(dev)) {
+		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
+			goto restart;
+	} else {
+		dev->tx_hang_check = 0;
+	}
+
+	if (dev->mcu_timeout)
+		goto restart;
+
+	return;
+
+restart:
+	mt76x02_watchdog_reset(dev);
+
+	mutex_lock(&dev->mt76.mmio.mcu.mutex);
+	dev->mcu_timeout = 0;
+	mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+
+	dev->tx_hang_reset++;
+	dev->tx_hang_check = 0;
+	memset(dev->mt76.tx_dma_idx, 0xff,
+	       sizeof(dev->mt76.tx_dma_idx));
+}
+
+void mt76x02_wdt_work(struct work_struct *work)
+{
+	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+					       wdt_work.work);
+
+	mt76x02_check_tx_hang(dev);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+				     MT_WATCHDOG_TIME);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
new file mode 100644
index 0000000..d733426
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+#include "mt76x02_phy.h"
+
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~BIT(4);
+
+	switch (dev->mt76.chainmask & 0xf) {
+	case 2:
+		val |= BIT(3);
+		break;
+	default:
+		val &= ~BIT(3);
+		break;
+	}
+
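+	/* write the new rx path selection; the barrier and read-back below make sure it has reached the hardware */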
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+	mb();
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
+
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
+{
+	int txpath;
+
+	txpath = (dev->mt76.chainmask >> 8) & 0xf;
+	switch (txpath) {
+	case 2:
+		mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+		break;
+	default:
+		mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
+
+static u32
+mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+	u32 val = 0;
+
+	val |= (v1 & (BIT(6) - 1)) << 0;
+	val |= (v2 & (BIT(6) - 1)) << 8;
+	val |= (v3 & (BIT(6) - 1)) << 16;
+	val |= (v4 & (BIT(6) - 1)) << 24;
+	return val;
+}
+
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
+{
+	s8 ret = 0;
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		ret = max(ret, r->all[i]);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
+
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		if (r->all[i] > limit)
+			r->all[i] = limit;
+}
+EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		r->all[i] += offset;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
+
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
+{
+	struct mt76_rate_power *t = &dev->mt76.rate_power;
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+	mt76_wr(dev, MT_TX_PWR_CFG_0,
+		mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+				      t->ofdm[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_1,
+		mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+				      t->ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_2,
+		mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+				      t->ht[10]));
+	mt76_wr(dev, MT_TX_PWR_CFG_3,
+		mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+				      t->stbc[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_4,
+		mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_7,
+		mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+				      t->vht[9]));
+	mt76_wr(dev, MT_TX_PWR_CFG_8,
+		mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+	mt76_wr(dev, MT_TX_PWR_CFG_9,
+		mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
+
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+	int core_val, agc_val;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_80:
+		core_val = 3;
+		agc_val = 7;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		core_val = 2;
+		agc_val = 3;
+		break;
+	default:
+		core_val = 0;
+		agc_val = 1;
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
+
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+			  bool primary_upper)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	case NL80211_BAND_5GHZ:
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+		       primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
+
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+	u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+	bool ret = false;
+	u32 false_cca;
+
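+	/* increase the AGC gain adjustment while false CCA events are frequent, and back it off once they subside */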
+	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+			      mt76_rr(dev, MT_RX_STAT_1));
+	dev->cal.false_cca = false_cca;
+	if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
+		dev->cal.agc_gain_adjust += 2;
+		ret = true;
+	} else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+		   (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
+		dev->cal.agc_gain_adjust -= 2;
+		ret = true;
+	}
+
+	dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
+
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
+{
+	dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
+						   MT_BBP_AGC_GAIN);
+	dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
+						   MT_BBP_AGC_GAIN);
+	memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+	       sizeof(dev->cal.agc_gain_cur));
+	dev->cal.low_gain = -1;
+	dev->cal.gain_init_done = true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
new file mode 100644
index 0000000..fc2e410
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_PHY_H
+#define __MT76x02_PHY_H
+
+#include "mt76x02_regs.h"
+
+static inline int
+mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -62;
+	case NL80211_CHAN_WIDTH_40:
+		return -65;
+	default:
+		return -68;
+	}
+}
+
+static inline int
+mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -76;
+	case NL80211_CHAN_WIDTH_40:
+		return -79;
+	default:
+		return -82;
+	}
+}
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+			  bool primary_upper);
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_PHY_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
new file mode 100644
index 0000000..21c0f35
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -0,0 +1,712 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76X02_REGS_H
+#define __MT76X02_REGS_H
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3			0x22
+#define MT76XX_REV_E4			0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_COEXCFG3			0x004c
+
+#define	MT_LDO_CTRL_0			0x006c
+#define	MT_LDO_CTRL_1			0x0070
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+/* MT76x0 */
+#define MT_CSR_EE_CFG1			0x0104
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_IOCFG_6			0x0124
+
+#define MT_USB_U3DMA_CFG		0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT	GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT	GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP	BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN	BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD	BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID	GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY		BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY		BIT(31)
+
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN			0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN			0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX			0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_WMM_CTRL			0x0230 /* MT76x0 */
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+#define MT_USB_DMA_CFG			0x0238
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_US_CYC_CFG			0x02a4
+#define MT_US_CYC_CNT			GENMASK(7, 0)
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA			0x0430
+#define MT_TXQ_STA			0x0434
+#define	MT_RF_CSR_CFG			0x0500
+#define MT_RF_CSR_CFG_DATA		GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID		GENMASK(14, 8)
+#define MT_RF_CSR_CFG_REG_BANK		GENMASK(17, 15)
+#define MT_RF_CSR_CFG_WR		BIT(30)
+#define MT_RF_CSR_CFG_KICK		BIT(31)
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_MISC			0x0518
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_COM_REG0			0x0730
+#define MT_COM_REG1			0x0734
+#define MT_COM_REG2			0x0738
+#define MT_COM_REG3			0x073C
+
+#define MT_LED_CTRL			0x0770
+#define MT_LED_CTRL_REPLAY(_n)		BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)	BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)	BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)		BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0		0x0774
+#define MT_LED_TX_BLINK_1		0x0778
+
+#define MT_LED_S0_BASE			0x077C
+#define MT_LED_S0(_n)			(MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE			0x0780
+#define MT_LED_S1(_n)			(MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK		GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)		(((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+					 MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK		GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)		(((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+					 MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK	GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v)	(((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+					 MT_LED_STATUS_DURATION_MASK)
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR	0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT	0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+#define MT_FCE_PDMA_GLOBAL_CONF		0x09c4
+#define MT_FCE_SKIP_FS			0x0a6c
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_MAC_CSR0			0x1000
+
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK	GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+#define MT_MAX_LEN_CFG_AMPDU		GENMASK(13, 12)
+
+#define MT_LED_CFG			0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
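+/* e.g. WCID 40 maps to 0x106c + (40 / 32) * 4 = 0x1070, bit 40 % 32 = 8 */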
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG			0x110c
+#define MT_CH_TIME_CFG_TIMER_EN		BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY	BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY	BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY	BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY	BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN	BIT(5)
+#define MT_CH_CCA_RC_EN			BIT(6)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR	GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR	GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER		0x1110
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TSF_TIMER_DW0		0x111c
+#define MT_TSF_TIMER_DW1		0x1120
+#define MT_TBTT_TIMER			0x1124
+#define MT_TBTT_TIMER_VAL		GENMASK(16, 0)
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_CH_IDLE			0x1130
+#define MT_CH_BUSY			0x1134
+#define MT_EXT_CH_BUSY			0x1138
+#define MT_ED_CCA_TIMER			0x1140
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+#define MT_TX_PIN_CFG			0x1328
+#define MT_TX_PIN_CFG_TXANT		GENMASK(3, 0)
+#define MT_TX_PIN_CFG_RXANT		GENMASK(11, 8)
+#define MT_TX_PIN_RFTR_EN		BIT(16)
+#define MT_TX_PIN_TRSW_EN		BIT(18)
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+#define MT_TXOP_TRUN_EN			GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY		GENMASK(15, 8)
+#define MT_TXOP_ED_CCA_EN		BIT(20)
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO		GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_TX_LINK_CFG			0x1350
+#define MT_TX_CFACK_EN			BIT(12)
+#define MT_VHT_HT_FBK_CFG0		0x1354
+#define MT_VHT_HT_FBK_CFG1		0x1358
+#define MT_LG_FBK_CFG0			0x135c
+#define MT_LG_FBK_CFG1			0x1360
+
+#define MT_PROT_CFG_RATE		GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL		GENMASK(17, 16)
+#define MT_PROT_CFG_NAV			GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW		GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH		BIT(26)
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_PROT_RATE			GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS		BIT(16)
+#define MT_PROT_CTRL_CTS2SELF		BIT(17)
+#define MT_PROT_NAV_SHORT		BIT(18)
+#define MT_PROT_NAV_LONG		BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK		BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM		BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20		BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40		BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20		BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40		BIT(25)
+#define MT_PROT_RTS_THR_EN		BIT(26)
+#define MT_PROT_RATE_CCK_11		0x0003
+#define MT_PROT_RATE_OFDM_6		0x2000
+#define MT_PROT_RATE_OFDM_24		0x2004
+#define MT_PROT_RATE_DUP_OFDM_24	0x2084
+#define MT_PROT_RATE_SGI_OFDM_24	0x2104
+#define MT_PROT_TXOP_ALLOW_ALL		GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20		(MT_PROT_TXOP_ALLOW_ALL &	\
+					 ~MT_PROT_TXOP_ALLOW_MM40 &	\
+					 ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+#define MT_TX0_RF_GAIN_ATTEN		0x13a8
+#define MT_TX0_RF_GAIN_ATTEN		0x13a8 /* MT76x0 */
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3			0x13ac
+#define MT_TX_ALC_CFG_4			0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN	BIT(31)
+#define MT_TX0_BB_GAIN_ATTEN		0x13c0 /* MT76x0 */
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_AUTO_RSP_CFG			0x1404
+#define MT_AUTO_RSP_EN			BIT(0)
+#define MT_AUTO_RSP_PREAMB_SHORT	BIT(4)
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+
+#define MT_HT_CTRL_CFG			0x1410
+#define MT_RX_PARSER_CFG		0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL	BIT(0)
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+#define MT_TXOP_HLDR_TX40M_BLK_EN	BIT(1)
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ	GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ	GENMASK(27, 24)
+
+#define MT_RX_STAT_0			0x1700
+#define MT_RX_STAT_0_CRC_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS		GENMASK(31, 16)
+
+#define MT_RX_STAT_1			0x1704
+#define MT_RX_STAT_1_CCA_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS	GENMASK(31, 16)
+
+#define MT_RX_STAT_2			0x1708
+#define MT_RX_STAT_2_DUP_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS	GENMASK(31, 16)
+
+#define MT_TX_STA_0			0x170c
+#define MT_TX_STA_1			0x1710
+#define MT_TX_STA_2			0x1714
+
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT			0x171c
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+#define MT_MPDU_DENSITY_CNT		0x1740
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
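+/* e.g. MT_TX_AGG_CNT(3) = 0x1720 + 0x0c = 0x172c, MT_TX_AGG_CNT(10) = 0x174c + 0x08 = 0x1754 */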
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID	GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE		0x1c00
+#define MT_WCID_TX_RATE(_i)		(MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
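+/* e.g. MT_BBP(AGC, 8) = 0x2300 + (8 << 2) = 0x2320 */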
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN	GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN		GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN		GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN	GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE	GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		(MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)		(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)		(((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
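+/* e.g. MT_SKEY(0, 1) = 0xac00 + 1 * 32 = 0xac20, MT_SKEY(9, 2) = 0xb400 + 6 * 32 = 0xb4c0 */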
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		(MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		(((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+	u8 macaddr[6];
+	__le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x02_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
new file mode 100644
index 0000000..a812c3a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x02_trace.h"
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
new file mode 100644
index 0000000..61ecaf0
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x02_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x02.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x02
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,	\
+				wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s"
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define TXID_ENTRY	__field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN	__entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT	" [%d:%d]"
+#define TXID_PR_ARG	__entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+	TP_PROTO(struct mt76x02_dev *dev),
+	TP_ARGS(dev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+	),
+	TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+	TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		TXID_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT,
+		DEV_PR_ARG, TXID_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+	TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid)
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+	TP_PROTO(struct mt76x02_dev *dev),
+	TP_ARGS(dev)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+	TP_PROTO(struct mt76x02_dev *dev,
+		 struct mt76x02_tx_status *stat),
+
+	TP_ARGS(dev, stat),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+		__field(bool, success)
+		__field(bool, aggr)
+		__field(bool, ack_req)
+		__field(u16, rate)
+		__field(u8, retry)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->success = stat->success;
+		__entry->aggr = stat->aggr;
+		__entry->ack_req = stat->ack_req;
+		__entry->wcid = stat->wcid;
+		__entry->pktid = stat->pktid;
+		__entry->rate = stat->rate;
+		__entry->retry = stat->retry;
+	),
+
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT
+		" success:%d aggr:%d ack_req:%d"
+		" rate:%04x retry:%d",
+		DEV_PR_ARG, TXID_PR_ARG,
+		__entry->success, __entry->aggr, __entry->ack_req,
+		__entry->rate, __entry->retry
+	)
+);
+
+TRACE_EVENT(dev_irq,
+	TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
+
+	TP_ARGS(dev, val, mask),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, val)
+		__field(u32, mask)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+		__entry->mask = mask;
+	),
+
+	TP_printk(
+		DEV_PR_FMT " %08x & %08x",
+		DEV_PR_ARG, __entry->val, __entry->mask
+	)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x02_trace
+
+#include <trace/define_trace.h>
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
new file mode 100644
index 0000000..f27aade
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x02_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+	if (control->sta) {
+		struct mt76x02_sta *msta;
+
+		msta = (struct mt76x02_sta *)control->sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif) {
+		struct mt76x02_vif *mvif;
+
+		mvif = (struct mt76x02_vif *)vif->drv_priv;
+		wcid = &mvif->group_wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			  struct sk_buff *skb)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	void *rxwi = skb->data;
+
+	if (q == MT_RXQ_MCU) {
+		/* this is used just by mmio code */
+		mt76_mcu_rx_event(&dev->mt76, skb);
+		return;
+	}
+
+	skb_pull(skb, sizeof(struct mt76x02_rxwi));
+	if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	mt76_rx(mdev, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
+
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+				const struct ieee80211_tx_rate *rate)
+{
+	s8 max_txpwr;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+		if (mcs == 8 || mcs == 9) {
+			max_txpwr = dev->mt76.rate_power.vht[8];
+		} else {
+			u8 nss, idx;
+
+			nss = ieee80211_rate_get_vht_nss(rate);
+			idx = ((nss - 1) << 3) + mcs;
+			max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
+		}
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
+	} else {
+		enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+		if (band == NL80211_BAND_2GHZ) {
+			const struct ieee80211_rate *r;
+			struct wiphy *wiphy = dev->mt76.hw->wiphy;
+			struct mt76_rate_power *rp = &dev->mt76.rate_power;
+
+			r = &wiphy->bands[band]->bitrates[rate->idx];
+			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+				max_txpwr = rp->cck[r->hw_value & 0x3];
+			else
+				max_txpwr = rp->ofdm[r->hw_value & 0x7];
+		} else {
+			max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
+		}
+	}
+
+	return max_txpwr;
+}
+
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+	txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+	txpwr -= (dev->target_power + dev->target_power_delta[0]);
+	txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+	if (!dev->enable_tpc)
+		return 0;
+	else if (txpwr >= 0)
+		return min_t(s8, txpwr, 7);
+	else
+		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
+{
+	s8 txpwr_adj;
+
+	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
+					     dev->mt76.rate_power.ofdm[4]);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
+
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76x02_tx_status stat;
+
+	if (!mt76x02_mac_load_tx_status(dev, &stat))
+		return false;
+
+	mt76x02_send_tx_status(dev, &stat, update);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta,
+			   struct mt76_tx_info *tx_info)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+	struct mt76x02_txwi *txwi = txwi_ptr;
+	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
+	int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
+
+	if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
+		mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
+
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	len = tx_info->skb->len - (hdrlen & 2);
+	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+	/* encode packet rate for no-skb packet id to fix up status reporting */
+	if (pid == MT_PACKET_ID_NO_SKB)
+		pid = MT_PACKET_ID_HAS_RATE |
+		      (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+
+	txwi->pktid = pid;
+
+	if (mt76_is_skb_pktid(pid) && ampdu)
+		qsel = MT_QSEL_MGMT;
+
+	tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+			MT_TXD_INFO_80211;
+
+	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+		tx_info->info |= MT_TXD_INFO_WIV;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
new file mode 100644
index 0000000..98329de
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x02_USB_H
+#define __MT76x02_USB_H
+
+#include "mt76x02.h"
+
+void mt76x02u_init_mcu(struct mt76_dev *dev);
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+			      int data_len, u32 max_payload, u32 offset);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+			    enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta,
+			    struct mt76_tx_info *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			      struct mt76_queue_entry *e);
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
+#endif /* __MT76x02_USB_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
new file mode 100644
index 0000000..78dfc1e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x02_usb.h"
+
+static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
+{
+	int hdr_len;
+
+	skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
+	hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdr_len % 4)
+		mt76x02_remove_hdr_pad(skb, 2);
+}
+
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			      struct mt76_queue_entry *e)
+{
+	mt76x02u_remove_dma_hdr(e->skb);
+	mt76_tx_complete_skb(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+	struct sk_buff *iter, *last = skb;
+	u32 info, pad;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
+	 */
+	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+	/* Add zero pad of 4 - 7 bytes */
+	pad = round_up(skb->len, 4) + 4 - skb->len;
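+	/* e.g. a 13-byte payload is 17 bytes once TXINFO is pushed, so
+	 * pad = round_up(17, 4) + 4 - 17 = 7 (3 alignment bytes + 4 trailing zeros)
+	 */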
+
+	/* The first packet of an A-MSDU burst keeps track of the whole burst
+	 * length, so update its length as well as that of the last packet.
+	 */
+	skb_walk_frags(skb, iter) {
+		last = iter;
+		if (!iter->next) {
+			skb->data_len += pad;
+			skb->len += pad;
+			break;
+		}
+	}
+
+	if (skb_pad(last, pad))
+		return -ENOMEM;
+	__skb_put(last, pad);
+
+	return 0;
+}
+
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+			    enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta,
+			    struct mt76_tx_info *tx_info)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
+	struct mt76x02_txwi *txwi;
+	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
+	enum mt76_qsel qsel;
+	u32 flags;
+
+	mt76_insert_hdr_pad(tx_info->skb);
+
+	txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
+	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+	skb_push(tx_info->skb, sizeof(*txwi));
+
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+	/* encode packet rate for no-skb packet id to fix up status reporting */
+	if (pid == MT_PACKET_ID_NO_SKB)
+		pid = MT_PACKET_ID_HAS_RATE |
+		      (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+
+	txwi->pktid = pid;
+
+	if ((mt76_is_skb_pktid(pid) && ampdu) || ep == MT_EP_OUT_HCCA)
+		qsel = MT_QSEL_MGMT;
+	else
+		qsel = MT_QSEL_EDCA;
+
+	flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+		MT_TXD_INFO_80211;
+	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+		flags |= MT_TXD_INFO_WIV;
+
+	return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
+
+/* Trigger pre-TBTT event 8 ms before TBTT */
+#define PRE_TBTT_USEC 8000
+
+/* Beacon SRAM is limited to 8 kB. PS-buffered frames (which can be up to
+ * 1500 bytes) also have to be sent via beacon memory, which limits the
+ * number of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
+ */
+#define N_BCN_SLOTS 5
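+/* With 5 slots each one gets (8192 / 5) & ~63 = 1600 bytes of SRAM (see
+ * slot_size in mt76x02u_init_beacon_config() below).
+ */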
+
+static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+	u64 time;
+	u32 tbtt;
+
+	/* Get remaining TBTT in usec */
+	tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+	tbtt *= 32;
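+	/* e.g. a register value of 250 yields 250 * 32 = 8000 us, which is
+	 * exactly the PRE_TBTT_USEC lead time.
+	 */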
+
+	if (tbtt <= PRE_TBTT_USEC) {
+		queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+		return;
+	}
+
+	time = (tbtt - PRE_TBTT_USEC) * 1000ull;
+	hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+	u32 tbtt, dw0, dw1;
+	u64 tsf, time;
+
+	/* Get remaining TBTT in usec */
+	tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+	tbtt *= 32;
+
+	dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
+	dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
+	tsf = (u64)dw0 << 32 | dw1;
+	dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);
+
+	/* Convert beacon interval in TU (1024 usec) to nsec */
+	time = ((1000000000ull * dev->mt76.beacon_int) >> 10);
+
+	/* Adjust time to trigger hrtimer 8ms before TBTT */
+	if (tbtt < PRE_TBTT_USEC)
+		time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
+	else
+		time += (tbtt - PRE_TBTT_USEC) * 1000ull;
+
+	hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+	do {
+		hrtimer_cancel(&dev->pre_tbtt_timer);
+		cancel_work_sync(&dev->pre_tbtt_work);
+		/* Timer can be rearmed by work. */
+	} while (hrtimer_active(&dev->pre_tbtt_timer));
+}
+
+static void mt76x02u_pre_tbtt_work(struct work_struct *work)
+{
+	struct mt76x02_dev *dev =
+		container_of(work, struct mt76x02_dev, pre_tbtt_work);
+	struct beacon_bc_data data = {};
+	struct sk_buff *skb;
+	int i, nbeacons;
+
+	if (!dev->mt76.beacon_mask)
+		return;
+
+	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+		return;
+
+	mt76x02_resync_beacon_timer(dev);
+
+	ieee80211_iterate_active_interfaces(mt76_hw(dev),
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		mt76x02_update_beacon_iter, dev);
+
+	nbeacons = hweight8(dev->mt76.beacon_mask);
+	mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
+
+	for (i = nbeacons; i < N_BCN_SLOTS; i++) {
+		skb = __skb_dequeue(&data.q);
+		mt76x02_mac_set_beacon(dev, i, skb);
+	}
+
+	mt76x02u_restart_pre_tbtt_timer(dev);
+}
+
+static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
+{
+	struct mt76x02_dev *dev =
+	    container_of(timer, struct mt76x02_dev, pre_tbtt_timer);
+
+	queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+
+	return HRTIMER_NORESTART;
+}
+
+static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+	if (en && dev->mt76.beacon_mask &&
+	    !hrtimer_active(&dev->pre_tbtt_timer))
+		mt76x02u_start_pre_tbtt_timer(dev);
+	if (!en)
+		mt76x02u_stop_pre_tbtt_timer(dev);
+}
+
+static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!dev->mt76.beacon_int))
+		return;
+
+	if (en) {
+		mt76x02u_start_pre_tbtt_timer(dev);
+	} else {
+		/* The timer is already stopped; only clean up
+		 * PS-buffered frames, if any.
+		 */
+		for (i = 0; i < N_BCN_SLOTS; i++)
+			mt76x02_mac_set_beacon(dev, i, NULL);
+	}
+}
+
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
+{
+	static const struct mt76x02_beacon_ops beacon_ops = {
+		.nslots = N_BCN_SLOTS,
+		.slot_size = (8192 / N_BCN_SLOTS) & ~63,
+		.pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
+		.beacon_enable = mt76x02u_beacon_enable,
+	};
+	dev->beacon_ops = &beacon_ops;
+
+	hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+	INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
+
+	mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
+
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
+{
+	if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+		mt76_clear(dev, MT_BEACON_TIME_CFG,
+			   MT_BEACON_TIME_CFG_TIMER_EN |
+			   MT_BEACON_TIME_CFG_SYNC_MODE |
+			   MT_BEACON_TIME_CFG_TBTT_EN |
+			   MT_BEACON_TIME_CFG_BEACON_TX);
+
+	mt76x02u_stop_pre_tbtt_timer(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
new file mode 100644
index 0000000..a993cd7
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "mt76x02.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN			4
+
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+
+static void
+mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u32 reg, val;
+	int i;
+
+	if (usb->mcu.burst) {
+		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
+
+		reg = usb->mcu.rp[0].reg - usb->mcu.base;
+		for (i = 0; i < usb->mcu.rp_len; i++) {
+			val = get_unaligned_le32(data + 4 * i);
+			usb->mcu.rp[i].reg = reg++;
+			usb->mcu.rp[i].value = val;
+		}
+	} else {
+		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+		for (i = 0; i < usb->mcu.rp_len; i++) {
+			reg = get_unaligned_le32(data + 8 * i) -
+			      usb->mcu.base;
+			val = get_unaligned_le32(data + 8 * i + 4);
+
+			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+			usb->mcu.rp[i].value = val;
+		}
+	}
+}
+
+static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u8 *data = usb->mcu.data;
+	int i, len, ret;
+	u32 rxfce;
+
+	for (i = 0; i < 5; i++) {
+		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
+		if (ret == -ETIMEDOUT)
+			continue;
+		if (ret)
+			goto out;
+
+		if (usb->mcu.rp)
+			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);
+
+		rxfce = get_unaligned_le32(data);
+		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
+		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
+			return 0;
+
+		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+	}
+out:
+	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
+	return ret;
+}
+
+static int
+__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+			int cmd, bool wait_resp)
+{
+	struct mt76_usb *usb = &dev->usb;
+	int ret;
+	u8 seq = 0;
+	u32 info;
+
+	if (test_bit(MT76_REMOVED, &dev->state))
+		return 0;
+
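+	/* Sequence number 0 appears to be reserved for commands that do not
+	 * expect a response, so it is skipped when wait_resp is set.
+	 */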
+	if (wait_resp) {
+		seq = ++usb->mcu.msg_seq & 0xf;
+		if (!seq)
+			seq = ++usb->mcu.msg_seq & 0xf;
+	}
+
+	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+	       MT_MCU_MSG_TYPE_CMD;
+	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
+	if (ret)
+		return ret;
+
+	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
+	if (ret)
+		return ret;
+
+	if (wait_resp)
+		ret = mt76x02u_mcu_wait_resp(dev, seq);
+
+	consume_skb(skb);
+
+	return ret;
+}
+
+static int
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
+		      int len, bool wait_resp)
+{
+	struct mt76_usb *usb = &dev->usb;
+	struct sk_buff *skb;
+	int err;
+
+	skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&usb->mcu.mutex);
+	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
+	mutex_unlock(&usb->mcu.mutex);
+
+	return err;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+	put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static int
+mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
+		   const struct mt76_reg_pair *data, int n)
+{
+	const int CMD_RANDOM_WRITE = 12;
+	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+	struct mt76_usb *usb = &dev->usb;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	mutex_lock(&usb->mcu.mutex);
+	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	mutex_unlock(&usb->mcu.mutex);
+	if (ret)
+		return ret;
+
+	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
+}
+
+static int
+mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
+		   struct mt76_reg_pair *data, int n)
+{
+	const int CMD_RANDOM_READ = 10;
+	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+	struct mt76_usb *usb = &dev->usb;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_vals_per_cmd, n);
+	if (cnt != n)
+		return -EINVAL;
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	mutex_lock(&usb->mcu.mutex);
+
+	usb->mcu.rp = data;
+	usb->mcu.rp_len = n;
+	usb->mcu.base = base;
+	usb->mcu.burst = false;
+
+	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
+
+	usb->mcu.rp = NULL;
+
+	mutex_unlock(&usb->mcu.mutex);
+
+	return ret;
+}
+
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
+{
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_VENDOR,
+			     0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
+
+static int
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
+			    const void *fw_data, int len, u32 dst_addr)
+{
+	__le32 info;
+	u32 val;
+	int err, data_len;
+
+	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
+			   MT_MCU_MSG_TYPE_CMD);
+
+	memcpy(data, &info, sizeof(info));
+	memcpy(data + sizeof(info), fw_data, len);
+	memset(data + sizeof(info) + len, 0, 4);
+
+	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+			MT_FCE_DMA_ADDR, dst_addr);
+	len = roundup(len, 4);
+	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+			MT_FCE_DMA_LEN, len << 16);
+
+	data_len = MT_CMD_HDR_LEN + len + sizeof(info);
+
+	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
+	if (err) {
+		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
+		return err;
+	}
+
+	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	return 0;
+}
+
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+			      int data_len, u32 max_payload, u32 offset)
+{
+	int len, err = 0, pos = 0, max_len = max_payload - 8;
+	u8 *buf;
+
+	buf = kmalloc(max_payload, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	while (data_len > 0) {
+		len = min_t(int, data_len, max_len);
+		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
+						  len, offset + pos);
+		if (err < 0)
+			break;
+
+		data_len -= len;
+		pos += len;
+		usleep_range(5000, 10000);
+	}
+	kfree(buf);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
+
+void mt76x02u_init_mcu(struct mt76_dev *dev)
+{
+	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
+		.mcu_send_msg = mt76x02u_mcu_send_msg,
+		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
+		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
+	};
+
+	dev->mcu_ops = &mt76x02u_mcu_ops;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
new file mode 100644
index 0000000..075871f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include "mt76x02.h"
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
+}
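+/* bitrate is in 100 kbit/s units, e.g. CCK_RATE(3, 110) describes 11 Mbit/s
+ * with hw_value (MT_PHY_TYPE_CCK << 8) | 3 and short-preamble hw_value
+ * (MT_PHY_TYPE_CCK << 8) | 11.
+ */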
+
+struct ieee80211_rate mt76x02_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x02_rates);
+
+static const struct ieee80211_iface_limit mt76x02_if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC)
+	}, {
+		.max = 8,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_AP)
+	 },
+};
+
+static const struct ieee80211_iface_limit mt76x02u_if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC)
+	}, {
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_AP)
+	},
+};
+
+static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
+	{
+		.limits = mt76x02_if_limits,
+		.n_limits = ARRAY_SIZE(mt76x02_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80),
+	}
+};
+
+static const struct ieee80211_iface_combination mt76x02u_if_comb[] = {
+	{
+		.limits = mt76x02u_if_limits,
+		.n_limits = ARRAY_SIZE(mt76x02u_if_limits),
+		.max_interfaces = 2,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	}
+};
+
+static void
+mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
+		       u8 delay_off)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+					       mt76);
+	u32 val;
+
+	val = MT_LED_STATUS_DURATION(0xff) |
+	      MT_LED_STATUS_OFF(delay_off) |
+	      MT_LED_STATUS_ON(delay_on);
+
+	mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
+	mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+
+	val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
+	      MT_LED_CTRL_KICK(mdev->led_pin);
+	if (mdev->led_al)
+		val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+	mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int
+mt76x02_led_set_blink(struct led_classdev *led_cdev,
+		      unsigned long *delay_on,
+		      unsigned long *delay_off)
+{
+	struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+	u8 delta_on, delta_off;
+
+	delta_off = max_t(u8, *delay_off / 10, 1);
+	delta_on = max_t(u8, *delay_on / 10, 1);
+
+	mt76x02_led_set_config(mdev, delta_on, delta_off);
+
+	return 0;
+}
+
+static void
+mt76x02_led_set_brightness(struct led_classdev *led_cdev,
+			   enum led_brightness brightness)
+{
+	struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+
+	if (!brightness)
+		mt76x02_led_set_config(mdev, 0, 0xff);
+	else
+		mt76x02_led_set_config(mdev, 0xff, 0);
+}
+
+void mt76x02_init_device(struct mt76x02_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+
+	INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
+
+	hw->queues = 4;
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+	hw->extra_tx_headroom = 2;
+
+	wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	if (mt76_is_usb(dev)) {
+		hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+					 MT_DMA_HDR_LEN;
+		wiphy->iface_combinations = mt76x02u_if_comb;
+		wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02u_if_comb);
+	} else {
+		INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work);
+
+		mt76x02_dfs_init_detector(dev);
+
+		wiphy->reg_notifier = mt76x02_regd_notifier;
+		wiphy->iface_combinations = mt76x02_if_comb;
+		wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
+		wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+		/* init led callbacks */
+		if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+			dev->mt76.led_cdev.brightness_set =
+					mt76x02_led_set_brightness;
+			dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+		}
+	}
+
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+	hw->sta_data_size = sizeof(struct mt76x02_sta);
+	hw->vif_data_size = sizeof(struct mt76x02_vif);
+
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+	dev->mt76.global_wcid.idx = 255;
+	dev->mt76.global_wcid.hw_key_idx = -1;
+	dev->slottime = 9;
+
+	if (is_mt76x2(dev)) {
+		dev->mt76.sband_2g.sband.ht_cap.cap |=
+				IEEE80211_HT_CAP_LDPC_CODING;
+		dev->mt76.sband_5g.sband.ht_cap.cap |=
+				IEEE80211_HT_CAP_LDPC_CODING;
+		dev->mt76.chainmask = 0x202;
+		dev->mt76.antenna_mask = 3;
+	} else {
+		dev->mt76.chainmask = 0x101;
+		dev->mt76.antenna_mask = 1;
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_device);
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+			      unsigned int changed_flags,
+			      unsigned int *total_flags, u64 multicast)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->mt76.rxfilter &= ~(_hw);				\
+		dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
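+	/* A set MT_RX_FILTR_CFG_* bit appears to make the hardware drop that
+	 * frame class, so each bit is set only when the corresponding FIF_*
+	 * flag was not requested (hence the negation above).
+	 */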
+
+	mutex_lock(&dev->mt76.mutex);
+
+	dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+	mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
+
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	int idx = 0;
+
+	memset(msta, 0, sizeof(*msta));
+
+	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
+	if (idx < 0)
+		return -ENOSPC;
+
+	msta->vif = mvif;
+	msta->wcid.sta = 1;
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76x02_mac_wcid_set_drop(dev, idx, false);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_add);
+
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+	int idx = wcid->idx;
+
+	mt76x02_mac_wcid_set_drop(dev, idx, true);
+	mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
+
+static void
+mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+		 unsigned int idx)
+{
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct mt76_txq *mtxq;
+
+	memset(mvif, 0, sizeof(*mvif));
+
+	mvif->idx = idx;
+	mvif->group_wcid.idx = MT_VIF_WCID(idx);
+	mvif->group_wcid.hw_key_idx = -1;
+	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+	mtxq->wcid = &mvif->group_wcid;
+
+	mt76_txq_init(&dev->mt76, vif->txq);
+}
+
+int
+mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	unsigned int idx = 0;
+
+	/* Allow changing the HW address when creating the first interface. */
+	if (!dev->vif_mask &&
+	    (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+	     memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+		mt76x02_mac_setaddr(dev, vif->addr);
+
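+	/* Addresses generated by mt76x02_config_mac_addr_list() have the
+	 * locally administered bit set; the XOR below recovers their index in
+	 * that list, so each extra MAC address maps to its own BSS slot.
+	 */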
+	if (vif->addr[0] & BIT(1))
+		idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+	/*
+	 * Client mode typically only has one configurable BSSID register,
+	 * which is used for bssidx=0. This is linked to the MAC address.
+	 * Since mac80211 allows changing interface types, and we cannot
+	 * force the use of the primary MAC address for a station mode
+	 * interface, we need some other way of configuring a per-interface
+	 * remote BSSID.
+	 * The hardware provides an AP-Client feature, where bssidx 0-7 are
+	 * used for AP mode and bssidx 8-15 for client mode.
+	 * We shift the station interface bss index by 8 to force the
+	 * hardware to recognize the BSSID.
+	 * The resulting bssidx mismatch for unicast frames is ignored by hw.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION)
+		idx += 8;
+
+	if (dev->vif_mask & BIT(idx))
+		return -EBUSY;
+
+	dev->vif_mask |= BIT(idx);
+
+	mt76x02_vif_init(dev, vif, idx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_interface);
+
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+	mt76_txq_remove(&dev->mt76, vif->txq);
+	dev->vif_mask &= ~BIT(mvif->idx);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct ieee80211_sta *sta = params->sta;
+	struct mt76x02_dev *dev = hw->priv;
+	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	u16 tid = params->tid;
+	u16 ssn = params->ssn;
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+				   ssn, params->buf_size);
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
+
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		    struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		    struct ieee80211_key_conf *key)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct mt76x02_sta *msta;
+	struct mt76_wcid *wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * The hardware does not support per-STA RX GTK, so fall back
+	 * to software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	/*
+	 * In USB AP mode, broadcast/multicast frames are set up in the beacon
+	 * data registers and sent via the HW beacon engine, so they must
+	 * already be encrypted.
+	 */
+	if (mt76_is_usb(dev) &&
+	    vif->type == NL80211_IFTYPE_AP &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	/* MT76x0 GTK offloading does not work with more than one VIF */
+	if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+			wcid->sw_iv = true;
+		}
+	} else {
+		if (idx == wcid->hw_key_idx) {
+			wcid->hw_key_idx = -1;
+			wcid->sw_iv = false;
+		}
+
+		key = NULL;
+	}
+	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76x02_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76x02_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76x02_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_key);
+
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, qid;
+	u32 val;
+
+	qid = dev->mt76.q_tx[queue].q->hw_idx;
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
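+	/* cw values are programmed as exponents, e.g. a cw_min of 15 is
+	 * written as fls(15) = 4 since 2^4 - 1 = 15.
+	 */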
+
+	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(qid));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
+
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev)
+{
+	u8 ackto, sifs, slottime = dev->slottime;
+
+	/* As defined by IEEE 802.11-2007 17.3.8.6 */
+	slottime += 3 * dev->coverage_class;
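+	/* e.g. coverage class 2 with short slots gives 9 + 3 * 2 = 15 us */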
+	mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+		       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+
+	sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+			      MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+	ackto = slottime + sifs;
+	mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+		       MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto);
+
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+				s16 coverage_class)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	dev->coverage_class = coverage_class;
+	mt76x02_set_tx_ackto(dev);
+	mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class);
+
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	if (val != ~0 && val > 0xffff)
+		return -EINVAL;
+
+	mutex_lock(&dev->mt76.mutex);
+	mt76x02_mac_set_rts_thresh(dev, val);
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold);
+
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+	struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+	struct ieee80211_tx_rate rate = {};
+
+	if (!rates)
+		return;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
+
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+	int hdrlen;
+
+	if (!len)
+		return;
+
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	memmove(skb->data + len, skb->data, hdrlen);
+	skb_pull(skb, len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
+
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	clear_bit(MT76_SCANNING, &dev->mt76.state);
+	if (dev->cal.gain_init_done) {
+		/* Restore AGC gain and resume calibration after scanning. */
+		dev->cal.low_gain = -1;
+		ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
+
+void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
+		    bool ps)
+{
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+	int idx = msta->wcid.idx;
+
+	mt76_stop_tx_queues(&dev->mt76, sta, true);
+	if (mt76_is_mmio(dev))
+		mt76x02_mac_wcid_set_drop(dev, idx, ps);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
+
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_bss_conf *info,
+			      u32 changed)
+{
+	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+	struct mt76x02_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & BSS_CHANGED_BSSID)
+		mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+	if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+		mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
+					      info->ht_operation_mode);
+
+	if (changed & BSS_CHANGED_BEACON_INT) {
+		mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+			       MT_BEACON_TIME_CFG_INTVAL,
+			       info->beacon_int << 4);
+		dev->mt76.beacon_int = info->beacon_int;
+	}
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED)
+		mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE)
+		mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		dev->slottime = slottime;
+		mt76x02_set_tx_ackto(dev);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_bss_info_changed);
+
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+		u8 *addr = dev->macaddr_list[i].addr;
+
+		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+		if (!i)
+			continue;
+
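+		/* Derive the additional addresses from the base MAC by
+		 * setting the locally administered bit and varying a few
+		 * low bits of the first octet per entry.
+		 */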
+		addr[0] |= BIT(1);
+		addr[0] ^= ((i - 1) << 2);
+	}
+	wiphy->addresses = dev->macaddr_list;
+	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+}
+EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
new file mode 100644
index 0000000..5fd4973
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT76x2_COMMON
+	tristate
+	select MT76x02_LIB
+
+config MT76x2E
+	tristate "MediaTek MT76x2E (PCIe) support"
+	select MT76x2_COMMON
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7612/MT7602/MT7662-based wireless PCIe
+	  devices, which comply with IEEE 802.11ac standards and support
+	  2SS to 866Mbit/s PHY rate.
+
+	  To compile this driver as a module, choose M here.
+
+config MT76x2U
+	tristate "MediaTek MT76x2U (USB) support"
+	select MT76x2_COMMON
+	select MT76x02_USB
+	depends on MAC80211
+	depends on USB
+	help
+	  This adds support for MT7612U-based wireless USB 3.0 dongles,
+	  which comply with IEEE 802.11ac standards and support 2SS to
+	  866Mbit/s PHY rate.
+
+	  To compile this driver as a module, choose M here.
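+
+# Example .config fragment (illustrative): with MAC80211, PCI and USB
+# already enabled, only the two visible options need to be set; the
+# MT76x2_COMMON, MT76x02_LIB and MT76x02_USB helpers are pulled in
+# automatically via 'select'.
+#
+#   CONFIG_MT76x2E=m
+#   CONFIG_MT76x2U=m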
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
new file mode 100644
index 0000000..7b2b187
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
+mt76x2-common-y := \
+	eeprom.o mac.o init.o phy.o mcu.o
+
+mt76x2e-y := \
+	pci.o pci_main.o pci_init.o pci_mcu.o \
+	pci_phy.o
+
+mt76x2u-y := \
+	usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+	usb_phy.o
+
+CFLAGS_pci_trace.o := -I$(src)
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
new file mode 100644
index 0000000..3ee945e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
+{
+	void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+	memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+	return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+	u16 *efuse_w = (u16 *)efuse;
+
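+	/* Only trust the calibration-free data if the key efuse words below
+	 * look programmed (neither all-zero nor erased 0xffff).
+	 */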
+	if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+		return false;
+
+	return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id)							   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+	static const u8 cal_free_bytes[] = {
+		MT_EE_XTAL_TRIM_1,
+		MT_EE_TX_POWER_EXT_PA_5G + 1,
+		MT_EE_TX_POWER_0_START_2G,
+		MT_EE_TX_POWER_0_START_2G + 1,
+		MT_EE_TX_POWER_1_START_2G,
+		MT_EE_TX_POWER_1_START_2G + 1,
+		GROUP_5G(0),
+		GROUP_5G(1),
+		GROUP_5G(2),
+		GROUP_5G(3),
+		GROUP_5G(4),
+		GROUP_5G(5),
+		MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+		MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+	};
+	u8 *eeprom = dev->mt76.eeprom.data;
+	u8 prev_grp0[4] = {
+		eeprom[MT_EE_TX_POWER_0_START_5G],
+		eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+		eeprom[MT_EE_TX_POWER_1_START_5G],
+		eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+	};
+	u16 val;
+	int i;
+
+	if (!mt76x2_has_cal_free_data(dev, efuse))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+		int offset = cal_free_bytes[i];
+
+		eeprom[offset] = efuse[offset];
+	}
+
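+	/* If the efuse copy of the group-0 5G power values is empty, keep
+	 * the values already present in the EEPROM image.
+	 */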
+	if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+	      efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+	if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+	      efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x02_dev *dev)
+{
+	u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+	if (!val)
+		val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+	switch (val) {
+	case 0x7662:
+	case 0x7612:
+		return 0;
+	default:
+		dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+		return -EINVAL;
+	}
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x02_dev *dev)
+{
+	void *efuse;
+	bool found;
+	int ret;
+
+	ret = mt76_eeprom_init(&dev->mt76, MT7662_EEPROM_SIZE);
+	if (ret < 0)
+		return ret;
+
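+	/* A positive return value means EEPROM content was already provided
+	 * (e.g. from flash or device tree); validate it before trusting it.
+	 */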
+	found = ret;
+	if (found)
+		found = !mt76x2_check_eeprom(dev);
+
+	dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, MT7662_EEPROM_SIZE,
+					  GFP_KERNEL);
+	dev->mt76.otp.size = MT7662_EEPROM_SIZE;
+	if (!dev->mt76.otp.data)
+		return -ENOMEM;
+
+	efuse = dev->mt76.otp.data;
+
+	if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
+				   MT_EE_READ))
+		goto out;
+
+	if (found) {
+		mt76x2_apply_cal_free_data(dev, efuse);
+	} else {
+		/* FIXME: check if efuse data is complete */
+		found = true;
+		memcpy(dev->mt76.eeprom.data, efuse, MT7662_EEPROM_SIZE);
+	}
+
+out:
+	if (!found)
+		return -ENOENT;
+
+	return 0;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val)
+{
+	s8 *dest = dev->cal.rx.high_gain;
+
+	if (!mt76x02_field_valid(val)) {
+		dest[0] = 0;
+		dest[1] = 0;
+		return;
+	}
+
+	dest[0] = mt76x02_sign_extend(val, 4);
+	dest[1] = mt76x02_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val)
+{
+	s8 *dest = dev->cal.rx.rssi_offset;
+
+	if (!mt76x02_field_valid(val)) {
+		dest[chain] = 0;
+		return;
+	}
+
+	dest[chain] = mt76x02_sign_extend_optional(val, 7);
+}
+
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+	if (channel >= 184 && channel <= 196)
+		return MT_CH_5G_JAPAN;
+	if (channel <= 48)
+		return MT_CH_5G_UNII_1;
+	if (channel <= 64)
+		return MT_CH_5G_UNII_2;
+	if (channel <= 114)
+		return MT_CH_5G_UNII_2E_1;
+	if (channel <= 144)
+		return MT_CH_5G_UNII_2E_2;
+	return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
+{
+	enum mt76x2_cal_channel_group group;
+
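+	/* Each 16-bit EEPROM word holds the RX gain for two channel groups:
+	 * one in the low byte and the next in the high byte.
+	 */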
+	group = mt76x2_get_cal_channel_group(channel);
+	switch (group) {
+	case MT_CH_5G_JAPAN:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_1:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_2E_1:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2E_2:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+	default:
+		return mt76x02_eeprom_get(dev,
+					  MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+	}
+}
+
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	int channel = chan->hw_value;
+	s8 lna_5g[3], lna_2g;
+	bool use_lna;
+	u8 lna = 0;
+	u16 val;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+	else
+		val = mt76x2_get_5g_rx_gain(dev, channel);
+
+	mt76x2_set_rx_gain_group(dev, val);
+
+	mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
+	mt76x2_set_rssi_offset(dev, 0, val);
+	mt76x2_set_rssi_offset(dev, 1, val >> 8);
+
+	dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
+	dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (chan->band == NL80211_BAND_2GHZ)
+		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+	else
+		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+	if (use_lna)
+		lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
+	dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
+}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+			   struct ieee80211_channel *chan)
+{
+	bool is_5ghz;
+	u16 val;
+
+	is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	memset(t, 0, sizeof(*t));
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+	t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
+	t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+	else
+		val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+	t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
+	t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+	else
+		val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+	t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
+	t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+	t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
+	t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+	t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
+	t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+	t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
+	t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+	t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
+	t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+	t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
+	t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+	t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
+	t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+	if (!is_5ghz)
+		val >>= 8;
+	t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
+
+	memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
+	t->stbc[8] = t->vht[8];
+	t->stbc[9] = t->vht[9];
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
+
+static void
+mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
+			 struct mt76x2_tx_power_info *t,
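+	/* The low byte of XTAL_TRIM_2 is a sign/magnitude trim offset;
+	 * 0xff means the field is unprogrammed.
+	 */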
+			 struct ieee80211_channel *chan,
+			 int chain, int offset)
+{
+	int channel = chan->hw_value;
+	int delta_idx;
+	u8 data[6];
+	u16 val;
+
+	if (channel < 6)
+		delta_idx = 3;
+	else if (channel < 11)
+		delta_idx = 4;
+	else
+		delta_idx = 5;
+
+	mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta =
+		mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+	t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
+			 struct mt76x2_tx_power_info *t,
+			 struct ieee80211_channel *chan,
+			 int chain, int offset)
+{
+	int channel = chan->hw_value;
+	enum mt76x2_cal_channel_group group;
+	int delta_idx;
+	u16 val;
+	u8 data[5];
+
+	group = mt76x2_get_cal_channel_group(channel);
+	offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
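+	/* Pick the delta byte within the group's calibration block that
+	 * applies to this channel.
+	 */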
+	if (channel >= 192)
+		delta_idx = 4;
+	else if (channel >= 184)
+		delta_idx = 3;
+	else if (channel < 44)
+		delta_idx = 3;
+	else if (channel < 52)
+		delta_idx = 4;
+	else if (channel < 58)
+		delta_idx = 3;
+	else if (channel < 98)
+		delta_idx = 4;
+	else if (channel < 106)
+		delta_idx = 3;
+	else if (channel < 116)
+		delta_idx = 4;
+	else if (channel < 130)
+		delta_idx = 3;
+	else if (channel < 149)
+		delta_idx = 4;
+	else if (channel < 157)
+		delta_idx = 3;
+	else
+		delta_idx = 4;
+
+	mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta =
+		mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+	t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+			   struct mt76x2_tx_power_info *t,
+			   struct ieee80211_channel *chan)
+{
+	u16 bw40, bw80;
+
+	memset(t, 0, sizeof(*t));
+
+	bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+	bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+	if (chan->band == NL80211_BAND_5GHZ) {
+		bw40 >>= 8;
+		mt76x2_get_power_info_5g(dev, t, chan, 0,
+					 MT_EE_TX_POWER_0_START_5G);
+		mt76x2_get_power_info_5g(dev, t, chan, 1,
+					 MT_EE_TX_POWER_1_START_5G);
+	} else {
+		mt76x2_get_power_info_2g(dev, t, chan, 0,
+					 MT_EE_TX_POWER_0_START_2G);
+		mt76x2_get_power_info_2g(dev, t, chan, 1,
+					 MT_EE_TX_POWER_1_START_2G);
+	}
+
+	if (mt76x2_tssi_enabled(dev) ||
+	    !mt76x02_field_valid(t->target_power))
+		t->target_power = t->chain[0].target_power;
+
+	t->delta_bw40 = mt76x02_rate_power_val(bw40);
+	t->delta_bw80 = mt76x02_rate_power_val(bw80);
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
+
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
+{
+	enum nl80211_band band = dev->mt76.chandef.chan->band;
+	u16 val, slope;
+	u8 bounds;
+
+	memset(t, 0, sizeof(*t));
+
+	if (!mt76x2_temp_tx_alc_enabled(dev))
+		return -EINVAL;
+
+	if (!mt76x02_ext_pa_enabled(dev, band))
+		return -EINVAL;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+	t->temp_25_ref = val & 0x7f;
+	if (band == NL80211_BAND_5GHZ) {
+		slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+		bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+	} else {
+		slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+		bounds = mt76x02_eeprom_get(dev,
+					    MT_EE_TX_POWER_DELTA_BW80) >> 8;
+	}
+
+	t->high_slope = slope & 0xff;
+	t->low_slope = slope >> 8;
+	t->lower_bound = 0 - (bounds & 0xf);
+	t->upper_bound = (bounds >> 4) & 0xf;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
+
+int mt76x2_eeprom_init(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	ret = mt76x2_eeprom_load(dev);
+	if (ret)
+		return ret;
+
+	mt76x02_eeprom_parse_hw_cap(dev);
+	mt76x2_eeprom_get_macaddr(dev);
+	mt76_eeprom_override(&dev->mt76);
+	dev->mt76.macaddr[0] &= ~BIT(1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
new file mode 100644
index 0000000..4dcf651
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+enum mt76x2_cal_channel_group {
+	MT_CH_5G_JAPAN,
+	MT_CH_5G_UNII_1,
+	MT_CH_5G_UNII_2,
+	MT_CH_5G_UNII_2E_1,
+	MT_CH_5G_UNII_2E_2,
+	MT_CH_5G_UNII_3,
+	__MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+	u8 target_power;
+
+	s8 delta_bw40;
+	s8 delta_bw80;
+
+	struct {
+		s8 tssi_slope;
+		s8 tssi_offset;
+		s8 target_power;
+		s8 delta;
+	} chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+	u8 temp_25_ref;
+	int lower_bound; /* J */
+	int upper_bound; /* J */
+	unsigned int high_slope; /* J / dB */
+	unsigned int low_slope; /* J / dB */
+};
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+			   struct ieee80211_channel *chan);
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+			   struct mt76x2_tx_power_info *t,
+			   struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x02_dev *dev)
+{
+	u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+	else
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
+{
+	u16 val;
+
+	val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+	if (!(val & BIT(15)))
+		return false;
+
+	return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+	       MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x02_dev *dev)
+{
+	return !mt76x2_temp_tx_alc_enabled(dev) &&
+	       (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+		MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
new file mode 100644
index 0000000..79e583e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
+{
+	u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+			 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable)
+{
+	u32 val;
+
+	if (!enable)
+		goto out;
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+	if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+		val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+		mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+		udelay(20);
+
+		val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+out:
+	mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+void mt76_write_mac_initvals(struct mt76x02_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x3) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+	static const struct mt76_reg_pair vals[] = {
+		/* Copied from MediaTek reference source */
+		{ MT_PBF_SYS_CTRL,		0x00080c00 },
+		{ MT_PBF_CFG,			0x1efebcff },
+		{ MT_FCE_PSE_CTRL,		0x00000001 },
+		{ MT_MAC_SYS_CTRL,		0x0000000c },
+		{ MT_MAX_LEN_CFG,		0x003e3f00 },
+		{ MT_AMPDU_MAX_LEN_20M1S,	0xaaa99887 },
+		{ MT_AMPDU_MAX_LEN_20M2S,	0x000000aa },
+		{ MT_XIFS_TIME_CFG,		0x33a40d0a },
+		{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+		{ MT_TBTT_SYNC_CFG,		0x00422010 },
+		{ MT_PWR_PIN_CFG,		0x00000000 },
+		{ 0x1238,			0x001700c8 },
+		{ MT_TX_SW_CFG0,		0x00101001 },
+		{ MT_TX_SW_CFG1,		0x00010000 },
+		{ MT_TX_SW_CFG2,		0x00000000 },
+		{ MT_TXOP_CTRL_CFG,		0x0400583f },
+		{ MT_TX_RTS_CFG,		0x00ffff20 },
+		{ MT_TX_TIMEOUT_CFG,		0x000a2290 },
+		{ MT_TX_RETRY_CFG,		0x47f01f0f },
+		{ MT_EXP_ACK_TIME,		0x002c00dc },
+		{ MT_TX_PROT_CFG6,		0xe3f42004 },
+		{ MT_TX_PROT_CFG7,		0xe3f42084 },
+		{ MT_TX_PROT_CFG8,		0xe3f42104 },
+		{ MT_PIFS_TX_CFG,		0x00060fff },
+		{ MT_RX_FILTR_CFG,		0x00015f97 },
+		{ MT_LEGACY_BASIC_RATE,		0x0000017f },
+		{ MT_HT_BASIC_RATE,		0x00004003 },
+		{ MT_PN_PAD_MODE,		0x00000003 },
+		{ MT_TXOP_HLDR_ET,		0x00000002 },
+		{ 0xa44,			0x00000000 },
+		{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+		{ MT_TSO_CTRL,			0x00000000 },
+		{ MT_AUX_CLK_CFG,		0x00000000 },
+		{ MT_DACCLK_EN_DLY_CFG,		0x00000000 },
+		{ MT_TX_ALC_CFG_4,		0x00000000 },
+		{ MT_TX_ALC_VGA3,		0x00000000 },
+		{ MT_TX_PWR_CFG_0,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_1,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_2,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_3,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_4,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_7,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_8,		0x0000003a },
+		{ MT_TX_PWR_CFG_9,		0x0000003a },
+		{ MT_EFUSE_CTRL,		0x0000d000 },
+		{ MT_PAUSE_ENABLE_CONTROL1,	0x0000000a },
+		{ MT_FCE_WLAN_FLOW_CONTROL1,	0x60401c18 },
+		{ MT_WPDMA_DELAY_INT_CFG,	0x94ff0000 },
+		{ MT_TX_SW_CFG3,		0x00000004 },
+		{ MT_HT_FBK_TO_LEGACY,		0x00001818 },
+		{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
+		{ MT_PROT_AUTO_TX_CFG,		0x00830083 },
+		{ MT_HT_CTRL_CFG,		0x000001ff },
+		{ MT_TX_LINK_CFG,		0x00001020 },
+	};
+	struct mt76_reg_pair prot_vals[] = {
+		{ MT_CCK_PROT_CFG,		DEFAULT_PROT_CFG_CCK },
+		{ MT_OFDM_PROT_CFG,		DEFAULT_PROT_CFG_OFDM },
+		{ MT_MM20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_MM40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+		{ MT_GF20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_GF40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+	};
+
+	mt76_wr_rp(dev, 0, vals, ARRAY_SIZE(vals));
+	mt76_wr_rp(dev, 0, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+			 struct ieee80211_supported_band *sband)
+{
+	struct ieee80211_channel *chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76_rate_power t = {};
+	int i;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		chan = &sband->channels[i];
+
+		mt76x2_get_power_info(dev, &txp, chan);
+		mt76x2_get_rate_power(dev, &t, chan);
+
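+		/* EEPROM power values are in 0.5 dB steps; the division
+		 * below converts the sum to dB.
+		 */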
+		chan->orig_mpwr = mt76x02_get_max_rate_power(&t) +
+				  txp.target_power;
+		chan->orig_mpwr = DIV_ROUND_UP(chan->orig_mpwr, 2);
+
+		/* convert to combined output power on 2x2 devices */
+		chan->orig_mpwr += 3;
+		chan->max_power = min_t(int, chan->max_reg_power,
+					chan->orig_mpwr);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
new file mode 100644
index 0000000..e08740c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
+{
+	bool stopped = false;
+	u32 rts_cfg;
+	int i;
+
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+	mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 300; i++) {
+		if ((mt76_rr(dev, MT_MAC_STATUS) &
+		     (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+		    mt76_rr(dev, MT_BBP(IBI, 12))) {
+			udelay(1);
+			continue;
+		}
+
+		stopped = true;
+		break;
+	}
+
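+	/* If the MAC did not go idle in time, toggle the BBP(CORE, 4)
+	 * control bits as a last resort to force it to stop.
+	 */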
+	if (force && !stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
new file mode 100644
index 0000000..a158302
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76x2.h"
+
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
+
+int mt76x2_mac_start(struct mt76x02_dev *dev);
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+
+static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
new file mode 100644
index 0000000..76d8cd3
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan)
+{
+	struct {
+		u8 idx;
+		u8 scan;
+		u8 bw;
+		u8 _pad0;
+
+		__le16 chainmask;
+		u8 ext_chan;
+		u8 _pad1;
+
+	} __packed __aligned(4) msg = {
+		.idx = channel,
+		.scan = scan,
+		.bw = bw,
+		.chainmask = cpu_to_le16(dev->mt76.chainmask),
+	};
+
+	/* first set the channel without the extension channel info */
+	mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true);
+
+	usleep_range(5000, 10000);
+
+	msg.ext_chan = 0xe0 + bw_index;
+	return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg),
+				 true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
+
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+		       u8 channel)
+{
+	struct {
+		u8 cr_mode;
+		u8 temp;
+		u8 ch;
+		u8 _pad0;
+
+		__le32 cfg;
+	} __packed __aligned(4) msg = {
+		.cr_mode = type,
+		.temp = temp_level,
+		.ch = channel,
+	};
+	u32 val;
+
+	val = BIT(31);
+	val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+	val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+	msg.cfg = cpu_to_le32(val);
+
+	/* ask the MCU to load the CR values for this channel/temperature level */
+	return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
+
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+			 bool force)
+{
+	struct {
+		__le32 channel;
+		__le32 gain_val;
+	} __packed __aligned(4) msg = {
+		.channel = cpu_to_le32(channel),
+		.gain_val = cpu_to_le32(gain),
+	};
+
+	if (force)
+		msg.channel |= cpu_to_le32(BIT(31));
+
+	return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg),
+				 true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data)
+{
+	struct {
+		__le32 id;
+		struct mt76x2_tssi_comp data;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+		.data = *tssi_data,
+	};
+
+	return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+				 true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
new file mode 100644
index 0000000..41fd665
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+#include "../mt76x02_mcu.h"
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL			0x0704
+#define MT_MCU_CLOCK_CTL		0x0708
+#define MT_MCU_PCIE_REMAP_BASE1		0x0740
+#define MT_MCU_PCIE_REMAP_BASE2		0x0744
+#define MT_MCU_PCIE_REMAP_BASE3		0x0748
+
+#define MT_MCU_ROM_PATCH_OFFSET		0x80000
+#define MT_MCU_ROM_PATCH_ADDR		0x90000
+
+#define MT_MCU_ILM_OFFSET		0x80000
+
+#define MT_MCU_DLM_OFFSET		0x100000
+#define MT_MCU_DLM_ADDR			0x90000
+#define MT_MCU_DLM_ADDR_E3		0x90800
+
+enum mcu_calibration {
+	MCU_CAL_R = 1,
+	MCU_CAL_TEMP_SENSOR,
+	MCU_CAL_RXDCOC,
+	MCU_CAL_RC,
+	MCU_CAL_SX_LOGEN,
+	MCU_CAL_LC,
+	MCU_CAL_TX_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_TSSI,
+	MCU_CAL_TSSI_COMP,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQC_FI,
+	MCU_CAL_RXIQC_FD,
+	MCU_CAL_PWRON,
+	MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+	MT_RF_CR,
+	MT_BBP_CR,
+	MT_RF_BBP_CR,
+	MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+	u8 pa_mode;
+	u8 cal_mode;
+	u16 pad;
+
+	u8 slope0;
+	u8 slope1;
+	u8 offset0;
+	u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+			 bool force);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644
index 0000000..41680c4
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+
+#define MT7662_FIRMWARE		"mt7662.bin"
+#define MT7662_ROM_PATCH	"mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE	512
+
+#include "../mt76x02.h"
+#include "mac.h"
+
+static inline bool is_mt7612(struct mt76x02_dev *dev)
+{
+	return mt76_chip(&dev->mt76) == 0x7612;
+}
+
+static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+	return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+		chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+int mt76x2_register_device(struct mt76x02_dev *dev);
+
+void mt76x2_phy_power_on(struct mt76x02_dev *dev);
+void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+int mt76x2_eeprom_init(struct mt76x02_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
+int mt76x2_phy_start(struct mt76x02_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+			   struct cfg80211_chan_def *chandef);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan);
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+		       u8 channel);
+
+void mt76x2_cleanup(struct mt76x02_dev *dev);
+
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+			 struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x02_dev *dev);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev);
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+				 enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+			       enum nl80211_band band, u8 bw);
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
new file mode 100644
index 0000000..c876bac
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+
+#define MT7612U_EEPROM_SIZE		512
+
+#define MT_USB_AGGR_SIZE_LIMIT		21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+int mt76x2u_register_device(struct mt76x02_dev *dev);
+int mt76x2u_init_hardware(struct mt76x02_dev *dev);
+void mt76x2u_cleanup(struct mt76x02_dev *dev);
+void mt76x2u_stop_hw(struct mt76x02_dev *dev);
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev);
+int mt76x2u_mac_start(struct mt76x02_dev *dev);
+int mt76x2u_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_init(struct mt76x02_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x02_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x02_dev *dev);
+void mt76x2u_stop_queues(struct mt76x02_dev *dev);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+			 u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
new file mode 100644
index 0000000..e6d7646
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7662) },
+	{ PCI_DEVICE(0x14c3, 0x7612) },
+	{ PCI_DEVICE(0x14c3, 0x7602) },
+	{ },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.txwi_size = sizeof(struct mt76x02_txwi),
+		.tx_aligned4_skbs = true,
+		.update_survey = mt76x02_update_channel,
+		.tx_prepare_skb = mt76x02_tx_prepare_skb,
+		.tx_complete_skb = mt76x02_tx_complete_skb,
+		.rx_skb = mt76x02_queue_rx_skb,
+		.rx_poll_complete = mt76x02_rx_poll_complete,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
+	};
+	struct mt76x02_dev *dev;
+	struct mt76_dev *mdev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops,
+				 &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+	mt76x2_reset_wlan(dev, false);
+
+	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+
+	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt76x2_register_device(dev);
+	if (ret)
+		goto error;
+
+	/* Fix up ASPM configuration */
+
+	/* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+	mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
+
+	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+	mt76_pci_disable_aspm(pdev);
+
+	return 0;
+
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+	mt76_unregister_device(mdev);
+	mt76x2_cleanup(dev);
+	mt76_free_device(mdev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76pci_device_table,
+	.probe		= mt76pci_probe,
+	.remove		= mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
new file mode 100644
index 0000000..343127f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+
+static void
+mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	val = MT_PBF_SYS_CTRL_MCU_RESET |
+	      MT_PBF_SYS_CTRL_DMA_RESET |
+	      MT_PBF_SYS_CTRL_MAC_RESET |
+	      MT_PBF_SYS_CTRL_PBF_RESET |
+	      MT_PBF_SYS_CTRL_ASY_RESET;
+
+	mt76_set(dev, MT_PBF_SYS_CTRL, val);
+	mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x02_dev *dev)
+{
+	u16 eep_val;
+	s8 offset = 0;
+
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+	offset = eep_val & 0x7f;
+	if ((eep_val & 0xff) == 0xff)
+		offset = 0;
+	else if (eep_val & 0x80)
+		offset = 0 - offset;
+
+	eep_val >>= 8;
+	if (eep_val == 0x00 || eep_val == 0xff) {
+		eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+		eep_val &= 0xff;
+
+		if (eep_val == 0x00 || eep_val == 0xff)
+			eep_val = 0x14;
+	}
+
+	eep_val &= 0x7f;
+	mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+	mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+	case 0:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+		break;
+	case 1:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+		break;
+	default:
+		break;
+	}
+}
+
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+{
+	const u8 *macaddr = dev->mt76.macaddr;
+	u32 val;
+	int i, k;
+
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+	val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+	mt76x2_mac_pbf_init(dev);
+	mt76_write_mac_initvals(dev);
+	mt76x2_fixup_xtal(dev);
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+
+	if (is_mt7612(dev))
+		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+	usleep_range(5000, 10000);
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	mt76x02_mac_setaddr(dev, macaddr);
+	mt76x02e_init_beacon_config(dev);
+	if (!hard)
+		return 0;
+
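+	/* On a hard reset also wipe the WCID and key tables, drain the TX
+	 * status FIFO and re-arm the channel busy-time counters.
+	 */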
+	for (i = 0; i < 256 / 32; i++)
+		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+	for (i = 0; i < 256; i++) {
+		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+		mt76_wr(dev, MT_WCID_TX_RATE(i), 0);
+		mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0);
+	}
+
+	for (i = 0; i < MT_MAX_VIFS; i++)
+		mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
+
+	for (i = 0; i < 16; i++)
+		for (k = 0; k < 4; k++)
+			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY |
+		MT_CH_CCA_RC_EN |
+		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+	mt76x02_set_tx_ackto(dev);
+
+	return 0;
+}
+
+int mt76x2_mac_start(struct mt76x02_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+	mt76x02_mac_start(dev);
+
+	return 0;
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+	mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+	udelay(1);
+
+	mt76_clear(dev, 0x1001c, 0xff);
+	mt76_set(dev, 0x1001c, 0x30);
+
+	mt76_wr(dev, 0x10014, 0x484f);
+	udelay(1);
+
+	mt76_set(dev, 0x10130, BIT(17));
+	udelay(125);
+
+	mt76_clear(dev, 0x10130, BIT(16));
+	udelay(50);
+
+	mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+	int shift = unit ? 8 : 0;
+
+	/* Enable RF BG */
+	mt76_set(dev, 0x10130, BIT(0) << shift);
+	udelay(10);
+
+	/* Enable RFDIG LDO/AFE/ABB/ADDA */
+	mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+	udelay(10);
+
+	/* Switch RFDIG power to internal LDO */
+	mt76_clear(dev, 0x10130, BIT(2) << shift);
+	udelay(10);
+
+	mt76x2_power_on_rf_patch(dev);
+
+	mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	/* Turn on WL MTCMOS */
+	mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+	val = MT_WLAN_MTC_CTRL_STATE_UP |
+	      MT_WLAN_MTC_CTRL_PWR_ACK |
+	      MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+	mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+	udelay(10);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	udelay(10);
+
+	mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Turn on AD/DA power down */
+	mt76_clear(dev, 0x11204, BIT(3));
+
+	/* WLAN function enable */
+	mt76_set(dev, 0x10080, BIT(0));
+
+	/* Release BBP software reset */
+	mt76_clear(dev, 0x10064, BIT(18));
+
+	mt76x2_power_on_rf(dev, 0);
+	mt76x2_power_on_rf(dev, 1);
+}
+
+static int mt76x2_init_hardware(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	mt76x02_dma_disable(dev);
+	mt76x2_reset_wlan(dev, true);
+	mt76x2_power_on(dev);
+
+	ret = mt76x2_eeprom_init(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mac_reset(dev, true);
+	if (ret)
+		return ret;
+
+	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	ret = mt76x02_dma_init(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mcu_init(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_mac_stop(dev, false);
+
+	return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x02_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+	cancel_delayed_work_sync(&dev->wdt_work);
+	mt76x02_mcu_set_radio_state(dev, false);
+	mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x02_dev *dev)
+{
+	tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	mt76x2_stop_hardware(dev);
+	mt76x02_dma_cleanup(dev);
+	mt76x02_mcu_cleanup(dev);
+}
+
+int mt76x2_register_device(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+
+	mt76x02_init_device(dev);
+
+	ret = mt76x2_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	mt76x02_config_mac_addr_list(dev);
+
+	ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+				   ARRAY_SIZE(mt76x02_rates));
+	if (ret)
+		goto fail;
+
+	mt76x02_init_debugfs(dev);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+	return 0;
+
+fail:
+	mt76x2_stop_hardware(dev);
+	return ret;
+}
+
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
new file mode 100644
index 0000000..4971685
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	int ret;
+
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_phy_start(dev);
+	if (ret)
+		return ret;
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT_MAC_WORK_INTERVAL);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+				     MT_WATCHDOG_TIME);
+
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	return 0;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76x2_stop_hardware(dev);
+}
+
+static int
+mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+	tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+
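+	/* Keep the pre-TBTT (beacon) and DFS tasklets quiet while the MAC
+	 * is stopped and the PHY is retuned.
+	 */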
+	mutex_lock(&dev->mt76.mutex);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	mt76_set_channel(&dev->mt76);
+
+	mt76x2_mac_stop(dev, true);
+	ret = mt76x2_phy_set_channel(dev, chandef);
+
+	/* channel cycle counters read-and-clear */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	mt76x02_dfs_init_params(dev);
+
+	mt76x2_mac_resume(dev);
+
+	clear_bit(MT76_RESET, &dev->mt76.state);
+	mutex_unlock(&dev->mt76.mutex);
+
+	tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+	mt76_txq_schedule_all(&dev->mt76);
+
+	return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->mt76.txpower_conf = hw->conf.power_level * 2;
+
+		/* convert to per-chain power for 2x2 devices */
+		dev->mt76.txpower_conf -= 6;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+			mt76x2_phy_set_txpower(dev);
+			mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+		}
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	return ret;
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	     u32 queues, bool drop)
+{
+}
+
+static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
+			      u32 rx_ant)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
+		return -EINVAL;
+
+	mutex_lock(&dev->mt76.mutex);
+
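+	/* tx_ant is an antenna bitmask; only when both antennas (0x3) are
+	 * selected is the 2x2 chainmask used.
+	 */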
+	dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+	dev->mt76.antenna_mask = tx_ant;
+
+	mt76_set_stream_caps(&dev->mt76, true);
+	mt76x2_phy_set_antenna(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
+			      u32 *rx_ant)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+	*tx_ant = dev->mt76.antenna_mask;
+	*rx_ant = dev->mt76.antenna_mask;
+	mutex_unlock(&dev->mt76.mutex);
+
+	return 0;
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+	.tx = mt76x02_tx,
+	.start = mt76x2_start,
+	.stop = mt76x2_stop,
+	.add_interface = mt76x02_add_interface,
+	.remove_interface = mt76x02_remove_interface,
+	.config = mt76x2_config,
+	.configure_filter = mt76x02_configure_filter,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.sta_state = mt76_sta_state,
+	.set_key = mt76x02_set_key,
+	.conf_tx = mt76x02_conf_tx,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
+	.flush = mt76x2_flush,
+	.ampdu_action = mt76x02_ampdu_action,
+	.get_txpower = mt76_get_txpower,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.set_coverage_class = mt76x02_set_coverage_class,
+	.get_survey = mt76_get_survey,
+	.set_tim = mt76_set_tim,
+	.set_antenna = mt76x2_set_antenna,
+	.get_antenna = mt76x2_get_antenna,
+	.set_rts_threshold = mt76x02_set_rts_threshold,
+};
+
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
new file mode 100644
index 0000000..ca6f968
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static int
+mt76pci_load_rom_patch(struct mt76x02_dev *dev)
+{
+	const struct firmware *fw = NULL;
+	struct mt76x02_patch_header *hdr;
+	bool rom_protect = !is_mt7612(dev);
+	int len, ret = 0;
+	__le32 *cur;
+	u32 patch_mask, patch_reg;
+
+	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"Could not get hardware semaphore for ROM PATCH\n");
+		return -ETIMEDOUT;
+	}
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+		patch_mask = BIT(0);
+		patch_reg = MT_MCU_CLOCK_CTL;
+	} else {
+		patch_mask = BIT(1);
+		patch_reg = MT_MCU_COM_REG0;
+	}
+
+	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+		dev_info(dev->mt76.dev, "ROM patch already applied\n");
+		goto out;
+	}
+
+	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+	if (ret)
+		goto out;
+
+	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+		ret = -EIO;
+		dev_err(dev->mt76.dev, "Failed to load firmware\n");
+		goto out;
+	}
+
+	hdr = (struct mt76x02_patch_header *)fw->data;
+	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+	cur = (__le32 *)(fw->data + sizeof(*hdr));
+	len = fw->size - sizeof(*hdr);
+	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	/* Trigger ROM */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	/* release semaphore */
+	if (rom_protect)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+	release_firmware(fw);
+	return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x02_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76x02_fw_header *hdr;
+	int len, ret;
+	__le32 *cur;
+	u32 offset, val;
+
+	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto error;
+
+	hdr = (const struct mt76x02_fw_header *)fw->data;
+
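+	/* The firmware image is laid out as header + ILM + DLM; check the
+	 * total size before uploading the two sections separately.
+	 */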
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto error;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	val = le16_to_cpu(hdr->build_ver);
+	dev_info(dev->mt76.dev, "Build: %x\n", val);
+	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+	cur = (__le32 *)(fw->data + sizeof(*hdr));
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+	cur += len / sizeof(*cur);
+	len = le32_to_cpu(hdr->dlm_len);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		offset = MT_MCU_DLM_ADDR_E3;
+	else
+		offset = MT_MCU_DLM_ADDR;
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+	mt76_wr_copy(dev, offset, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+	/* trigger firmware */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
+		dev_err(dev->mt76.dev, "Firmware failed to start\n");
+		release_firmware(fw);
+		return -ETIMEDOUT;
+	}
+
+	mt76x02_set_ethtool_fwver(dev, hdr);
+	dev_info(dev->mt76.dev, "Firmware running!\n");
+
+	release_firmware(fw);
+
+	return ret;
+
+error:
+	dev_err(dev->mt76.dev, "Invalid firmware\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+	struct mt76x02_dev *dev;
+	int ret;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+	mt76x02_mcu_cleanup(dev);
+	mt76x2_mac_reset(dev, true);
+
+	ret = mt76pci_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	return 0;
+}
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev)
+{
+	static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+		.mcu_restart = mt76pci_mcu_restart,
+		.mcu_send_msg = mt76x02_mcu_msg_send,
+	};
+	int ret;
+
+	dev->mt76.mcu_ops = &mt76x2_mcu_ops;
+
+	ret = mt76pci_load_rom_patch(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76pci_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
new file mode 100644
index 0000000..23f35bf
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	u32 flag = 0;
+
+	if (!mt76x2_tssi_enabled(dev))
+		return false;
+
+	if (mt76x2_channel_silent(dev))
+		return false;
+
+	if (chan->band == NL80211_BAND_5GHZ)
+		flag |= BIT(0);
+
+	if (mt76x02_ext_pa_enabled(dev, chan->band))
+		flag |= BIT(8);
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+	dev->cal.tssi_cal_done = true;
+	return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	if (dev->cal.channel_cal_done)
+		return;
+
+	if (mt76x2_channel_silent(dev))
+		return;
+
+	if (!dev->cal.tssi_cal_done)
+		mt76x2_phy_tssi_init_cal(dev);
+
+	if (!mac_stopped)
+		mt76x2_mac_stop(dev, false);
+
+	if (is_5ghz)
+		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+	if (!mac_stopped)
+		mt76x2_mac_resume(dev);
+
+	mt76x2_apply_gain_adj(dev);
+	mt76x02_edcca_init(dev);
+
+	dev->cal.channel_cal_done = true;
+}
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~(BIT(4) | BIT(1));
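+	/* antenna_mask: BIT(0) enables chain 0, BIT(1) enables chain 1 */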
+	switch (dev->mt76.antenna_mask) {
+	case 1:
+		/* disable mac DAC control */
+		mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+		mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+		mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0x3);
+		mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 2);
+		/* disable DAC 1 */
+		mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 4);
+
+		val &= ~(BIT(3) | BIT(0));
+		break;
+	case 2:
+		/* disable mac DAC control */
+		mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+		mt76_rmw_field(dev, MT_BBP(TXBE, 5), 3, 1);
+		mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xc);
+		mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 1);
+		/* disable DAC 0 */
+		mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 1);
+
+		val &= ~BIT(3);
+		val |= BIT(0);
+		break;
+	case 3:
+	default:
+		/* enable mac DAC control */
+		mt76_set(dev, MT_BBP(IBI, 9), BIT(11));
+		mt76_set(dev, MT_BBP(TXBE, 5), 3);
+		mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xf);
+		mt76_clear(dev, MT_BBP(CORE, 32), GENMASK(21, 20));
+		mt76_clear(dev, MT_BBP(CORE, 33), GENMASK(12, 9));
+
+		val &= ~BIT(0);
+		val |= BIT(3);
+		break;
+	}
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+			   struct cfg80211_chan_def *chandef)
+{
+	struct ieee80211_channel *chan = chandef->chan;
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	enum nl80211_band band = chan->band;
+	u8 channel;
+
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	int ch_group_index;
+	u8 bw, bw_index;
+	int freq, freq1;
+	int ret;
+
+	dev->cal.channel_cal_done = false;
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+	channel = chan->hw_value;
+
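+	/* derive the MCU bandwidth code, the bandwidth index and the
+	 * primary 20 MHz group, and shift the channel number to the
+	 * center of the 40/80 MHz block
+	 */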
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		bw = 1;
+		if (freq1 > freq) {
+			bw_index = 1;
+			ch_group_index = 0;
+		} else {
+			bw_index = 3;
+			ch_group_index = 1;
+		}
+		channel += 2 - ch_group_index * 4;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		bw = 2;
+		bw_index = ch_group_index;
+		channel += 6 - ch_group_index * 4;
+		break;
+	default:
+		bw = 0;
+		bw_index = 0;
+		ch_group_index = 0;
+		break;
+	}
+
+	mt76x2_read_rx_gain(dev);
+	mt76x2_phy_set_txpower_regs(dev, band);
+	mt76x2_configure_tx_delay(dev, band, bw);
+	mt76x2_phy_set_txpower(dev);
+
+	mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+	mt76x2_phy_set_antenna(dev);
+
+	/* Enable LDPC Rx */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+	if (!dev->cal.init_cal_done) {
+		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+		if (val != 0xff)
+			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+	}
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+	/* Rx LPF calibration */
+	if (!dev->cal.init_cal_done)
+		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+	dev->cal.init_cal_done = true;
+
+	mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+	if (scan)
+		return 0;
+
+	mt76x2_phy_channel_calibrate(dev, true);
+	mt76x02_init_agc_gain(dev);
+
+	/* init default values for temp compensation */
+	if (mt76x2_tssi_enabled(dev)) {
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+			       0x38);
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+			       0x38);
+	}
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+
+	return 0;
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
+{
+	struct mt76x2_temp_comp t;
+	int temp, db_diff;
+
+	if (mt76x2_get_temp_comp(dev, &t))
+		return;
+
+	temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+	temp -= t.temp_25_ref;
+	temp = (temp * 1789) / 1000 + 25;
+	dev->cal.temp = temp;
+
+	if (temp > 25)
+		db_diff = (temp - 25) / t.high_slope;
+	else
+		db_diff = (25 - temp) / t.low_slope;
+
+	db_diff = min(db_diff, t.upper_bound);
+	db_diff = max(db_diff, t.lower_bound);
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+		       db_diff * 2);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+		       db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x02_dev *dev;
+
+	dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt76x2_phy_channel_calibrate(dev, false);
+	mt76x2_phy_tssi_compensate(dev);
+	mt76x2_phy_temp_compensate(dev);
+	mt76x2_phy_update_channel_gain(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x02_dev *dev)
+{
+	int ret;
+
+	ret = mt76x02_mcu_set_radio_state(dev, true);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
new file mode 100644
index 0000000..edbab4f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN,
+			 mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain -= offset / 2;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain += offset;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev)
+{
+	s8 *gain_adj = dev->cal.rx.high_gain;
+
+	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+				 enum nl80211_band band)
+{
+	u32 pa_mode[2];
+	u32 pa_mode_adj;
+
+	if (band == NL80211_BAND_2GHZ) {
+		pa_mode[0] = 0x010055ff;
+		pa_mode[1] = 0x00550055;
+
+		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+		if (mt76x02_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+		} else {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+		}
+	} else {
+		pa_mode[0] = 0x0000ffff;
+		pa_mode[1] = 0x00ff00ff;
+
+		if (mt76x02_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+		} else {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+		}
+
+		if (mt76x02_ext_pa_enabled(dev, band))
+			pa_mode_adj = 0x04000000;
+		else
+			pa_mode_adj = 0;
+
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+	}
+
+	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+	if (mt76x02_ext_pa_enabled(dev, band)) {
+		u32 val;
+
+		if (band == NL80211_BAND_2GHZ)
+			val = 0x3c3c023c;
+		else
+			val = 0x363c023c;
+
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+	} else {
+		if (band == NL80211_BAND_2GHZ) {
+			u32 val = 0x0f3c3c3c;
+
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+		} else {
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
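+/* return the lowest non-zero entry in the per-rate power table; it is
+ * used as the baseline when normalizing the rate power offsets
+ */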
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+	int i;
+	s8 ret = 0;
+
+	for (i = 0; i < sizeof(r->all); i++) {
+		if (!r->all[i])
+			continue;
+
+		if (ret)
+			ret = min(ret, r->all[i]);
+		else
+			ret = r->all[i];
+	}
+
+	return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
+{
+	enum nl80211_chan_width width = dev->mt76.chandef.width;
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	int txp_0, txp_1, delta = 0;
+	struct mt76_rate_power t = {};
+	int base_power, gain;
+
+	mt76x2_get_power_info(dev, &txp, chan);
+
+	if (width == NL80211_CHAN_WIDTH_40)
+		delta = txp.delta_bw40;
+	else if (width == NL80211_CHAN_WIDTH_80)
+		delta = txp.delta_bw80;
+
+	mt76x2_get_rate_power(dev, &t, chan);
+	mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
+	mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
+	dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
+
+	base_power = mt76x2_get_min_rate_power(&t);
+	delta = base_power - txp.target_power;
+	txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+	txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
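+	/* clamp the per-chain power into the 0..0x2f register range and
+	 * fold the difference back into the per-rate offsets
+	 */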
+	gain = min(txp_0, txp_1);
+	if (gain < 0) {
+		base_power -= gain;
+		txp_0 -= gain;
+		txp_1 -= gain;
+	} else if (gain > 0x2f) {
+		base_power -= gain - 0x2f;
+		txp_0 = 0x2f;
+		txp_1 = 0x2f;
+	}
+
+	mt76x02_add_rate_power_offset(&t, -base_power);
+	dev->target_power = txp.target_power;
+	dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+	dev->mt76.rate_power = t;
+
+	mt76x02_phy_set_txpower(dev, txp_0, txp_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+			       enum nl80211_band band, u8 bw)
+{
+	u32 cfg0, cfg1;
+
+	if (mt76x02_ext_pa_enabled(dev, band)) {
+		cfg0 = bw ? 0x000b0c01 : 0x00101101;
+		cfg1 = 0x00011414;
+	} else {
+		cfg0 = bw ? 0x000b0b01 : 0x00101001;
+		cfg1 = 0x00021414;
+	}
+	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76x2_tssi_comp t = {};
+
+	if (!dev->cal.tssi_cal_done)
+		return;
+
+	if (!dev->cal.tssi_comp_pending) {
+		/* TSSI trigger */
+		t.cal_mode = BIT(0);
+		mt76x2_mcu_tssi_comp(dev, &t);
+		dev->cal.tssi_comp_pending = true;
+	} else {
+		if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+			return;
+
+		dev->cal.tssi_comp_pending = false;
+		mt76x2_get_power_info(dev, &txp, chan);
+
+		if (mt76x02_ext_pa_enabled(dev, chan->band))
+			t.pa_mode = 1;
+
+		t.cal_mode = BIT(1);
+		t.slope0 = txp.chain[0].tssi_slope;
+		t.offset0 = txp.chain[0].tssi_offset;
+		t.slope1 = txp.chain[1].tssi_slope;
+		t.offset1 = txp.chain[1].tssi_offset;
+		mt76x2_mcu_tssi_comp(dev, &t);
+
+		if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked)
+			return;
+
+		usleep_range(10000, 20000);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+		dev->cal.dpd_cal_done = true;
+	}
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+	u32 val;
+	u8 gain_val[2];
+
+	gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+	gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+	val = 0x1836 << 16;
+	if (!mt76x2_has_ext_lna(dev) &&
+	    dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+		val = 0x1e42 << 16;
+
+	if (mt76x2_has_ext_lna(dev) &&
+	    dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+	    dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+		val = 0x0f36 << 16;
+
+	val |= 0xf8;
+
+	mt76_wr(dev, MT_BBP(AGC, 8),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+	mt76_wr(dev, MT_BBP(AGC, 9),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+	if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+		mt76x02_phy_dfs_adjust_agc(dev);
+}
+
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+	u8 *gain = dev->cal.agc_gain_init;
+	u8 low_gain_delta, gain_delta;
+	u32 agc_35, agc_37;
+	bool gain_change;
+	int low_gain;
+	u32 val;
+
+	dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+	if (!dev->cal.avg_rssi_all)
+		dev->cal.avg_rssi_all = -75;
+
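+	/* count how many RSSI thresholds the running average exceeds:
+	 * 0 = weak signal (keep full gain), 2 = strong signal
+	 */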
+	low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+		(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+	gain_change = dev->cal.low_gain < 0 ||
+		      (dev->cal.low_gain & 2) ^ (low_gain & 2);
+	dev->cal.low_gain = low_gain;
+
+	if (!gain_change) {
+		if (mt76x02_phy_adjust_vga_gain(dev))
+			mt76x2_phy_set_gain_val(dev);
+		return;
+	}
+
+	if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+		val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+		if (low_gain == 2)
+			val |= 0x3;
+		else
+			val |= 0x5;
+		mt76_wr(dev, MT_BBP(AGC, 26), val);
+	} else {
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+	}
+
+	if (mt76x2_has_ext_lna(dev))
+		low_gain_delta = 10;
+	else
+		low_gain_delta = 14;
+
+	agc_37 = 0x2121262c;
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+		agc_35 = 0x11111516;
+	else if (low_gain == 2)
+		agc_35 = agc_37 = 0x08080808;
+	else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+		agc_35 = 0x10101014;
+	else
+		agc_35 = 0x11111116;
+
+	if (low_gain == 2) {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+		mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+		mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+		gain_delta = low_gain_delta;
+		dev->cal.agc_gain_adjust = 0;
+	} else {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+		gain_delta = 0;
+		dev->cal.agc_gain_adjust = low_gain_delta;
+	}
+
+	mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+	mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
+	dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+	dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+	mt76x2_phy_set_gain_val(dev);
+
+	/* clear false CCA counters */
+	mt76_rr(dev, MT_RX_STAT_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_update_channel_gain);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
new file mode 100644
index 0000000..96a2b7b
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "../mt76x02_usb.h"
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+	{ USB_DEVICE(0x0b05, 0x1833) },	/* Asus USB-AC54 */
+	{ USB_DEVICE(0x0b05, 0x17eb) },	/* Asus USB-AC55 */
+	{ USB_DEVICE(0x0b05, 0x180b) },	/* Asus USB-N53 B1 */
+	{ USB_DEVICE(0x0e8d, 0x7612) },	/* Aukey USBAC1200 - Alfa AWUS036ACM */
+	{ USB_DEVICE(0x057c, 0x8503) },	/* Avm FRITZ!WLAN AC860 */
+	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax EW 7722 UAC */
+	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
+	{ USB_DEVICE(0x045e, 0x02e6) },	/* XBox One Wireless Adapter */
+	{ USB_DEVICE(0x045e, 0x02fe) },	/* XBox One Wireless Adapter */
+	{ },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+			 const struct usb_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.update_survey = mt76x02_update_channel,
+		.tx_prepare_skb = mt76x02u_tx_prepare_skb,
+		.tx_complete_skb = mt76x02u_tx_complete_skb,
+		.tx_status_data = mt76x02_tx_status_data,
+		.rx_skb = mt76x02_queue_rx_skb,
+		.sta_ps = mt76x02_sta_ps,
+		.sta_add = mt76x02_sta_add,
+		.sta_remove = mt76x02_sta_remove,
+	};
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76x02_dev *dev;
+	struct mt76_dev *mdev;
+	int err;
+
+	mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
+				 &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+	udev = usb_get_dev(udev);
+	usb_reset_device(udev);
+
+	usb_set_intfdata(intf, dev);
+
+	mt76x02u_init_mcu(mdev);
+	err = mt76u_init(mdev, intf);
+	if (err < 0)
+		goto err;
+
+	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+	if (!is_mt76x2(dev)) {
+		err = -ENODEV;
+		goto err;
+	}
+
+	err = mt76x2u_register_device(dev);
+	if (err < 0)
+		goto err;
+
+	return 0;
+
+err:
+	ieee80211_free_hw(mt76_hw(dev));
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(udev);
+
+	return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76x02_dev *dev = usb_get_intfdata(intf);
+	struct ieee80211_hw *hw = mt76_hw(dev);
+
+	set_bit(MT76_REMOVED, &dev->mt76.state);
+	ieee80211_unregister_hw(hw);
+	mt76x2u_cleanup(dev);
+
+	ieee80211_free_hw(hw);
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+					  pm_message_t state)
+{
+	struct mt76x02_dev *dev = usb_get_intfdata(intf);
+
+	mt76u_stop_rx(&dev->mt76);
+
+	return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+	struct mt76x02_dev *dev = usb_get_intfdata(intf);
+	int err;
+
+	err = mt76u_resume_rx(&dev->mt76);
+	if (err < 0)
+		goto err;
+
+	err = mt76x2u_init_hardware(dev);
+	if (err < 0)
+		goto err;
+
+	return 0;
+
+err:
+	mt76x2u_cleanup(dev);
+	return err;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76x2u_device_table,
+	.probe		= mt76x2u_probe,
+	.disconnect	= mt76x2u_disconnect,
+#ifdef CONFIG_PM
+	.suspend	= mt76x2u_suspend,
+	.resume		= mt76x2u_resume,
+	.reset_resume	= mt76x2u_resume,
+#endif /* CONFIG_PM */
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
new file mode 100644
index 0000000..e305b37
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+#include "../mt76x02_usb.h"
+
+static void mt76x2u_init_dma(struct mt76x02_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+	       MT_USB_DMA_CFG_RX_BULK_EN |
+	       MT_USB_DMA_CFG_TX_BULK_EN;
+
+	/* disable AGGR_BULK_RX in order to receive one
+	 * frame in each rx urb and avoid copies
+	 */
+	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+	udelay(1);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+	udelay(1);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+	usleep_range(150, 200);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+	usleep_range(50, 100);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+	int shift = unit ? 8 : 0;
+	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+	/* Enable RF BG */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+	usleep_range(10, 20);
+
+	/* Enable RFDIG LDO/AFE/ABB/ADDA */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+	usleep_range(10, 20);
+
+	/* Switch RFDIG power to internal LDO */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+	usleep_range(10, 20);
+
+	mt76x2u_power_on_rf_patch(dev);
+
+	mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x02_dev *dev)
+{
+	u32 val;
+
+	/* Turn on WL MTCMOS */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+	val = MT_WLAN_MTC_CTRL_STATE_UP |
+	      MT_WLAN_MTC_CTRL_PWR_ACK |
+	      MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+	usleep_range(10, 20);
+
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+	usleep_range(10, 20);
+
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+	/* Release AD/DA power down */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+	/* WLAN function enable */
+	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+	/* Release BBP software reset */
+	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+	mt76x2u_power_on_rf(dev, 0);
+	mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
+{
+	u32 val, i;
+
+	dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+					     MT7612U_EEPROM_SIZE,
+					     GFP_KERNEL);
+	dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+	if (!dev->mt76.eeprom.data)
+		return -ENOMEM;
+
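+	/* read the EEPROM content one 32-bit word at a time through the
+	 * vendor register window
+	 */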
+	for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+		val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+		put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+	}
+
+	mt76x02_eeprom_parse_hw_cap(dev);
+	return 0;
+}
+
+int mt76x2u_init_hardware(struct mt76x02_dev *dev)
+{
+	int i, k, err;
+
+	mt76x2_reset_wlan(dev, true);
+	mt76x2u_power_on(dev);
+
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	err = mt76x2u_mcu_fw_init(dev);
+	if (err < 0)
+		return err;
+
+	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+		return -EIO;
+
+	/* wait for the ASIC to be ready after firmware load */
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	mt76x2u_init_dma(dev);
+
+	err = mt76x2u_mcu_init(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76x2u_mac_reset(dev);
+	if (err < 0)
+		return err;
+
+	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+		return -ETIMEDOUT;
+
+	/* reset wcid table */
+	for (i = 0; i < 256; i++)
+		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+
+	/* reset shared key table and pairwise key table */
+	for (i = 0; i < 16; i++) {
+		for (k = 0; k < 4; k++)
+			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+	}
+
+	mt76x02u_init_beacon_config(dev);
+
+	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+	err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+	if (err < 0)
+		return err;
+
+	mt76x02_phy_set_rxpath(dev);
+	mt76x02_phy_set_txdac(dev);
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY);
+
+	return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x02_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	int err;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+	mt76x02_init_device(dev);
+
+	err = mt76x2u_init_eeprom(dev);
+	if (err < 0)
+		return err;
+
+	err = mt76u_alloc_queues(&dev->mt76);
+	if (err < 0)
+		goto fail;
+
+	err = mt76x2u_init_hardware(dev);
+	if (err < 0)
+		goto fail;
+
+	err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+				   ARRAY_SIZE(mt76x02_rates));
+	if (err)
+		goto fail;
+
+	/* check hw sg support in order to enable AMSDU */
+	if (dev->mt76.usb.sg_en)
+		hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
+	else
+		hw->max_tx_fragments = 1;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+	mt76x02_init_debugfs(dev);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+	return 0;
+
+fail:
+	mt76x2u_cleanup(dev);
+	return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x02_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mt76.mac_work);
+	mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x02_dev *dev)
+{
+	mt76x02_mcu_set_radio_state(dev, false);
+	mt76x2u_stop_hw(dev);
+	mt76u_queues_deinit(&dev->mt76);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
new file mode 100644
index 0000000..e7fea3a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
+{
+	mt76_rr(dev, MT_RX_STAT_0);
+	mt76_rr(dev, MT_RX_STAT_1);
+	mt76_rr(dev, MT_RX_STAT_2);
+	mt76_rr(dev, MT_TX_STA_0);
+	mt76_rr(dev, MT_TX_STA_1);
+	mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
+{
+	s8 offset = 0;
+	u16 eep_val;
+
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
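+	/* the low byte of XTAL_TRIM_2 is sign-magnitude: bit 7 holds the
+	 * sign and 0xff means the trim value is not programmed
+	 */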
+	offset = eep_val & 0x7f;
+	if ((eep_val & 0xff) == 0xff)
+		offset = 0;
+	else if (eep_val & 0x80)
+		offset = 0 - offset;
+
+	eep_val >>= 8;
+	if (eep_val == 0x00 || eep_val == 0xff) {
+		eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+		eep_val &= 0xff;
+
+		if (eep_val == 0x00 || eep_val == 0xff)
+			eep_val = 0x14;
+	}
+
+	eep_val &= 0x7f;
+	mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+		       MT_XO_CTRL5_C2_VAL, eep_val + offset);
+	mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+	mt76_wr(dev, 0x504, 0x06000000);
+	mt76_wr(dev, 0x50c, 0x08800000);
+	mdelay(5);
+	mt76_wr(dev, 0x504, 0x0);
+
+	/* decrease SIFS from 16us to 13us */
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+		       MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+	mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+	/* init fce */
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+	case 0:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+		break;
+	case 1:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+		break;
+	default:
+		break;
+	}
+}
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev)
+{
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+	/* init pbf regs */
+	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+	mt76_write_mac_initvals(dev);
+
+	mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+	mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+	mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+
+	mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+	mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+	mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+
+	if (is_mt7612(dev))
+		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+	mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+	mt76x2u_mac_fixup_xtal(dev);
+
+	return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x02_dev *dev)
+{
+	mt76x2u_mac_reset_counters(dev);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+	usleep_range(50, 100);
+
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x02_dev *dev)
+{
+	int i, count = 0, val;
+	bool stopped = false;
+	u32 rts_cfg;
+
+	if (test_bit(MT76_REMOVED, &dev->mt76.state))
+		return -EIO;
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+	mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+
+	/* wait for TX DMA to stop */
+	for (i = 0; i < 2000; i++) {
+		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+		if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+			break;
+		usleep_range(50, 100);
+	}
+
+	/* page count on TxQ */
+	for (i = 0; i < 200; i++) {
+		if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+		    !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+		    !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+			break;
+		usleep_range(10, 20);
+	}
+
+	/* disable tx-rx */
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_ENABLE_RX |
+		   MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 1000; i++) {
+		if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+		    !mt76_rr(dev, MT_BBP(IBI, 12))) {
+			stopped = true;
+			break;
+		}
+		usleep_range(10, 20);
+	}
+
+	if (!stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	/* page count on RxQ */
+	for (i = 0; i < 200; i++) {
+		if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+		    !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+		    !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+		    ++count > 10)
+			break;
+		msleep(50);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+		dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+	/* wait for RX DMA to stop */
+	for (i = 0; i < 2000; i++) {
+		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+		if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+			break;
+		usleep_range(50, 100);
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
new file mode 100644
index 0000000..eb73cb8
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	int ret;
+
+	ret = mt76x2u_mac_start(dev);
+	if (ret)
+		return ret;
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+				     MT_MAC_WORK_INTERVAL);
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+	return 0;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76u_stop_tx(&dev->mt76);
+	mt76x2u_stop_hw(dev);
+}
+
+static int
+mt76x2u_set_channel(struct mt76x02_dev *dev,
+		    struct cfg80211_chan_def *chandef)
+{
+	int err;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	mt76x02_pre_tbtt_enable(dev, false);
+
+	mutex_lock(&dev->mt76.mutex);
+	set_bit(MT76_RESET, &dev->mt76.state);
+
+	mt76_set_channel(&dev->mt76);
+
+	mt76x2_mac_stop(dev, false);
+
+	err = mt76x2u_phy_set_channel(dev, chandef);
+
+	/* channel cycle counters read-and-clear */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	mt76x2_mac_resume(dev);
+
+	clear_bit(MT76_RESET, &dev->mt76.state);
+	mutex_unlock(&dev->mt76.mutex);
+
+	mt76x02_pre_tbtt_enable(dev, true);
+	mt76_txq_schedule_all(&dev->mt76);
+
+	return err;
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x02_dev *dev = hw->priv;
+	int err = 0;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+			dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+		else
+			dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+		mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->mt76.txpower_conf = hw->conf.power_level * 2;
+
+		/* convert to per-chain power for 2x2 devices */
+		dev->mt76.txpower_conf -= 6;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+			mt76x2_phy_set_txpower(dev);
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	return err;
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+	.tx = mt76x02_tx,
+	.start = mt76x2u_start,
+	.stop = mt76x2u_stop,
+	.add_interface = mt76x02_add_interface,
+	.remove_interface = mt76x02_remove_interface,
+	.sta_state = mt76_sta_state,
+	.set_key = mt76x02_set_key,
+	.ampdu_action = mt76x02_ampdu_action,
+	.config = mt76x2u_config,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.bss_info_changed = mt76x02_bss_info_changed,
+	.configure_filter = mt76x02_configure_filter,
+	.conf_tx = mt76x02_conf_tx,
+	.sw_scan_start = mt76_sw_scan,
+	.sw_scan_complete = mt76x02_sw_scan_complete,
+	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+	.get_txpower = mt76_get_txpower,
+	.get_survey = mt76_get_survey,
+	.set_tim = mt76_set_tim,
+	.release_buffered_frames = mt76_release_buffered_frames,
+};
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
new file mode 100644
index 0000000..dd22d8a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN			4
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD	2048
+
+#define MT76U_MCU_ILM_OFFSET		0x80000
+#define MT76U_MCU_DLM_OFFSET		0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET	0x90000
+
+static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
+{
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_VENDOR,
+			     0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
+{
+	struct mt76_usb *usb = &dev->mt76.usb;
+	static const u8 data[] = {
+		0x6f, 0xfc, 0x08, 0x01,
+		0x20, 0x04, 0x00, 0x00,
+		0x00, 0x09, 0x00,
+	};
+
+	memcpy(usb->data, data, sizeof(data));
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_CLASS,
+			     0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev)
+{
+	struct mt76_usb *usb = &dev->mt76.usb;
+	u8 data[] = {
+		0x6f, 0xfc, 0x05, 0x01,
+		0x07, 0x01, 0x00, 0x04
+	};
+
+	memcpy(usb->data, data, sizeof(data));
+	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+			     USB_DIR_OUT | USB_TYPE_CLASS,
+			     0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
+{
+	bool rom_protect = !is_mt7612(dev);
+	struct mt76x02_patch_header *hdr;
+	u32 val, patch_mask, patch_reg;
+	const struct firmware *fw;
+	int err;
+
+	if (rom_protect &&
+	    !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"could not get hardware semaphore for ROM PATCH\n");
+		return -ETIMEDOUT;
+	}
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+		patch_mask = BIT(0);
+		patch_reg = MT_MCU_CLOCK_CTL;
+	} else {
+		patch_mask = BIT(1);
+		patch_reg = MT_MCU_COM_REG0;
+	}
+
+	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+		dev_info(dev->mt76.dev, "ROM patch already applied\n");
+		return 0;
+	}
+
+	err = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+	if (err < 0)
+		return err;
+
+	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "failed to load firmware\n");
+		err = -EIO;
+		goto out;
+	}
+
+	hdr = (struct mt76x02_patch_header *)fw->data;
+	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+	/* enable USB_DMA_CFG */
+	val = MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN |
+	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+	/* vendor reset */
+	mt76x02u_mcu_fw_reset(dev);
+	usleep_range(5000, 10000);
+
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+					fw->size - sizeof(*hdr),
+					MCU_ROM_PATCH_MAX_PAYLOAD,
+					MT76U_MCU_ROM_PATCH_OFFSET);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	mt76x2u_mcu_enable_patch(dev);
+	mt76x2u_mcu_reset_wmt(dev);
+	mdelay(20);
+
+	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+		dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+		err = -ETIMEDOUT;
+	}
+
+out:
+	if (rom_protect)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+	release_firmware(fw);
+	return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
+{
+	u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+	const struct mt76x02_fw_header *hdr;
+	int err, len, ilm_len, dlm_len;
+	const struct firmware *fw;
+
+	err = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+	if (err < 0)
+		return err;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt76x02_fw_header *)fw->data;
+	ilm_len = le32_to_cpu(hdr->ilm_len);
+	dlm_len = le32_to_cpu(hdr->dlm_len);
+	len = sizeof(*hdr) + ilm_len + dlm_len;
+	if (fw->size != len) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	val = le16_to_cpu(hdr->build_ver);
+	dev_info(dev->mt76.dev, "Build: %x\n", val);
+	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+	/* vendor reset */
+	mt76x02u_mcu_fw_reset(dev);
+	usleep_range(5000, 10000);
+
+	/* enable USB_DMA_CFG */
+	val = MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN |
+	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	/* FCE tx_fs_base_ptr */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+	/* FCE pdma enable */
+	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+	/* load ILM */
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+					ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+					MT76U_MCU_ILM_OFFSET);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	/* load DLM */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		dlm_offset += 0x800;
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
+					dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+					dlm_offset);
+	if (err < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	mt76x2u_mcu_load_ivb(dev);
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+		dev_err(dev->mt76.dev, "firmware failed to start\n");
+		err = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+	/* enable FCE to send in-band cmd */
+	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	mt76x02_set_ethtool_fwver(dev, hdr);
+	dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+	release_firmware(fw);
+	return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev)
+{
+	int err;
+
+	err = mt76x2u_mcu_load_rom_patch(dev);
+	if (err < 0)
+		return err;
+
+	return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x02_dev *dev)
+{
+	int err;
+
+	err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+	if (err < 0)
+		return err;
+
+	return mt76x02_mcu_set_radio_state(dev, true);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
new file mode 100644
index 0000000..b1381f9
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	if (dev->cal.channel_cal_done)
+		return;
+
+	if (mt76x2_channel_silent(dev))
+		return;
+
+	if (!mac_stopped)
+		mt76x2u_mac_stop(dev);
+
+	if (is_5ghz)
+		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+	if (!mac_stopped)
+		mt76x2_mac_resume(dev);
+	mt76x2_apply_gain_adj(dev);
+	mt76x02_edcca_init(dev);
+
+	dev->cal.channel_cal_done = true;
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x02_dev *dev;
+
+	dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt76x2u_phy_channel_calibrate(dev, false);
+	mt76x2_phy_tssi_compensate(dev);
+	mt76x2_phy_update_channel_gain(dev);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+			    struct cfg80211_chan_def *chandef)
+{
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	struct ieee80211_channel *chan = chandef->chan;
+	u8 channel = chan->hw_value, bw, bw_index;
+	int ch_group_index, freq, freq1, ret;
+
+	dev->cal.channel_cal_done = false;
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		bw = 1;
+		if (freq1 > freq) {
+			bw_index = 1;
+			ch_group_index = 0;
+		} else {
+			bw_index = 3;
+			ch_group_index = 1;
+		}
+		channel += 2 - ch_group_index * 4;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		bw = 2;
+		bw_index = ch_group_index;
+		channel += 6 - ch_group_index * 4;
+		break;
+	default:
+		bw = 0;
+		bw_index = 0;
+		ch_group_index = 0;
+		break;
+	}
+
+	mt76x2_read_rx_gain(dev);
+	mt76x2_phy_set_txpower_regs(dev, chan->band);
+	mt76x2_configure_tx_delay(dev, chan->band, bw);
+	mt76x2_phy_set_txpower(dev);
+
+	mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+	/* Enable LDPC Rx */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+	if (!dev->cal.init_cal_done) {
+		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+		if (val != 0xff)
+			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+	}
+
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+	/* Rx LPF calibration */
+	if (!dev->cal.init_cal_done)
+		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
+	dev->cal.init_cal_done = true;
+
+	mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+	mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+	mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+	if (scan)
+		return 0;
+
+	mt76x2u_phy_channel_calibrate(dev, true);
+	mt76x02_init_agc_gain(dev);
+
+	if (mt76x2_tssi_enabled(dev)) {
+		/* init default values for temp compensation */
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+			       0x38);
+		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+			       0x38);
+
+		/* init tssi calibration */
+		if (!mt76x2_channel_silent(dev)) {
+			struct ieee80211_channel *chan;
+			u32 flag = 0;
+
+			chan = dev->mt76.chandef.chan;
+			if (chan->band == NL80211_BAND_5GHZ)
+				flag |= BIT(0);
+			if (mt76x02_ext_pa_enabled(dev, chan->band))
+				flag |= BIT(8);
+			mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+			dev->cal.tssi_cal_done = true;
+		}
+	}
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/pci.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644
index 0000000..04c5a69
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/pci.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 aspm_conf, parent_aspm_conf = 0;
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+	aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	if (parent) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+					  &parent_aspm_conf);
+		parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	}
+
+	if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+		/* aspm already disabled */
+		return;
+	}
+
+	dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+		 (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+		 (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
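+	/* try the PCIe core helper first; it can fail if the OS has not
+	 * been granted control of ASPM
+	 */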
+	if (IS_ENABLED(CONFIG_PCIEASPM)) {
+		int err;
+
+		err = pci_disable_link_state(pdev, aspm_conf);
+		if (!err)
+			return;
+	}
+
+	/* both device and parent should have the same ASPM setting.
+	 * disable ASPM in downstream component first and then upstream.
+	 */
+	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+	if (parent)
+		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+					   aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 0000000..ed3df3c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 0000000..0b3e635
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,	\
+				wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s"
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	" %04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/tx.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 0000000..c22a05f
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	u8 *txwi;
+	int size;
+
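+	/* allocate the DMA txwi buffer and its tracking struct in a single
+	 * cache-line aligned chunk; the struct is placed right after the
+	 * txwi area that gets mapped for DMA
+	 */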
+	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!txwi)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+			      DMA_TO_DEVICE);
+	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock_bh(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock_bh(&dev->lock);
+
+	return t;
+}
+
+struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock_bh(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock_bh(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+void mt76_tx_free(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				 DMA_TO_DEVICE);
+}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+	if (!txq->sta)
+		return MT_TXQ_BE;
+
+	return txq->ac;
+}
+
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    !ieee80211_is_data_present(hdr->frame_control))
+		return;
+
+	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
+void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+		   __acquires(&dev->status_list.lock)
+{
+	__skb_queue_head_init(list);
+	spin_lock_bh(&dev->status_list.lock);
+	__acquire(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+		      __releases(&dev->status_list.lock)
+{
+	struct sk_buff *skb;
+
+	spin_unlock_bh(&dev->status_list.lock);
+	__release(&dev->status_list.lock);
+
+	while ((skb = __skb_dequeue(list)) != NULL)
+		ieee80211_tx_status(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
+static void
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+			  struct sk_buff_head *list)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
+
+	flags |= cb->flags;
+	cb->flags = flags;
+
+	if ((flags & done) != done)
+		return;
+
+	__skb_unlink(skb, &dev->status_list);
+
+	/* Tx status can be unreliable. If it fails, mark the frame as ACKed. */
+	if (flags & MT_TX_CB_TXS_FAILED) {
+		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].idx = -1;
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	}
+
+	__skb_queue_tail(list, skb);
+}
+
+void
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
+
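+/* Assign a packet id to the skb and queue it on status_list so that a
+ * later TX status report can be matched back to it. Returns
+ * MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB when no status tracking is
+ * required.
+ */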
+int
+mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+		       struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+	int pid;
+
+	if (!wcid)
+		return MT_PACKET_ID_NO_ACK;
+
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+		return MT_PACKET_ID_NO_ACK;
+
+	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+		return MT_PACKET_ID_NO_SKB;
+
+	spin_lock_bh(&dev->status_list.lock);
+
+	memset(cb, 0, sizeof(*cb));
+	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
+		wcid->packet_id = MT_PACKET_ID_FIRST;
+
+	pid = wcid->packet_id;
+	cb->wcid = wcid->idx;
+	cb->pktid = pid;
+	cb->jiffies = jiffies;
+
+	__skb_queue_tail(&dev->status_list, skb);
+	spin_unlock_bh(&dev->status_list.lock);
+
+	return pid;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
+
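+/* Look up the pending status skb matching @pktid for @wcid. Entries that
+ * have timed out (or all entries when @pktid is negative) are completed
+ * as failed and moved to @list.
+ */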
+struct sk_buff *
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+		       struct sk_buff_head *list)
+{
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
+		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+
+		if (wcid && cb->wcid != wcid->idx)
+			continue;
+
+		if (cb->pktid == pktid)
+			return skb;
+
+		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
+					      MT_TX_STATUS_SKB_TIMEOUT))
+			continue;
+
+		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
+						    MT_TX_CB_TXS_DONE, list);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
+
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+	struct sk_buff_head list;
+
+	mt76_tx_status_lock(dev, &list);
+	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+	mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
+{
+	struct sk_buff_head list;
+
+	if (!skb->prev) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return;
+	}
+
+	mt76_tx_status_lock(dev, &list);
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+	mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+
+void
+mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct mt76_queue *q;
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
+		struct ieee80211_txq *txq;
+		struct mt76_txq *mtxq;
+		u8 tid;
+
+		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+		txq = sta->txq[tid];
+		mtxq = (struct mt76_txq *)txq->drv_priv;
+
+		if (mtxq->aggr)
+			mt76_check_agg_ssn(mtxq, skb);
+	}
+
+	q = dev->q_tx[qid].q;
+
+	spin_lock_bh(&q->lock);
+	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+	dev->queue_ops->kick(dev, q);
+
+	if (q->queued > q->ndesc - 8 && !q->stopped) {
+		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+		q->stopped = true;
+	}
+
+	spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&mtxq->retry_q);
+	if (skb) {
+		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+		if (ps && skb_queue_empty(&mtxq->retry_q))
+			ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+		return skb;
+	}
+
+	skb = ieee80211_tx_dequeue(dev->hw, txq);
+	if (!skb)
+		return NULL;
+
+	return skb;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+		  struct sk_buff *skb, bool last)
+{
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+	if (last)
+		info->flags |= IEEE80211_TX_STATUS_EOSP |
+			       IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+	mt76_skb_set_moredata(skb, !last);
+	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
+}
+
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+			     u16 tids, int nframes,
+			     enum ieee80211_frame_release_type reason,
+			     bool more_data)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct sk_buff *last_skb = NULL;
+	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
+	int i;
+
+	spin_lock_bh(&hwq->lock);
+	for (i = 0; tids && nframes; i++, tids >>= 1) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
+		struct sk_buff *skb;
+
+		if (!(tids & 1))
+			continue;
+
+		do {
+			skb = mt76_txq_dequeue(dev, mtxq, true);
+			if (!skb)
+				break;
+
+			if (mtxq->aggr)
+				mt76_check_agg_ssn(mtxq, skb);
+
+			nframes--;
+			if (last_skb)
+				mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+			last_skb = skb;
+		} while (nframes);
+	}
+
+	if (last_skb) {
+		mt76_queue_ps_skb(dev, sta, last_skb, true);
+		dev->queue_ops->kick(dev, hwq);
+	} else {
+		ieee80211_sta_eosp(sta);
+	}
+
+	spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
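+/* Dequeue a burst of frames from a software txq and queue them to the
+ * hardware: up to 16 frames for an A-MPDU, 3 otherwise, all reusing the
+ * rate selected for the first frame.
+ */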
+static int
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
+		    struct mt76_txq *mtxq, bool *empty)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
+	struct mt76_wcid *wcid = mtxq->wcid;
+	struct mt76_queue *hwq = sq->q;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+	int n_frames = 1, limit;
+	struct ieee80211_tx_rate tx_rate;
+	bool ampdu;
+	bool probe;
+	int idx;
+
+	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
+		*empty = true;
+		return 0;
+	}
+
+	skb = mt76_txq_dequeue(dev, mtxq, false);
+	if (!skb) {
+		*empty = true;
+		return 0;
+	}
+
+	info = IEEE80211_SKB_CB(skb);
+	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+				       info->control.rates, 1);
+	tx_rate = info->control.rates[0];
+
+	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
+	limit = ampdu ? 16 : 3;
+
+	if (ampdu)
+		mt76_check_agg_ssn(mtxq, skb);
+
+	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
+
+	if (idx < 0)
+		return idx;
+
+	do {
+		bool cur_ampdu;
+
+		if (probe)
+			break;
+
+		if (test_bit(MT76_RESET, &dev->state))
+			return -EBUSY;
+
+		skb = mt76_txq_dequeue(dev, mtxq, false);
+		if (!skb) {
+			*empty = true;
+			break;
+		}
+
+		info = IEEE80211_SKB_CB(skb);
+		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+
+		if (ampdu != cur_ampdu ||
+		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+			skb_queue_tail(&mtxq->retry_q, skb);
+			break;
+		}
+
+		info->control.rates[0] = tx_rate;
+
+		if (cur_ampdu)
+			mt76_check_agg_ssn(mtxq, skb);
+
+		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
+						   txq->sta);
+		if (idx < 0)
+			return idx;
+
+		n_frames++;
+	} while (n_frames < limit);
+
+	if (!probe) {
+		hwq->entry[idx].qid = sq - dev->q_tx;
+		hwq->entry[idx].schedule = true;
+		sq->swq_queued++;
+	}
+
+	dev->queue_ops->kick(dev, hwq);
+
+	return n_frames;
+}
+
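+/* Round-robin over the txqs returned by mac80211 for this hardware queue,
+ * sending pending BlockAck requests and bursts of frames until the
+ * software queue limit is reached or no txq has data pending.
+ */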
+static int
+mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
+{
+	struct mt76_sw_queue *sq = &dev->q_tx[qid];
+	struct mt76_queue *hwq = sq->q;
+	struct ieee80211_txq *txq;
+	struct mt76_txq *mtxq;
+	struct mt76_wcid *wcid;
+	int ret = 0;
+
+	spin_lock_bh(&hwq->lock);
+	while (1) {
+		bool empty = false;
+
+		if (sq->swq_queued >= 4)
+			break;
+
+		if (test_bit(MT76_RESET, &dev->state)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		txq = ieee80211_next_txq(dev->hw, qid);
+		if (!txq)
+			break;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
+		wcid = mtxq->wcid;
+		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+			continue;
+
+		if (mtxq->send_bar && mtxq->aggr) {
+			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+			struct ieee80211_sta *sta = txq->sta;
+			struct ieee80211_vif *vif = txq->vif;
+			u16 agg_ssn = mtxq->agg_ssn;
+			u8 tid = txq->tid;
+
+			mtxq->send_bar = false;
+			spin_unlock_bh(&hwq->lock);
+			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+			spin_lock_bh(&hwq->lock);
+		}
+
+		ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
+		if (skb_queue_empty(&mtxq->retry_q))
+			empty = true;
+		ieee80211_return_txq(dev->hw, txq, !empty);
+	}
+	spin_unlock_bh(&hwq->lock);
+
+	return ret;
+}
+
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
+{
+	struct mt76_sw_queue *sq = &dev->q_tx[qid];
+	int len;
+
+	if (qid >= 4)
+		return;
+
+	if (sq->swq_queued >= 4)
+		return;
+
+	rcu_read_lock();
+
+	do {
+		ieee80211_txq_schedule_start(dev->hw, qid);
+		len = mt76_txq_schedule_list(dev, qid);
+		ieee80211_txq_schedule_end(dev->hw, qid);
+	} while (len > 0);
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
+void mt76_txq_schedule_all(struct mt76_dev *dev)
+{
+	int i;
+
+	for (i = 0; i <= MT_TXQ_BK; i++)
+		mt76_txq_schedule(dev, i);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_tx_tasklet(unsigned long data)
+{
+	struct mt76_dev *dev = (struct mt76_dev *)data;
+
+	mt76_txq_schedule_all(dev);
+}
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_queue *hwq;
+		struct mt76_txq *mtxq;
+
+		if (!txq)
+			continue;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
+		hwq = mtxq->swq->q;
+
+		spin_lock_bh(&hwq->lock);
+		mtxq->send_bar = mtxq->aggr && send_bar;
+		spin_unlock_bh(&hwq->lock);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	struct mt76_dev *dev = hw->priv;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+		return;
+
+	tasklet_schedule(&dev->tx_tasklet);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+	struct sk_buff *skb;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
+		ieee80211_free_txskb(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
+
+	skb_queue_head_init(&mtxq->retry_q);
+
+	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
+}
+EXPORT_SYMBOL_GPL(mt76_txq_init);
+
+u8 mt76_ac_to_hwq(u8 ac)
+{
+	static const u8 wmm_queue_map[] = {
+		[IEEE80211_AC_BE] = 0,
+		[IEEE80211_AC_BK] = 1,
+		[IEEE80211_AC_VI] = 2,
+		[IEEE80211_AC_VO] = 3,
+	};
+
+	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
+		return 0;
+
+	return wmm_queue_map[ac];
+}
+EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/usb.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 0000000..05aa42b
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY	10
+#define MT_VEND_REQ_TOUT_MS	300
+
+static bool disable_usb_sg;
+module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
+MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+				  u8 req_type, u16 val, u16 offset,
+				  void *buf, size_t len)
+{
+	struct usb_interface *uintf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(uintf);
+	unsigned int pipe;
+	int i, ret;
+
+	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+				       : usb_sndctrlpipe(udev, 0);
+	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+		if (test_bit(MT76_REMOVED, &dev->state))
+			return -EIO;
+
+		ret = usb_control_msg(udev, pipe, req, req_type, val,
+				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
+		if (ret == -ENODEV)
+			set_bit(MT76_REMOVED, &dev->state);
+		if (ret >= 0 || ret == -ENODEV)
+			return ret;
+		usleep_range(5000, 10000);
+	}
+
+	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+		req, offset, ret);
+	return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+			 u8 req_type, u16 val, u16 offset,
+			 void *buf, size_t len)
+{
+	int ret;
+
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	ret = __mt76u_vendor_request(dev, req, req_type,
+				     val, offset, buf, len);
+	trace_usb_reg_wr(dev, offset, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u32 data = ~0;
+	u16 offset;
+	int ret;
+	u8 req;
+
+	switch (addr & MT_VEND_TYPE_MASK) {
+	case MT_VEND_TYPE_EEPROM:
+		req = MT_VEND_READ_EEPROM;
+		break;
+	case MT_VEND_TYPE_CFG:
+		req = MT_VEND_READ_CFG;
+		break;
+	default:
+		req = MT_VEND_MULTI_READ;
+		break;
+	}
+	offset = addr & ~MT_VEND_TYPE_MASK;
+
+	ret = __mt76u_vendor_request(dev, req,
+				     USB_DIR_IN | USB_TYPE_VENDOR,
+				     0, offset, &usb->reg_val, sizeof(__le32));
+	if (ret == sizeof(__le32))
+		data = le32_to_cpu(usb->reg_val);
+	trace_usb_reg_rr(dev, addr, data);
+
+	return data;
+}
+
+static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+	u32 ret;
+
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	ret = __mt76u_rr(dev, addr);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+	struct mt76_usb *usb = &dev->usb;
+	u16 offset;
+	u8 req;
+
+	switch (addr & MT_VEND_TYPE_MASK) {
+	case MT_VEND_TYPE_CFG:
+		req = MT_VEND_WRITE_CFG;
+		break;
+	default:
+		req = MT_VEND_MULTI_WRITE;
+		break;
+	}
+	offset = addr & ~MT_VEND_TYPE_MASK;
+
+	usb->reg_val = cpu_to_le32(val);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+			       offset, &usb->reg_val, sizeof(__le32));
+	trace_usb_reg_wr(dev, addr, val);
+}
+
+static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	__mt76u_wr(dev, addr, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+		     u32 mask, u32 val)
+{
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	val |= __mt76u_rr(dev, addr) & ~mask;
+	__mt76u_wr(dev, addr, val);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+	return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+		       const void *data, int len)
+{
+	struct mt76_usb *usb = &dev->usb;
+	const u32 *val = data;
+	int i, ret;
+
+	mutex_lock(&usb->usb_ctrl_mtx);
+	for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
+		put_unaligned(val[i], (u32 *)usb->data);
+		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+					     USB_DIR_OUT | USB_TYPE_VENDOR,
+					     0, offset + i * 4, usb->data,
+					     sizeof(u32));
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+		     const u16 offset, const u32 val)
+{
+	mutex_lock(&dev->usb.usb_ctrl_mtx);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR,
+			       val & 0xffff, offset, NULL, 0);
+	__mt76u_vendor_request(dev, req,
+			       USB_DIR_OUT | USB_TYPE_VENDOR,
+			       val >> 16, offset + 2, NULL, 0);
+	mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
+		const struct mt76_reg_pair *data, int len)
+{
+	struct mt76_usb *usb = &dev->usb;
+
+	mutex_lock(&usb->usb_ctrl_mtx);
+	while (len > 0) {
+		__mt76u_wr(dev, base + data->reg, data->value);
+		len--;
+		data++;
+	}
+	mutex_unlock(&usb->usb_ctrl_mtx);
+
+	return 0;
+}
+
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+	    const struct mt76_reg_pair *data, int n)
+{
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
+	else
+		return mt76u_req_wr_rp(dev, base, data, n);
+}
+
+static int
+mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
+		int len)
+{
+	struct mt76_usb *usb = &dev->usb;
+
+	mutex_lock(&usb->usb_ctrl_mtx);
+	while (len > 0) {
+		data->value = __mt76u_rr(dev, base + data->reg);
+		len--;
+		data++;
+	}
+	mutex_unlock(&usb->usb_ctrl_mtx);
+
+	return 0;
+}
+
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+	    struct mt76_reg_pair *data, int n)
+{
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
+	else
+		return mt76u_req_rd_rp(dev, base, data, n);
+}
+
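+/* Scatter-gather is only used when it is not disabled via the module
+ * parameter, the host controller supports SG and it either has no SG
+ * constraints or the device runs at USB wireless speed.
+ */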
+static bool mt76u_check_sg(struct mt76_dev *dev)
+{
+	struct usb_interface *uintf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(uintf);
+
+	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
+		(udev->bus->no_sg_constraint ||
+		 udev->speed == USB_SPEED_WIRELESS));
+}
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+		    struct mt76_usb *usb)
+{
+	struct usb_host_interface *intf_desc = intf->cur_altsetting;
+	struct usb_endpoint_descriptor *ep_desc;
+	int i, in_ep = 0, out_ep = 0;
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc) &&
+		    in_ep < __MT_EP_IN_MAX) {
+			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+			in_ep++;
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   out_ep < __MT_EP_OUT_MAX) {
+			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+			out_ep++;
+		}
+	}
+
+	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+		return -EINVAL;
+	return 0;
+}
+
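+/* Fill the urb scatter-gather list with up to @nsgs page fragments from
+ * the rx page pool. Returns the number of fragments added, or -ENOMEM if
+ * none could be allocated.
+ */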
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+		 int nsgs, gfp_t gfp)
+{
+	int i;
+
+	for (i = 0; i < nsgs; i++) {
+		struct page *page;
+		void *data;
+		int offset;
+
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+		if (!data)
+			break;
+
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
+	}
+
+	if (i < nsgs) {
+		int j;
+
+		for (j = nsgs; j < urb->num_sgs; j++)
+			skb_free_frag(sg_virt(&urb->sg[j]));
+		urb->num_sgs = i;
+	}
+
+	urb->num_sgs = max_t(int, i, urb->num_sgs);
+	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
+	sg_init_marker(urb->sg, urb->num_sgs);
+
+	return i ? : -ENOMEM;
+}
+
+static int
+mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
+	if (dev->usb.sg_en)
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
+
+	urb->transfer_buffer_length = q->buf_size;
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+
+	return urb->transfer_buffer ? 0 : -ENOMEM;
+}
+
+static int
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
+		int sg_max_size)
+{
+	unsigned int size = sizeof(struct urb);
+
+	if (dev->usb.sg_en)
+		size += sg_max_size * sizeof(struct scatterlist);
+
+	e->urb = kzalloc(size, GFP_KERNEL);
+	if (!e->urb)
+		return -ENOMEM;
+
+	usb_init_urb(e->urb);
+
+	if (dev->usb.sg_en)
+		e->urb->sg = (struct scatterlist *)(e->urb + 1);
+
+	return 0;
+}
+
+static int
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+{
+	int err;
+
+	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
+	if (err)
+		return err;
+
+	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
+			       GFP_KERNEL);
+}
+
+static void mt76u_urb_free(struct urb *urb)
+{
+	int i;
+
+	for (i = 0; i < urb->num_sgs; i++)
+		skb_free_frag(sg_virt(&urb->sg[i]));
+
+	if (urb->transfer_buffer)
+		skb_free_frag(urb->transfer_buffer);
+
+	usb_free_urb(urb);
+}
+
+static void
+mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
+		    struct urb *urb, usb_complete_t complete_fn,
+		    void *context)
+{
+	struct usb_interface *uintf = to_usb_interface(dev->dev);
+	struct usb_device *udev = interface_to_usbdev(uintf);
+	unsigned int pipe;
+
+	if (dir == USB_DIR_IN)
+		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+	else
+		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+	urb->dev = udev;
+	urb->pipe = pipe;
+	urb->complete = complete_fn;
+	urb->context = context;
+}
+
+static inline struct urb *
+mt76u_get_next_rx_entry(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	struct urb *urb = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (q->queued > 0) {
+		urb = q->entry[q->head].urb;
+		q->head = (q->head + 1) % q->ndesc;
+		q->queued--;
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return urb;
+}
+
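+/* Validate the DMA length prefix of an rx buffer and return the DMA
+ * payload length, or -EINVAL if the header looks inconsistent.
+ */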
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+	u16 dma_len, min_len;
+
+	dma_len = get_unaligned_le16(data);
+	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+		  MT_FCE_INFO_LEN;
+
+	if (data_len < min_len || !dma_len ||
+	    dma_len + MT_DMA_HDR_LEN > data_len ||
+	    (dma_len & 0x3))
+		return -EINVAL;
+	return dma_len;
+}
+
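+/* Build an rx skb around the data buffer: wrap it directly via build_skb()
+ * when it fits, otherwise copy the head and attach the rest as a page
+ * fragment.
+ */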
+static struct sk_buff *
+mt76u_build_rx_skb(void *data, int len, int buf_size)
+{
+	struct sk_buff *skb;
+
+	if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+		struct page *page;
+
+		/* slow path, not enough space for data and
+		 * skb_shared_info
+		 */
+		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
+		data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+		page = virt_to_head_page(data);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				page, data - page_address(page),
+				len - MT_SKB_HEAD_LEN, buf_size);
+
+		return skb;
+	}
+
+	/* fast path */
+	skb = build_skb(data, buf_size);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+	__skb_put(skb, len);
+
+	return skb;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
+	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
+	int len, nsgs = 1;
+	struct sk_buff *skb;
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+		return 0;
+
+	len = mt76u_get_rx_entry_len(data, urb->actual_length);
+	if (len < 0)
+		return 0;
+
+	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
+	skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
+	if (!skb)
+		return 0;
+
+	len -= data_len;
+	while (len > 0 && nsgs < urb->num_sgs) {
+		data_len = min_t(int, len, urb->sg[nsgs].length);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				sg_page(&urb->sg[nsgs]),
+				urb->sg[nsgs].offset,
+				data_len, q->buf_size);
+		len -= data_len;
+		nsgs++;
+	}
+	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+	return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+	struct mt76_dev *dev = urb->context;
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	unsigned long flags;
+
+	trace_rx_urb(dev, urb);
+
+	switch (urb->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENOENT:
+		return;
+	default:
+		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+				    urb->status);
+		/* fall through */
+	case 0:
+		break;
+	}
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
+		goto out;
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued++;
+	tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static int
+mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+{
+	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
+			    mt76u_complete_rx, dev);
+	trace_submit_urb(dev, urb);
+
+	return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+	struct mt76_dev *dev = (struct mt76_dev *)data;
+	struct urb *urb;
+	int err, count;
+
+	rcu_read_lock();
+
+	while (true) {
+		urb = mt76u_get_next_rx_entry(dev);
+		if (!urb)
+			break;
+
+		count = mt76u_process_rx_entry(dev, urb);
+		if (count > 0) {
+			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
+			if (err < 0)
+				break;
+		}
+		mt76u_submit_rx_buf(dev, urb);
+	}
+	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+	rcu_read_unlock();
+}
+
+static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	unsigned long flags;
+	int i, err = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	for (i = 0; i < q->ndesc; i++) {
+		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
+		if (err < 0)
+			break;
+	}
+	q->head = q->tail = 0;
+	q->queued = 0;
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return err;
+}
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+	struct mt76_usb *usb = &dev->usb;
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i, err;
+
+	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
+	if (!usb->mcu.data)
+		return -ENOMEM;
+
+	spin_lock_init(&q->lock);
+	q->entry = devm_kcalloc(dev->dev,
+				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	q->ndesc = MT_NUM_RX_ENTRIES;
+	q->buf_size = PAGE_SIZE;
+
+	for (i = 0; i < q->ndesc; i++) {
+		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
+		if (err < 0)
+			return err;
+	}
+
+	return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	struct page *page;
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		mt76u_urb_free(q->entry[i].urb);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
+}
+
+void mt76u_stop_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		usb_poison_urb(q->entry[i].urb);
+
+	tasklet_kill(&dev->usb.rx_tasklet);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_rx);
+
+int mt76u_resume_rx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		usb_unpoison_urb(q->entry[i].urb);
+
+	return mt76u_submit_rx_buffers(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_resume_rx);
+
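+/* Reap completed tx urbs for each AC queue, update the queued counters,
+ * wake stopped mac80211 queues and kick off further tx scheduling and
+ * status polling.
+ */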
+static void mt76u_tx_tasklet(unsigned long data)
+{
+	struct mt76_dev *dev = (struct mt76_dev *)data;
+	struct mt76_queue_entry entry;
+	struct mt76_sw_queue *sq;
+	struct mt76_queue *q;
+	bool wake;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		u32 n_dequeued = 0, n_sw_dequeued = 0;
+
+		sq = &dev->q_tx[i];
+		q = sq->q;
+
+		while (q->queued > n_dequeued) {
+			if (!q->entry[q->head].done)
+				break;
+
+			if (q->entry[q->head].schedule) {
+				q->entry[q->head].schedule = false;
+				n_sw_dequeued++;
+			}
+
+			entry = q->entry[q->head];
+			q->entry[q->head].done = false;
+			q->head = (q->head + 1) % q->ndesc;
+			n_dequeued++;
+
+			dev->drv->tx_complete_skb(dev, i, &entry);
+		}
+
+		spin_lock_bh(&q->lock);
+
+		sq->swq_queued -= n_sw_dequeued;
+		q->queued -= n_dequeued;
+
+		wake = q->stopped && q->queued < q->ndesc - 8;
+		if (wake)
+			q->stopped = false;
+
+		if (!q->queued)
+			wake_up(&dev->tx_wait);
+
+		spin_unlock_bh(&q->lock);
+
+		mt76_txq_schedule(dev, i);
+
+		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+			ieee80211_queue_delayed_work(dev->hw,
+						     &dev->usb.stat_work,
+						     msecs_to_jiffies(10));
+
+		if (wake)
+			ieee80211_wake_queue(dev->hw, i);
+	}
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+	struct mt76_usb *usb;
+	struct mt76_dev *dev;
+	u8 update = 1;
+	u16 count = 0;
+
+	usb = container_of(work, struct mt76_usb, stat_work.work);
+	dev = container_of(usb, struct mt76_dev, usb);
+
+	while (true) {
+		if (test_bit(MT76_REMOVED, &dev->state))
+			break;
+
+		if (!dev->drv->tx_status_data(dev, &update))
+			break;
+		count++;
+	}
+
+	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+					     msecs_to_jiffies(10));
+	else
+		clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+	struct mt76_queue_entry *e = urb->context;
+
+	if (mt76u_urb_error(urb))
+		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+	e->done = true;
+
+	tasklet_schedule(&dev->tx_tasklet);
+}
+
+static int
+mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
+		       struct urb *urb)
+{
+	urb->transfer_buffer_length = skb->len;
+
+	if (!dev->usb.sg_en) {
+		urb->transfer_buffer = skb->data;
+		return 0;
+	}
+
+	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
+	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+	if (!urb->num_sgs)
+		return -ENOMEM;
+
+	return urb->num_sgs;
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+		   struct sk_buff *skb, struct mt76_wcid *wcid,
+		   struct ieee80211_sta *sta)
+{
+	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_tx_info tx_info = {
+		.skb = skb,
+	};
+	u16 idx = q->tail;
+	int err;
+
+	if (q->queued == q->ndesc)
+		return -ENOSPC;
+
+	skb->prev = skb->next = NULL;
+	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
+	if (err < 0)
+		return err;
+
+	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
+	if (err < 0)
+		return err;
+
+	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
+			    q->entry[idx].urb, mt76u_complete_tx,
+			    &q->entry[idx]);
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->entry[idx].skb = tx_info.skb;
+	q->queued++;
+
+	return idx;
+}
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	struct urb *urb;
+	int err;
+
+	while (q->first != q->tail) {
+		urb = q->entry[q->first].urb;
+
+		trace_submit_urb(dev, urb);
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err < 0) {
+			if (err == -ENODEV)
+				set_bit(MT76_REMOVED, &dev->state);
+			else
+				dev_err(dev->dev, "tx urb submit failed:%d\n",
+					err);
+			break;
+		}
+		q->first = (q->first + 1) % q->ndesc;
+	}
+}
+
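+/* Allocate tx queues and their urbs for each AC; queues above the number
+ * of ACs (e.g. PSD) share the first hardware queue.
+ */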
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q;
+	int i, j, err;
+
+	for (i = 0; i <= MT_TXQ_PSD; i++) {
+		INIT_LIST_HEAD(&dev->q_tx[i].swq);
+
+		if (i >= IEEE80211_NUM_ACS) {
+			dev->q_tx[i].q = dev->q_tx[0].q;
+			continue;
+		}
+
+		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+		spin_lock_init(&q->lock);
+		q->hw_idx = mt76_ac_to_hwq(i);
+		dev->q_tx[i].q = q;
+
+		q->entry = devm_kcalloc(dev->dev,
+					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+					GFP_KERNEL);
+		if (!q->entry)
+			return -ENOMEM;
+
+		q->ndesc = MT_NUM_TX_ENTRIES;
+		for (j = 0; j < q->ndesc; j++) {
+			err = mt76u_urb_alloc(dev, &q->entry[j],
+					      MT_TX_SG_MAX_SIZE);
+			if (err < 0)
+				return err;
+		}
+	}
+	return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+	struct mt76_queue *q;
+	int i, j;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		q = dev->q_tx[i].q;
+		for (j = 0; j < q->ndesc; j++)
+			usb_free_urb(q->entry[j].urb);
+	}
+}
+
+void mt76u_stop_tx(struct mt76_dev *dev)
+{
+	struct mt76_queue_entry entry;
+	struct mt76_queue *q;
+	int i, j, ret;
+
+	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
+				 HZ / 5);
+	if (!ret) {
+		dev_err(dev->dev, "timed out waiting for pending tx\n");
+
+		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+			q = dev->q_tx[i].q;
+			for (j = 0; j < q->ndesc; j++)
+				usb_kill_urb(q->entry[j].urb);
+		}
+
+		tasklet_kill(&dev->tx_tasklet);
+
+		/* On device removal we might have queued skbs, but
+		 * mt76u_tx_kick() will fail to submit the urb; clean up
+		 * those skbs manually.
+		 */
+		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+			q = dev->q_tx[i].q;
+
+			/* Assure we are in sync with killed tasklet. */
+			spin_lock_bh(&q->lock);
+			while (q->queued) {
+				entry = q->entry[q->head];
+				q->head = (q->head + 1) % q->ndesc;
+				q->queued--;
+
+				dev->drv->tx_complete_skb(dev, i, &entry);
+			}
+			spin_unlock_bh(&q->lock);
+		}
+	}
+
+	cancel_delayed_work_sync(&dev->usb.stat_work);
+	clear_bit(MT76_READING_STATS, &dev->state);
+
+	mt76_tx_status_check(dev, NULL, true);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_tx);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+	mt76u_stop_rx(dev);
+	mt76u_stop_tx(dev);
+
+	mt76u_free_rx(dev);
+	mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+	int err;
+
+	err = mt76u_alloc_rx(dev);
+	if (err < 0)
+		return err;
+
+	return mt76u_alloc_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+	.tx_queue_skb = mt76u_tx_queue_skb,
+	.kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+	       struct usb_interface *intf)
+{
+	static const struct mt76_bus_ops mt76u_ops = {
+		.rr = mt76u_rr,
+		.wr = mt76u_wr,
+		.rmw = mt76u_rmw,
+		.write_copy = mt76u_copy,
+		.wr_rp = mt76u_wr_rp,
+		.rd_rp = mt76u_rd_rp,
+		.type = MT76_BUS_USB,
+	};
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct mt76_usb *usb = &dev->usb;
+
+	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+	mutex_init(&usb->mcu.mutex);
+
+	mutex_init(&usb->usb_ctrl_mtx);
+	dev->bus = &mt76u_ops;
+	dev->queue_ops = &usb_queue_ops;
+
+	dev_set_drvdata(&udev->dev, dev);
+
+	usb->sg_en = mt76u_check_sg(dev);
+
+	return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 0000000..9942bdd
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 0000000..f5ab321
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,	\
+				wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s "
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	"reg:0x%04x=0x%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(urb_transfer,
+	TP_PROTO(struct mt76_dev *dev, struct urb *u),
+	TP_ARGS(dev, u),
+	TP_STRUCT__entry(
+		DEV_ENTRY __field(unsigned int, pipe) __field(u32, len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = u->pipe;
+		__entry->len = u->transfer_buffer_length;
+	),
+	TP_printk(DEV_PR_FMT "p:%08x len:%u",
+		  DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+DEFINE_EVENT(urb_transfer, submit_urb,
+	TP_PROTO(struct mt76_dev *dev, struct urb *u),
+	TP_ARGS(dev, u)
+);
+
+DEFINE_EVENT(urb_transfer, rx_urb,
+	TP_PROTO(struct mt76_dev *dev, struct urb *u),
+	TP_ARGS(dev, u)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/util.c b/marvell/linux/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 0000000..23d1e1d
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		usleep_range(10000, 20000);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
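+/* Allocate the first free index in the wcid bitmask, mark it as used and
+ * return it, or -1 if no index is available.
+ */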
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+	int i, idx = 0, cur;
+
+	for (i = 0; i < size / BITS_PER_LONG; i++) {
+		idx = ffs(~mask[i]);
+		if (!idx)
+			continue;
+
+		idx--;
+		cur = i * BITS_PER_LONG + idx;
+		if (cur >= size)
+			break;
+
+		mask[i] |= BIT(idx);
+		return cur;
+	}
+
+	return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
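+/* Return the lowest averaged RSSI across all active wcid entries; stations
+ * that have been inactive for several polling intervals no longer
+ * contribute.
+ */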
+int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+{
+	struct mt76_wcid *wcid;
+	int i, j, min_rssi = 0;
+	s8 cur_rssi;
+
+	local_bh_disable();
+	rcu_read_lock();
+
+	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+		unsigned long mask = dev->wcid_mask[i];
+
+		if (!mask)
+			continue;
+
+		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+			if (!(mask & 1))
+				continue;
+
+			wcid = rcu_dereference(dev->wcid[j]);
+			if (!wcid)
+				continue;
+
+			spin_lock(&dev->rx_lock);
+			if (wcid->inactive_count++ < 5)
+				cur_rssi = -ewma_signal_read(&wcid->rssi);
+			else
+				cur_rssi = 0;
+			spin_unlock(&dev->rx_lock);
+
+			if (cur_rssi < min_rssi)
+				min_rssi = cur_rssi;
+		}
+	}
+
+	rcu_read_unlock();
+	local_bh_enable();
+
+	return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt76/util.h b/marvell/linux/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 0000000..fe3479c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define MT76_INCR(_var, _size) \
+	(_var = (((_var) + 1) % (_size)))
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
+static inline void
+mt76_wcid_free(unsigned long *mask, int idx)
+{
+	mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (enable)
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	else
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Kconfig b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644
index 0000000..0b230f3
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MT7601U
+	tristate "MediaTek MT7601U (USB) support"
+	depends on MAC80211
+	depends on USB
+	---help---
+	  This adds support for MT7601U-based wireless USB dongles.
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Makefile b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644
index 0000000..30f2391
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MT7601U)	+= mt7601u.o
+
+mt7601u-objs	= \
+	usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+	mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/core.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644
index 0000000..907443b
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/core.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+	int i = 100;
+	u32 val;
+
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return -EIO;
+
+		val = mt7601u_rr(dev, MT_MAC_CSR0);
+		if (val && ~val)
+			return 0;
+
+		udelay(10);
+	} while (i--);
+
+	return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+	       int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return false;
+
+		cur = mt7601u_rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+	return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+		    int timeout)
+{
+	u32 cur;
+
+	timeout /= 10;
+	do {
+		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+			return false;
+
+		cur = mt7601u_rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		msleep(10);
+	} while (timeout-- > 0);
+
+	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+	return false;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644
index 0000000..5e54983
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt7601u_dev *dev = data;
+
+	mt76_wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt7601u_dev *dev = data;
+
+	*val = mt76_rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt7601u_dev *dev = file->private;
+	int i, j;
+
+#define stat_printf(grp, off, name)					\
+	seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+	stat_printf(rx_stat, 0, rx_crc_err);
+	stat_printf(rx_stat, 1, rx_phy_err);
+	stat_printf(rx_stat, 2, rx_false_cca);
+	stat_printf(rx_stat, 3, rx_plcp_err);
+	stat_printf(rx_stat, 4, rx_fifo_overflow);
+	stat_printf(rx_stat, 5, rx_duplicate);
+
+	stat_printf(tx_stat, 0, tx_fail_cnt);
+	stat_printf(tx_stat, 1, tx_bcn_cnt);
+	stat_printf(tx_stat, 2, tx_success);
+	stat_printf(tx_stat, 3, tx_retransmit);
+	stat_printf(tx_stat, 4, tx_zero_len);
+	stat_printf(tx_stat, 5, tx_underflow);
+
+	stat_printf(aggr_stat, 0, non_aggr_tx);
+	stat_printf(aggr_stat, 1, aggr_tx);
+
+	stat_printf(zero_len_del, 0, tx_zero_len_del);
+	stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+	seq_puts(file, "Aggregations stats:\n");
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%08llx ",
+				   dev->stats.aggr_n[i * 8 + j]);
+		seq_putc(file, '\n');
+	}
+
+	seq_printf(file, "recent average AMPDU len: %d\n",
+		   atomic_read(&dev->avg_ampdu_len));
+
+	return 0;
+}
+
+static int
+mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt7601u_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+{
+	struct mt7601u_dev *dev = file->private;
+	struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+	struct tssi_data *td = &dev->ee->tssi_data;
+	int i;
+
+	seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+	seq_printf(file, "RSSI offset: %hhx %hhx\n",
+		   dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+	seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+	seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+	seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+		   dev->ee->reg.start + dev->ee->reg.num - 1);
+
+	seq_puts(file, "Per rate power:\n");
+	for (i = 0; i < 2; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+	for (i = 0; i < 4; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+	for (i = 0; i < 4; i++)
+		seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+			   rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+	seq_puts(file, "Per channel power:\n");
+	for (i = 0; i < 7; i++)
+		seq_printf(file, "\t tx_power  ch%u:%02hhx ch%u:%02hhx\n",
+			   i * 2 + 1, dev->ee->chan_pwr[i * 2],
+			   i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+	if (!dev->ee->tssi_enabled)
+		return 0;
+
+	seq_puts(file, "TSSI:\n");
+	seq_printf(file, "\t slope:%02hhx\n", td->slope);
+	seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+		   td->offset[0], td->offset[1], td->offset[2]);
+	seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+	return 0;
+}
+
+static int
+mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+	.open = mt7601u_eeprom_param_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+	if (!dir)
+		return;
+
+	debugfs_create_u8("temperature", 0400, dir, &dev->raw_temp);
+	debugfs_create_u32("temp_mode", 0400, dir, &dev->temp_mode);
+
+	debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
+	debugfs_create_file("regval", 0600, dir, dev, &fops_regval);
+	debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("eeprom_param", 0400, dir, dev, &fops_eeprom_param);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644
index 0000000..0728880
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+	unsigned int hdrlen;
+
+	if (unlikely(len < 10))
+		return 0;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (unlikely(hdrlen > len))
+		return 0;
+	return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+			void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+	struct sk_buff *skb;
+	u32 true_len, hdr_len = 0, copy, frag;
+
+	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+	if (!true_len || true_len > seg_len)
+		goto bad_frame;
+
+	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+	if (!hdr_len)
+		goto bad_frame;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+		skb_put_data(skb, data, hdr_len);
+
+		data += hdr_len + 2;
+		true_len -= hdr_len;
+		hdr_len = 0;
+	}
+
+	/* If not doing paged RX, the allocated skb will always have enough space */
+	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+	frag = true_len - copy;
+
+	skb_put_data(skb, data, copy);
+	data += copy;
+
+	if (frag) {
+		skb_add_rx_frag(skb, 0, p, data - page_address(p),
+				frag, truesize);
+		get_page(p);
+	}
+
+	return skb;
+
+bad_frame:
+	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
+			    true_len, hdr_len);
+	dev_kfree_skb(skb);
+	return NULL;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+				   u32 seg_len, struct page *p)
+{
+	struct sk_buff *skb;
+	struct mt7601u_rxwi *rxwi;
+	u32 fce_info, truesize = seg_len;
+
+	/* The DMA_INFO field at the beginning of the segment contains only
+	 * part of the information; the FCE descriptor has to be read from
+	 * the end of the segment as well.
+	 */
+	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+	seg_len -= MT_FCE_INFO_LEN;
+
+	data += MT_DMA_HDR_LEN;
+	seg_len -= MT_DMA_HDR_LEN;
+
+	rxwi = (struct mt7601u_rxwi *)data;
+	data += sizeof(struct mt7601u_rxwi);
+	seg_len -= sizeof(struct mt7601u_rxwi);
+
+	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+	trace_mt_rx(dev, rxwi, fce_info);
+
+	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+	if (!skb)
+		return;
+
+	spin_lock(&dev->mac_lock);
+	ieee80211_rx(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
+}
+
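+/* Validate the next DMA segment header in the rx buffer and return the
+ * total segment length including the DMA headers, or 0 on error.
+ */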
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+	u16 dma_len = get_unaligned_le16(data);
+
+	if (data_len < min_seg_len ||
+	    WARN_ON_ONCE(!dma_len) ||
+	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+	    WARN_ON_ONCE(dma_len & 0x3) ||
+	    WARN_ON_ONCE(dma_len < min_seg_len))
+		return 0;
+
+	return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+	u32 seg_len, data_len = e->urb->actual_length;
+	u8 *data = page_address(e->p);
+	struct page *new_p = NULL;
+	int cnt = 0;
+
+	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+		return;
+
+	/* Copy if there is very little data in the buffer. */
+	if (data_len > 512)
+		new_p = dev_alloc_pages(MT_RX_ORDER);
+
+	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+		data_len -= seg_len;
+		data += seg_len;
+		cnt++;
+	}
+
+	if (cnt > 1)
+		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
+
+	if (new_p) {
+		/* we have one extra ref from the allocator */
+		put_page(e->p);
+		e->p = new_p;
+	}
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+	struct mt7601u_rx_queue *q = &dev->rx_q;
+	struct mt7601u_dma_buf_rx *buf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+
+	if (!q->pending)
+		goto out;
+
+	buf = &q->e[q->start];
+	q->pending--;
+	q->start = (q->start + 1) % q->entries;
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+	return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+	struct mt7601u_dev *dev = urb->context;
+	struct mt7601u_rx_queue *q = &dev->rx_q;
+	unsigned long flags;
+
+	/* do not schedule the rx tasklet if the urb has been unlinked
+	 * or the device has been removed
+	 */
+	switch (urb->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENOENT:
+		return;
+	default:
+		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+				    urb->status);
+		/* fall through */
+	case 0:
+		break;
+	}
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+		goto out;
+
+	q->end = (q->end + 1) % q->entries;
+	q->pending++;
+	tasklet_schedule(&dev->rx_tasklet);
+out:
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *)data;
+	struct mt7601u_dma_buf_rx *e;
+
+	while ((e = mt7601u_rx_get_pending_entry(dev))) {
+		if (e->urb->status)
+			continue;
+
+		mt7601u_rx_process_entry(dev, e);
+		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+	}
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+	struct mt7601u_tx_queue *q = urb->context;
+	struct mt7601u_dev *dev = q->dev;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	switch (urb->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENOENT:
+		return;
+	default:
+		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+				    urb->status);
+		/* fall through */
+	case 0:
+		break;
+	}
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+		goto out;
+
+	skb = q->e[q->start].skb;
+	q->e[q->start].skb = NULL;
+	trace_mt_tx_dma_done(dev, skb);
+
+	__skb_queue_tail(&dev->tx_skb_done, skb);
+	tasklet_schedule(&dev->tx_tasklet);
+
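+	/* Wake the queue once it has drained by 1/8th of its capacity -
+	 * the submit path stops it when it fills up completely.
+	 */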
+	if (q->used == q->entries - q->entries / 8)
+		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+	q->start = (q->start + 1) % q->entries;
+	q->used--;
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+	struct sk_buff_head skbs;
+	unsigned long flags;
+
+	__skb_queue_head_init(&skbs);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+
+	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+
+		mt7601u_tx_status(dev, skb);
+	}
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+				 struct sk_buff *skb, u8 ep)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+	struct mt7601u_dma_buf_tx *e;
+	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+
+	if (WARN_ON(q->entries <= q->used)) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	e = &q->e[q->end];
+	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+			  mt7601u_complete_tx, q);
+	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+	if (ret) {
+		/* Special-handle ENODEV from TX urb submission because it will
+		 * often be the first ENODEV we see after device is removed.
+		 */
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
+		else
+			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+				ret);
+		goto out;
+	}
+
+	q->end = (q->end + 1) % q->entries;
+	q->used++;
+	e->skb = skb;
+
+	if (q->used >= q->entries)
+		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+	/* TODO: take management packets to queue 5 */
+	return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+	if (ep == 5)
+		return MT_QSEL_MGMT;
+	return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q)
+{
+	u8 ep = q2ep(hw_q);
+	u32 dma_flags;
+	int ret;
+
+	dma_flags = MT_TXD_PKT_INFO_80211;
+	if (wcid->hw_key_idx == 0xff)
+		dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_dma_submit_tx(dev, skb, ep);
+	if (ret) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->rx_q.entries; i++)
+		usb_poison_urb(dev->rx_q.e[i].urb);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	u8 *buf = page_address(e->p);
+	unsigned pipe;
+	int ret;
+
+	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+			  mt7601u_complete_rx, dev);
+
+	trace_mt_submit_urb(dev, e->urb);
+	ret = usb_submit_urb(e->urb, gfp);
+	if (ret)
+		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+	return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->rx_q.entries; i++) {
+		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+		usb_free_urb(dev->rx_q.e[i].urb);
+	}
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+	dev->rx_q.dev = dev;
+	dev->rx_q.entries = N_RX_ENTRIES;
+
+	for (i = 0; i < N_RX_ENTRIES; i++) {
+		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+	int i;
+
+	for (i = 0; i < q->entries; i++)  {
+		usb_poison_urb(q->e[i].urb);
+		if (q->e[i].skb)
+			mt7601u_tx_status(q->dev, q->e[i].skb);
+		usb_free_urb(q->e[i].urb);
+	}
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	if (!dev->tx_q)
+		return;
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+				  struct mt7601u_tx_queue *q)
+{
+	int i;
+
+	q->dev = dev;
+	q->entries = N_TX_ENTRIES;
+
+	for (i = 0; i < N_TX_ENTRIES; i++) {
+		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!q->e[i].urb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+	int i;
+
+	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+				 sizeof(*dev->tx_q), GFP_KERNEL);
+	if (!dev->tx_q)
+		return -ENOMEM;
+
+	for (i = 0; i < __MT_EP_OUT_MAX; i++)
+		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+			return -ENOMEM;
+
+	return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+	int ret = -ENOMEM;
+
+	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
+	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+	ret = mt7601u_alloc_tx(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_alloc_rx(dev);
+	if (ret)
+		goto err;
+
+	ret = mt7601u_submit_rx(dev);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	mt7601u_dma_cleanup(dev);
+	return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+	mt7601u_kill_rx(dev);
+
+	tasklet_kill(&dev->rx_tasklet);
+
+	mt7601u_free_rx(dev);
+	mt7601u_free_tx(dev);
+
+	tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 0000000..81e559e
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN			4
+#define MT_RX_INFO_LEN			4
+#define MT_FCE_INFO_LEN			4
+#define MT_DMA_HDRS			(MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN			GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT		GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE		GENMASK(31, 30)
+
+enum mt76_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+enum mt76_info_type {
+	DMA_PACKET,
+	DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD	BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST	BIT(17)
+#define MT_TXD_PKT_INFO_80211		BIT(19)
+#define MT_TXD_PKT_INFO_TSO		BIT(20)
+#define MT_TXD_PKT_INFO_CSO		BIT(21)
+#define MT_TXD_PKT_INFO_WIV		BIT(24)
+#define MT_TXD_PKT_INFO_QSEL		GENMASK(26, 25)
+
+enum mt76_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ		GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE		GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+				       enum mt76_msg_port d_port,
+				       enum mt76_info_type type, u32 flags)
+{
+	u32 info;
+
+	/* Buffer layout:
+	 *	|   4B   | xfer len |      pad       |  4B  |
+	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
+	 *
+	 * length field of TXINFO should be set to 'xfer len'.
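+	 *
+	 * E.g. (illustrative): a 26 byte frame gets LEN = 28 and a final
+	 * buffer of 4 + 26 + 2 + 4 = 36 bytes.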
+	 */
+
+	info = flags |
+		FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+		FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+		FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+	flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+	return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN			GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR		BIT(24)
+#define MT_RXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_RXD_INFO_PORT		GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE		GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR		BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR		BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR		BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211	BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE	BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN		GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN	BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE	GENMASK(23, 20)
+
+enum mt76_evt_type {
+	CMD_DONE,
+	CMD_ERROR,
+	CMD_RETRY,
+	EVENT_PWR_RSP,
+	EVENT_WOW_RSP,
+	EVENT_CARRIER_DETECT_RSP,
+	EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 0000000..aa3b649
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "mac.h"
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return val;
+}
+
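+/* Read the 16-byte efuse block which contains @addr (the low four address
+ * bits are ignored by the hardware).
+ */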
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+		   enum mt7601u_eeprom_access_modes mode)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+	       FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+	       MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		/* Parts of the EEPROM that are not in the usage map
+		 * (0x80-0xc0, 0xf0) will not return valid data, but that's ok.
+		 */
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+	const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+	u8 data[round_up(MT_EFUSE_USAGE_MAP_SIZE, 16)];
+	int ret, i;
+	u32 start = 0, end = 0, cnt_free;
+
+	for (i = 0; i < map_reads; i++) {
+		ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+					 data + i * 16, MT_EE_PHYSICAL_READ);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+		if (!data[i]) {
+			if (!start)
+				start = MT_EE_USAGE_MAP_START + i;
+			end = MT_EE_USAGE_MAP_START + i;
+		}
+	cnt_free = end - start + 1;
+
+	if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+		dev_err(dev->dev, "Error: your device needs a default EEPROM file and this driver doesn't support that!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
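+/* An all-ones NIC_CONF_1 means the word is not programmed in the EEPROM,
+ * so treat it as TSSI disabled.
+ */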
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	if (!field_valid(nic_conf1 & 0xff))
+		nic_conf1 &= 0xff00;
+
+	dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+				!(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+	if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+		dev_err(dev->dev,
+			"Error: this driver does not support HW RF ctrl\n");
+
+	if (!field_valid(nic_conf0 >> 8))
+		return;
+
+	if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+	    FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+		dev_err(dev->dev,
+			"Error: device has more than 1 RX/TX stream!\n");
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+					     u8 *eeprom, u8 max_pwr)
+{
+	u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+	if (trgt_pwr > max_pwr || !trgt_pwr) {
+		dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
+			 trgt_pwr);
+		trgt_pwr = 0x20;
+	}
+
+	memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u32 i, val;
+	u8 max_pwr;
+
+	val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+	max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+	if (mt7601u_has_tssi(dev, eeprom)) {
+		mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+		return;
+	}
+
+	for (i = 0; i < 14; i++) {
+		s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+		if (power > max_pwr || power < 0)
+			power = MT7601U_DEFAULT_TX_POWER;
+
+		dev->ee->chan_pwr[i] = power;
+	}
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	/* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+	 *	 - comments in rtmp_def.h are incorrect (see rt_channel.c)
+	 */
+	static const struct reg_channel_bounds chan_bounds[] = {
+		/* EEPROM country regions 0 - 7 */
+		{  1, 11 },	{  1, 13 },	{ 10,  2 },	{ 10,  4 },
+		{ 14,  1 },	{  1, 14 },	{  3,  7 },	{  5,  9 },
+		/* EEPROM country regions 32 - 33 */
+		{  1, 11 },	{  1, 14 }
+	};
+	u8 val = eeprom[MT_EE_COUNTRY_REGION];
+	int idx = -1;
+
+	if (val < 8)
+		idx = val;
+	if (val > 31 && val < 33)
+		idx = val - 32 + 8;
+
+	if (idx != -1)
+		dev_info(dev->dev,
+			 "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+			 val, chan_bounds[idx].start,
+			 chan_bounds[idx].start + chan_bounds[idx].num - 1);
+	else
+		idx = 5; /* channels 1 - 14 */
+
+	dev->ee->reg = chan_bounds[idx];
+
+	/* TODO: country region 33 is special - phy should be set to B-mode
+	 *	 before entering channel 14 (see sta/connect.c)
+	 */
+}
+
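+/* The frequency offset compensation byte is sign-magnitude encoded:
+ * bit 7 selects subtraction, bits 6:0 hold the magnitude.
+ */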
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u8 comp;
+
+	dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+	comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+	if (comp & BIT(7))
+		dev->ee->rf_freq_off -= comp & 0x7f;
+	else
+		dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	int i;
+	s8 *rssi_offset = dev->ee->rssi_offset;
+
+	for (i = 0; i < 2; i++) {
+		rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+		if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+			dev_warn(dev->dev,
+				 "Warning: EEPROM RSSI is invalid %02hhx\n",
+				 rssi_offset[i]);
+			rssi_offset[i] = 0;
+		}
+	}
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+	u32 val;
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+	val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+	val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+	mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+	/* Invalid? Note: vendor driver does not handle this */
+	if (value == 0xff)
+		return;
+
+	rate->raw = s6_validate(value);
+	rate->bw20 = s6_to_int(value);
+	/* Note: vendor driver does cap the value to s6 right away */
+	rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+	switch (i) {
+	case 0:
+		mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+		/* Save cck bw20 for fixups of channel 14 */
+		dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+		dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+		mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+		break;
+	case 1:
+		mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+		mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+		mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+		break;
+	case 2:
+		mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+		mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+		break;
+	}
+}
+
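+/* Decode the bw40 power delta: bit 7 enables the delta, bit 6 selects the
+ * sign and bits 4:0 hold the magnitude (capped at 8).
+ */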
+static s8
+get_delta(u8 val)
+{
+	s8 ret;
+
+	if (!field_valid(val) || !(val & BIT(7)))
+		return 0;
+
+	ret = val & 0x1f;
+	if (ret > 8)
+		ret = 8;
+	if (val & BIT(6))
+		ret = -ret;
+
+	return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	u32 val;
+	s8 bw40_delta;
+	int i;
+
+	bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+	for (i = 0; i < 5; i++) {
+		val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+		mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+		if (~val)
+			mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+	}
+
+	mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+	struct tssi_data *d = &dev->ee->tssi_data;
+
+	if (!dev->ee->tssi_enabled)
+		return;
+
+	d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+	d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+	d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+	d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+	d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+	u8 *eeprom;
+	int i, ret;
+
+	ret = mt7601u_efuse_physical_size_check(dev);
+	if (ret)
+		return ret;
+
+	dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+	if (!dev->ee)
+		return -ENOMEM;
+
+	eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+	if (!eeprom)
+		return -ENOMEM;
+
+	for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+		ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+		if (ret)
+			goto out;
+	}
+
+	if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+		dev_warn(dev->dev,
+			 "Warning: unsupported EEPROM version %02hhx\n",
+			 eeprom[MT_EE_VERSION_EE]);
+	dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+		 eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+	mt7601u_set_macaddr(dev, eeprom + MT_EE_MAC_ADDR);
+	mt7601u_set_chip_cap(dev, eeprom);
+	mt7601u_set_channel_power(dev, eeprom);
+	mt7601u_set_country_reg(dev, eeprom);
+	mt7601u_set_rf_freq_off(dev, eeprom);
+	mt7601u_set_rssi_offset(dev, eeprom);
+	dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+	dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+	mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+	mt7601u_init_tssi_params(dev, eeprom);
+out:
+	kfree(eeprom);
+	return ret;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644
index 0000000..4d3fd49
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER			0x0d
+#define MT7601U_EEPROM_SIZE			256
+
+#define MT7601U_DEFAULT_TX_POWER		6
+
+enum mt76_eeprom_field {
+	MT_EE_CHIP_ID =				0x00,
+	MT_EE_VERSION_FAE =			0x02,
+	MT_EE_VERSION_EE =			0x03,
+	MT_EE_MAC_ADDR =			0x04,
+	MT_EE_NIC_CONF_0 =			0x34,
+	MT_EE_NIC_CONF_1 =			0x36,
+	MT_EE_COUNTRY_REGION =			0x39,
+	MT_EE_FREQ_OFFSET =			0x3a,
+	MT_EE_NIC_CONF_2 =			0x42,
+
+	MT_EE_LNA_GAIN =			0x44,
+	MT_EE_RSSI_OFFSET =			0x46,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x50,
+	MT_EE_TX_POWER_OFFSET =			0x52,
+
+	MT_EE_TX_TSSI_SLOPE =			0x6e,
+	MT_EE_TX_TSSI_OFFSET_GROUP =		0x6f,
+	MT_EE_TX_TSSI_OFFSET =			0x76,
+
+	MT_EE_TX_TSSI_TARGET_POWER =		0xd0,
+	MT_EE_REF_TEMP =			0xd1,
+	MT_EE_FREQ_OFFSET_COMPENSATION =	0xdb,
+	MT_EE_TX_POWER_BYRATE_BASE =		0xde,
+
+	MT_EE_USAGE_MAP_START =			0x1e0,
+	MT_EE_USAGE_MAP_END =			0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL		BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV		BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE		BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD		GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i)		(MT_EE_TX_POWER_BYRATE_BASE + \
+						 (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE			(MT_EE_USAGE_MAP_END -	\
+						 MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+	MT_EE_READ = 0,
+	MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate  {
+	u8 raw;  /* validated s6 value */
+	s8 bw20; /* sign-extended int */
+	s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+	struct power_per_rate cck[2];
+	struct power_per_rate ofdm[4];
+	struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+	u8 start;
+	u8 num;
+};
+
+struct mt7601u_eeprom_params {
+	bool tssi_enabled;
+	u8 rf_freq_off;
+	s8 rssi_offset[2];
+	s8 ref_temp;
+	s8 lna_gain;
+
+	u8 chan_pwr[14];
+	struct mt7601u_rate_power power_rate_table;
+	s8 real_cck_bw20[2];
+
+	/* TSSI stuff - only with internal TX ALC */
+	struct tssi_data {
+		int tx0_delta_offset;
+		u8 slope;
+		u8 offset[3];
+	} tssi_data;
+
+	struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
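+/* Helpers for the 6-bit two's-complement power values used by the hardware. */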
+static inline u32 s6_validate(u32 reg)
+{
+	WARN_ON(reg & ~GENMASK(5, 0));
+	return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+	int s6;
+
+	s6 = s6_validate(reg);
+	if (s6 & BIT(5))
+		s6 -= BIT(6);
+
+	return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+	if (val < -0x20)
+		return 0x20;
+	if (val > 0x1f)
+		return 0x1f;
+
+	return val & 0x3f;
+}
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/init.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644
index 0000000..cada488
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+	int i;
+
+	/* Note: we don't turn off WLAN_CLK because that makes the device
+	 *	 not respond properly on the probe path.
+	 *	 In case anyone (PSM?) wants to use this function we can
+	 *	 bring the clock stuff back and fixup the probe path.
+	 */
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+	mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	if (enable) {
+		set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+	} else {
+		clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+		return;
+	}
+
+	for (i = 200; i; i--) {
+		val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+		if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+			break;
+
+		udelay(20);
+	}
+
+	/* Note: the vendor driver tries to disable/enable wlan here and retry,
+	 *       but the code which does it is so buggy it must never have
+	 *       triggered, so don't bother.
+	 */
+	if (!i)
+		dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+	u32 val;
+
+	mutex_lock(&dev->hw_atomic_mutex);
+
+	val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (reset) {
+		val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+		val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+		if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+			val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+				MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+			mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+			udelay(20);
+
+			val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+				 MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+		}
+	}
+
+	mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt7601u_set_wlan_state(dev, val, enable);
+
+	mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+					  MT_MAC_SYS_CTRL_RESET_BBP));
+	mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+	msleep(1);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+	u32 val;
+
+	val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
+			 MT_USB_AGGR_SIZE_LIMIT) |
+	      MT_USB_DMA_CFG_RX_BULK_EN |
+	      MT_USB_DMA_CFG_TX_BULK_EN;
+	if (dev->in_max_packet == 512)
+		val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+	val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+	val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_wait_bbp_ready(dev);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+				      ARRAY_SIZE(bbp_common_vals));
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+				       ARRAY_SIZE(bbp_chip_vals));
+}
+
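+/* Pack the 16 beacon slot offsets into the four BCN_OFFSET registers,
+ * one byte per slot, expressed in 64-byte units relative to MT_BEACON_BASE.
+ */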
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+	u16 base = MT_BEACON_BASE;
+	u32 regs[4] = {};
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		u16 addr = dev->beacon_offsets[i];
+
+		regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+				      ARRAY_SIZE(mac_common_vals));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+				      mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+	if (ret)
+		return ret;
+
+	mt76_init_beacon_offsets(dev);
+
+	mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+	return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS; i++)  {
+		vals[i * 2] = 0xffffffff;
+		vals[i * 2 + 1] = 0x00ffffff;
+	}
+
+	ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+				       vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+	u32 vals[4] = {};
+
+	return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+					vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+	u32 *vals;
+	int i, ret;
+
+	vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	for (i = 0; i < N_WCIDS * 2; i++)
+		vals[i] = 1;
+
+	ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+				       vals, N_WCIDS * 2);
+	kfree(vals);
+
+	return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+	mt7601u_rr(dev, MT_RX_STA_CNT0);
+	mt7601u_rr(dev, MT_RX_STA_CNT1);
+	mt7601u_rr(dev, MT_RX_STA_CNT2);
+	mt7601u_rr(dev, MT_TX_STA_CNT0);
+	mt7601u_rr(dev, MT_TX_STA_CNT1);
+	mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+		return -ETIMEDOUT;
+
+	dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+		MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+		MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+		MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+		MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+		MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+		MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+	mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+	int i, ok;
+
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+		   MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+		   MT_BEACON_TIME_CFG_BEACON_TX);
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+		dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+	/* Page count on TxQ */
+	i = 200;
+	while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+		       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+		       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+		msleep(10);
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+		dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+					 MT_MAC_SYS_CTRL_ENABLE_TX);
+
+	/* Page count on RxQ */
+	ok = 0;
+	i = 200;
+	while (i--) {
+		if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+		    !mt76_rr(dev, 0x0a30) &&
+		    !mt76_rr(dev, 0x0a34)) {
+			if (ok++ > 5)
+				break;
+			continue;
+		}
+		msleep(1);
+	}
+
+	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+		dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+		dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+	mt7601u_mac_stop_hw(dev);
+	flush_delayed_work(&dev->stat_work);
+	cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+	mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+	static const u16 beacon_offsets[16] = {
+		/* 512 bytes per beacon */
+		0xc000,	0xc200,	0xc400,	0xc600,
+		0xc800,	0xca00,	0xcc00,	0xce00,
+		0xd000,	0xd200,	0xd400,	0xd600,
+		0xd800,	0xda00,	0xdc00,	0xde00
+	};
+	int ret;
+
+	dev->beacon_offsets = beacon_offsets;
+
+	mt7601u_chip_onoff(dev, true, false);
+
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_mcu_init(dev);
+	if (ret)
+		goto err;
+
+	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Wait for ASIC ready after FW load. */
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	mt7601u_reset_csr_bbp(dev);
+	mt7601u_init_usb_dma(dev);
+
+	ret = mt7601u_mcu_cmd_init(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_dma_init(dev);
+	if (ret)
+		goto err_mcu;
+	ret = mt7601u_write_mac_initvals(dev);
+	if (ret)
+		goto err_rx;
+
+	if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+			    MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+		ret = -EIO;
+		goto err_rx;
+	}
+
+	ret = mt7601u_init_bbp(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_wcid_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_key_mem(dev);
+	if (ret)
+		goto err_rx;
+	ret = mt7601u_init_wcid_attr_mem(dev);
+	if (ret)
+		goto err_rx;
+
+	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+					     MT_BEACON_TIME_CFG_SYNC_MODE |
+					     MT_BEACON_TIME_CFG_TBTT_EN |
+					     MT_BEACON_TIME_CFG_BEACON_TX));
+
+	mt7601u_reset_counters(dev);
+
+	mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+	mt7601u_wr(dev, MT_TXOP_CTRL_CFG,
+		   FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+		   FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+	ret = mt7601u_eeprom_init(dev);
+	if (ret)
+		goto err_rx;
+
+	ret = mt7601u_phy_init(dev);
+	if (ret)
+		goto err_rx;
+
+	mt7601u_set_rx_path(dev, 0);
+	mt7601u_set_tx_dac(dev, 0);
+
+	mt7601u_mac_set_ctrlch(dev, false);
+	mt7601u_bbp_set_ctrlch(dev, false);
+	mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+	return 0;
+
+err_rx:
+	mt7601u_dma_cleanup(dev);
+err_mcu:
+	mt7601u_mcu_cmd_deinit(dev);
+err:
+	mt7601u_chip_onoff(dev, false, false);
+	return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+	if (!test_and_clear_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+		return;
+
+	mt7601u_stop_hardware(dev);
+	mt7601u_dma_cleanup(dev);
+	mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+	struct ieee80211_hw *hw;
+	struct mt7601u_dev *dev;
+
+	hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->dev = pdev;
+	dev->hw = hw;
+	mutex_init(&dev->vendor_req_mutex);
+	mutex_init(&dev->reg_atomic_mutex);
+	mutex_init(&dev->hw_atomic_mutex);
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->tx_lock);
+	spin_lock_init(&dev->rx_lock);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->mac_lock);
+	spin_lock_init(&dev->con_mon_lock);
+	atomic_set(&dev->avg_ampdu_len, 1);
+	skb_queue_head_init(&dev->tx_skb_done);
+
+	dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+	if (!dev->stat_wq) {
+		ieee80211_free_hw(hw);
+		return NULL;
+	}
+
+	return dev;
+}
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = NL80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_sta_ht_cap *ht_cap;
+	void *chanlist;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		      IEEE80211_HT_CAP_GRN_FLD |
+		      IEEE80211_HT_CAP_SGI_20 |
+		      IEEE80211_HT_CAP_SGI_40 |
+		      (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.rx_mask[0] = 0xff;
+	ht_cap->mcs.rx_mask[4] = 0x1;
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+	dev->chandef.chan = &sband->channels[0];
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+	dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+				     GFP_KERNEL);
+	if (!dev->sband_2g)
+		return -ENOMEM;
+
+	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
+
+	WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+		ARRAY_SIZE(mt76_channels_2ghz));
+
+	return mt76_init_sband(dev, dev->sband_2g,
+			       &mt76_channels_2ghz[dev->ee->reg.start - 1],
+			       dev->ee->reg.num,
+			       mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	/* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
+	 * entry no. 1, like it does in the vendor driver.
+	 */
+	dev->wcid_mask[0] |= 1;
+
+	/* init fake wcid for monitor interfaces */
+	dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+				     GFP_KERNEL);
+	if (!dev->mon_wcid)
+		return -ENOMEM;
+	dev->mon_wcid->idx = 0xff;
+	dev->mon_wcid->hw_key_idx = -1;
+
+	SET_IEEE80211_DEV(hw, dev->dev);
+
+	hw->queues = 4;
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+
+	hw->sta_data_size = sizeof(struct mt76_sta);
+	hw->vif_data_size = sizeof(struct mt76_vif);
+
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+	ret = mt76_init_sband_2g(dev);
+	if (ret)
+		return ret;
+
+	INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+	INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+	ret = ieee80211_register_hw(hw);
+	if (ret)
+		return ret;
+
+	mt7601u_init_debugfs(dev);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644
index 0000000..59acbf5
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+	{  65,	0x2c },
+	{  66,	0x38 },
+	{  68,	0x0b },
+	{  69,	0x12 },
+	{  70,	0x0a },
+	{  73,	0x10 },
+	{  81,	0x37 },
+	{  82,	0x62 },
+	{  83,	0x6a },
+	{  84,	0x99 },
+	{  86,	0x00 },
+	{  91,	0x04 },
+	{  92,	0x00 },
+	{ 103,	0x00 },
+	{ 105,	0x05 },
+	{ 106,	0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+	{   1, 0x04 },	{   4, 0x40 },	{  20, 0x06 },	{  31, 0x08 },
+	/* CCK Tx Control */
+	{ 178, 0xff },
+	/* AGC/Sync controls */
+	{  66, 0x14 },	{  68, 0x8b },	{  69, 0x12 },	{  70, 0x09 },
+	{  73, 0x11 },	{  75, 0x60 },	{  76, 0x44 },	{  84, 0x9a },
+	{  86, 0x38 },	{  91, 0x07 },	{  92, 0x02 },
+	/* Rx Path Controls */
+	{  99, 0x50 },	{ 101, 0x00 },	{ 103, 0xc0 },	{ 104, 0x92 },
+	{ 105, 0x3c },	{ 106, 0x03 },	{ 128, 0x12 },
+	/* Change RXWI content: Gain Report */
+	{ 142, 0x04 },	{ 143, 0x37 },
+	/* Change RXWI content: Antenna Report */
+	{ 142, 0x03 },	{ 143, 0x99 },
+	/* Calibration Index Register */
+	/* CCK Receiver Control */
+	{ 160, 0xeb },	{ 161, 0xc4 },	{ 162, 0x77 },	{ 163, 0xf9 },
+	{ 164, 0x88 },	{ 165, 0x80 },	{ 166, 0xff },	{ 167, 0xe4 },
+	/* Added AGC controls - these AGC/GLRT registers are accessed
+	 * through R195 and R196.
+	 */
+	{ 195, 0x00 },	{ 196, 0x00 },
+	{ 195, 0x01 },	{ 196, 0x04 },
+	{ 195, 0x02 },	{ 196, 0x20 },
+	{ 195, 0x03 },	{ 196, 0x0a },
+	{ 195, 0x06 },	{ 196, 0x16 },
+	{ 195, 0x07 },	{ 196, 0x05 },
+	{ 195, 0x08 },	{ 196, 0x37 },
+	{ 195, 0x0a },	{ 196, 0x15 },
+	{ 195, 0x0b },	{ 196, 0x17 },
+	{ 195, 0x0c },	{ 196, 0x06 },
+	{ 195, 0x0d },	{ 196, 0x09 },
+	{ 195, 0x0e },	{ 196, 0x05 },
+	{ 195, 0x0f },	{ 196, 0x09 },
+	{ 195, 0x10 },	{ 196, 0x20 },
+	{ 195, 0x20 },	{ 196, 0x17 },
+	{ 195, 0x21 },	{ 196, 0x06 },
+	{ 195, 0x22 },	{ 196, 0x09 },
+	{ 195, 0x23 },	{ 196, 0x17 },
+	{ 195, 0x24 },	{ 196, 0x06 },
+	{ 195, 0x25 },	{ 196, 0x09 },
+	{ 195, 0x26 },	{ 196, 0x17 },
+	{ 195, 0x27 },	{ 196, 0x06 },
+	{ 195, 0x28 },	{ 196, 0x09 },
+	{ 195, 0x29 },	{ 196, 0x05 },
+	{ 195, 0x2a },	{ 196, 0x09 },
+	{ 195, 0x80 },	{ 196, 0x8b },
+	{ 195, 0x81 },	{ 196, 0x12 },
+	{ 195, 0x82 },	{ 196, 0x09 },
+	{ 195, 0x83 },	{ 196, 0x17 },
+	{ 195, 0x84 },	{ 196, 0x11 },
+	{ 195, 0x85 },	{ 196, 0x00 },
+	{ 195, 0x86 },	{ 196, 0x00 },
+	{ 195, 0x87 },	{ 196, 0x18 },
+	{ 195, 0x88 },	{ 196, 0x60 },
+	{ 195, 0x89 },	{ 196, 0x44 },
+	{ 195, 0x8a },	{ 196, 0x8b },
+	{ 195, 0x8b },	{ 196, 0x8b },
+	{ 195, 0x8c },	{ 196, 0x8b },
+	{ 195, 0x8d },	{ 196, 0x8b },
+	{ 195, 0x8e },	{ 196, 0x09 },
+	{ 195, 0x8f },	{ 196, 0x09 },
+	{ 195, 0x90 },	{ 196, 0x09 },
+	{ 195, 0x91 },	{ 196, 0x09 },
+	{ 195, 0x92 },	{ 196, 0x11 },
+	{ 195, 0x93 },	{ 196, 0x11 },
+	{ 195, 0x94 },	{ 196, 0x11 },
+	{ 195, 0x95 },	{ 196, 0x11 },
+	/* PPAD */
+	{  47, 0x80 },	{  60, 0x80 },	{ 150, 0xd2 },	{ 151, 0x32 },
+	{ 152, 0x23 },	{ 153, 0x41 },	{ 154, 0x00 },	{ 155, 0x4f },
+	{ 253, 0x7e },	{ 195, 0x30 },	{ 196, 0x32 },	{ 195, 0x31 },
+	{ 196, 0x23 },	{ 195, 0x32 },	{ 196, 0x45 },	{ 195, 0x35 },
+	{ 196, 0x4a },	{ 195, 0x36 },	{ 196, 0x5a },	{ 195, 0x37 },
+	{ 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+	{ MT_LEGACY_BASIC_RATE,		0x0000013f },
+	{ MT_HT_BASIC_RATE,		0x00008003 },
+	{ MT_MAC_SYS_CTRL,		0x00000000 },
+	{ MT_RX_FILTR_CFG,		0x00017f97 },
+	{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+	{ MT_TX_SW_CFG0,		0x00000000 },
+	{ MT_TX_SW_CFG1,		0x00080606 },
+	{ MT_TX_LINK_CFG,		0x00001020 },
+	{ MT_TX_TIMEOUT_CFG,		0x000a2090 },
+	{ MT_MAX_LEN_CFG,		0x00003fff },
+	{ MT_PBF_TX_MAX_PCNT,		0x1fbf1f1f },
+	{ MT_PBF_RX_MAX_PCNT,		0x0000009f },
+	{ MT_TX_RETRY_CFG,		0x47d01f0f },
+	{ MT_AUTO_RSP_CFG,		0x00000013 },
+	{ MT_CCK_PROT_CFG,		0x05740003 },
+	{ MT_OFDM_PROT_CFG,		0x05740003 },
+	{ MT_MM40_PROT_CFG,		0x03f44084 },
+	{ MT_GF20_PROT_CFG,		0x01744004 },
+	{ MT_GF40_PROT_CFG,		0x03f44084 },
+	{ MT_MM20_PROT_CFG,		0x01744004 },
+	{ MT_TXOP_CTRL_CFG,		0x0000583f },
+	{ MT_TX_RTS_CFG,		0x01092b20 },
+	{ MT_EXP_ACK_TIME,		0x002400ca },
+	{ MT_TXOP_HLDR_ET,		0x00000002 },
+	{ MT_XIFS_TIME_CFG,		0x33a41010 },
+	{ MT_PWR_PIN_CFG,		0x00000000 },
+	{ MT_PN_PAD_MODE,		0x00000001 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+	{ MT_TSO_CTRL,			0x00006050 },
+	{ MT_BCN_OFFSET(0),		0x18100800 },
+	{ MT_BCN_OFFSET(1),		0x38302820 },
+	{ MT_PBF_SYS_CTRL,		0x00080c00 },
+	{ MT_PBF_CFG,			0x7f723c1f },
+	{ MT_FCE_PSE_CTRL,		0x00000001 },
+	{ MT_PAUSE_ENABLE_CONTROL1,	0x00000000 },
+	{ MT_TX0_RF_GAIN_CORR,		0x003b0005 },
+	{ MT_TX0_RF_GAIN_ATTEN,		0x00006900 },
+	{ MT_TX0_BB_GAIN_ATTEN,		0x00000400 },
+	{ MT_TX_ALC_VGA3,		0x00060006 },
+	{ MT_TX_SW_CFG0,		0x00000402 },
+	{ MT_TX_SW_CFG1,		0x00000000 },
+	{ MT_TX_SW_CFG2,		0x00000000 },
+	{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+	{ MT_FCE_CSO,			0x0000030f },
+	{ MT_FCE_PARAMETERS,		0x00256f0f },
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644
index 0000000..55cfedb
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value)				\
+	{ MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair rf_central[] = {
+	/* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+	RF_REG_PAIR(0,	 0, 0x02),
+	RF_REG_PAIR(0,	 1, 0x01),
+	RF_REG_PAIR(0,	 2, 0x11),
+	RF_REG_PAIR(0,	 3, 0xff),
+	RF_REG_PAIR(0,	 4, 0x0a),
+	RF_REG_PAIR(0,	 5, 0x20),
+	RF_REG_PAIR(0,	 6, 0x00),
+	/* B/G */
+	RF_REG_PAIR(0,	 7, 0x00),
+	RF_REG_PAIR(0,	 8, 0x00),
+	RF_REG_PAIR(0,	 9, 0x00),
+	RF_REG_PAIR(0,	10, 0x00),
+	RF_REG_PAIR(0,	11, 0x21),
+	/* XO */
+	RF_REG_PAIR(0,	13, 0x00),		/* 40mhz xtal */
+	/* RF_REG_PAIR(0,	13, 0x13), */	/* 20mhz xtal */
+	RF_REG_PAIR(0,	14, 0x7c),
+	RF_REG_PAIR(0,	15, 0x22),
+	RF_REG_PAIR(0,	16, 0x80),
+	/* PLL */
+	RF_REG_PAIR(0,	17, 0x99),
+	RF_REG_PAIR(0,	18, 0x99),
+	RF_REG_PAIR(0,	19, 0x09),
+	RF_REG_PAIR(0,	20, 0x50),
+	RF_REG_PAIR(0,	21, 0xb0),
+	RF_REG_PAIR(0,	22, 0x00),
+	RF_REG_PAIR(0,	23, 0xc5),
+	RF_REG_PAIR(0,	24, 0xfc),
+	RF_REG_PAIR(0,	25, 0x40),
+	RF_REG_PAIR(0,	26, 0x4d),
+	RF_REG_PAIR(0,	27, 0x02),
+	RF_REG_PAIR(0,	28, 0x72),
+	RF_REG_PAIR(0,	29, 0x01),
+	RF_REG_PAIR(0,	30, 0x00),
+	RF_REG_PAIR(0,	31, 0x00),
+	/* test ports */
+	RF_REG_PAIR(0,	32, 0x00),
+	RF_REG_PAIR(0,	33, 0x00),
+	RF_REG_PAIR(0,	34, 0x23),
+	RF_REG_PAIR(0,	35, 0x01), /* change setting to reduce spurs */
+	RF_REG_PAIR(0,	36, 0x00),
+	RF_REG_PAIR(0,	37, 0x00),
+	/* ADC/DAC */
+	RF_REG_PAIR(0,	38, 0x00),
+	RF_REG_PAIR(0,	39, 0x20),
+	RF_REG_PAIR(0,	40, 0x00),
+	RF_REG_PAIR(0,	41, 0xd0),
+	RF_REG_PAIR(0,	42, 0x1b),
+	RF_REG_PAIR(0,	43, 0x02),
+	RF_REG_PAIR(0,	44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+	RF_REG_PAIR(4,	 0, 0x01),
+	RF_REG_PAIR(4,	 1, 0x00),
+	RF_REG_PAIR(4,	 2, 0x00),
+	RF_REG_PAIR(4,	 3, 0x00),
+	/* LDO */
+	RF_REG_PAIR(4,	 4, 0x00),
+	RF_REG_PAIR(4,	 5, 0x08),
+	RF_REG_PAIR(4,	 6, 0x00),
+	/* RX */
+	RF_REG_PAIR(4,	 7, 0x5b),
+	RF_REG_PAIR(4,	 8, 0x52),
+	RF_REG_PAIR(4,	 9, 0xb6),
+	RF_REG_PAIR(4,	10, 0x57),
+	RF_REG_PAIR(4,	11, 0x33),
+	RF_REG_PAIR(4,	12, 0x22),
+	RF_REG_PAIR(4,	13, 0x3d),
+	RF_REG_PAIR(4,	14, 0x3e),
+	RF_REG_PAIR(4,	15, 0x13),
+	RF_REG_PAIR(4,	16, 0x22),
+	RF_REG_PAIR(4,	17, 0x23),
+	RF_REG_PAIR(4,	18, 0x02),
+	RF_REG_PAIR(4,	19, 0xa4),
+	RF_REG_PAIR(4,	20, 0x01),
+	RF_REG_PAIR(4,	21, 0x12),
+	RF_REG_PAIR(4,	22, 0x80),
+	RF_REG_PAIR(4,	23, 0xb3),
+	RF_REG_PAIR(4,	24, 0x00), /* reserved */
+	RF_REG_PAIR(4,	25, 0x00), /* reserved */
+	RF_REG_PAIR(4,	26, 0x00), /* reserved */
+	RF_REG_PAIR(4,	27, 0x00), /* reserved */
+	/* LOGEN */
+	RF_REG_PAIR(4,	28, 0x18),
+	RF_REG_PAIR(4,	29, 0xee),
+	RF_REG_PAIR(4,	30, 0x6b),
+	RF_REG_PAIR(4,	31, 0x31),
+	RF_REG_PAIR(4,	32, 0x5d),
+	RF_REG_PAIR(4,	33, 0x00), /* reserved */
+	/* TX */
+	RF_REG_PAIR(4,	34, 0x96),
+	RF_REG_PAIR(4,	35, 0x55),
+	RF_REG_PAIR(4,	36, 0x08),
+	RF_REG_PAIR(4,	37, 0xbb),
+	RF_REG_PAIR(4,	38, 0xb3),
+	RF_REG_PAIR(4,	39, 0xb3),
+	RF_REG_PAIR(4,	40, 0x03),
+	RF_REG_PAIR(4,	41, 0x00), /* reserved */
+	RF_REG_PAIR(4,	42, 0x00), /* reserved */
+	RF_REG_PAIR(4,	43, 0xc5),
+	RF_REG_PAIR(4,	44, 0xc5),
+	RF_REG_PAIR(4,	45, 0xc5),
+	RF_REG_PAIR(4,	46, 0x07),
+	RF_REG_PAIR(4,	47, 0xa8),
+	RF_REG_PAIR(4,	48, 0xef),
+	RF_REG_PAIR(4,	49, 0x1a),
+	/* PA */
+	RF_REG_PAIR(4,	54, 0x07),
+	RF_REG_PAIR(4,	55, 0xa7),
+	RF_REG_PAIR(4,	56, 0xcc),
+	RF_REG_PAIR(4,	57, 0x14),
+	RF_REG_PAIR(4,	58, 0x07),
+	RF_REG_PAIR(4,	59, 0xa8),
+	RF_REG_PAIR(4,	60, 0xd7),
+	RF_REG_PAIR(4,	61, 0x10),
+	RF_REG_PAIR(4,	62, 0x1c),
+	RF_REG_PAIR(4,	63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+	RF_REG_PAIR(5,	 0, 0x47),
+	RF_REG_PAIR(5,	 1, 0x00),
+	RF_REG_PAIR(5,	 2, 0x00),
+	RF_REG_PAIR(5,	 3, 0x08),
+	RF_REG_PAIR(5,	 4, 0x04),
+	RF_REG_PAIR(5,	 5, 0x20),
+	RF_REG_PAIR(5,	 6, 0x3a),
+	RF_REG_PAIR(5,	 7, 0x3a),
+	RF_REG_PAIR(5,	 8, 0x00),
+	RF_REG_PAIR(5,	 9, 0x00),
+	RF_REG_PAIR(5,	10, 0x10),
+	RF_REG_PAIR(5,	11, 0x10),
+	RF_REG_PAIR(5,	12, 0x10),
+	RF_REG_PAIR(5,	13, 0x10),
+	RF_REG_PAIR(5,	14, 0x10),
+	RF_REG_PAIR(5,	15, 0x20),
+	RF_REG_PAIR(5,	16, 0x22),
+	RF_REG_PAIR(5,	17, 0x7c),
+	RF_REG_PAIR(5,	18, 0x00),
+	RF_REG_PAIR(5,	19, 0x00),
+	RF_REG_PAIR(5,	20, 0x00),
+	RF_REG_PAIR(5,	21, 0xf1),
+	RF_REG_PAIR(5,	22, 0x11),
+	RF_REG_PAIR(5,	23, 0x02),
+	RF_REG_PAIR(5,	24, 0x41),
+	RF_REG_PAIR(5,	25, 0x20),
+	RF_REG_PAIR(5,	26, 0x00),
+	RF_REG_PAIR(5,	27, 0xd7),
+	RF_REG_PAIR(5,	28, 0xa2),
+	RF_REG_PAIR(5,	29, 0x20),
+	RF_REG_PAIR(5,	30, 0x49),
+	RF_REG_PAIR(5,	31, 0x20),
+	RF_REG_PAIR(5,	32, 0x04),
+	RF_REG_PAIR(5,	33, 0xf1),
+	RF_REG_PAIR(5,	34, 0xa1),
+	RF_REG_PAIR(5,	35, 0x01),
+	RF_REG_PAIR(5,	41, 0x00),
+	RF_REG_PAIR(5,	42, 0x00),
+	RF_REG_PAIR(5,	43, 0x00),
+	RF_REG_PAIR(5,	44, 0x00),
+	RF_REG_PAIR(5,	45, 0x00),
+	RF_REG_PAIR(5,	46, 0x00),
+	RF_REG_PAIR(5,	47, 0x00),
+	RF_REG_PAIR(5,	48, 0x00),
+	RF_REG_PAIR(5,	49, 0x00),
+	RF_REG_PAIR(5,	50, 0x00),
+	RF_REG_PAIR(5,	51, 0x00),
+	RF_REG_PAIR(5,	52, 0x00),
+	RF_REG_PAIR(5,	53, 0x00),
+	RF_REG_PAIR(5,	54, 0x00),
+	RF_REG_PAIR(5,	55, 0x00),
+	RF_REG_PAIR(5,	56, 0x00),
+	RF_REG_PAIR(5,	57, 0x00),
+	RF_REG_PAIR(5,	58, 0x31),
+	RF_REG_PAIR(5,	59, 0x31),
+	RF_REG_PAIR(5,	60, 0x0a),
+	RF_REG_PAIR(5,	61, 0x02),
+	RF_REG_PAIR(5,	62, 0x00),
+	RF_REG_PAIR(5,	63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ *	 from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+	{  75, 0x60 },
+	{  92, 0x02 },
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+	{ 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+	{  69, 0x12 },
+	{  91, 0x07 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+	{  69, 0x15 },
+	{  91, 0x04 },
+	{ 195, 0x23 }, { 196, 0x12 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+	{  69, 0x12 },
+	{  75, 0x5e },
+	{  91, 0x07 },
+	{  92, 0x02 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+	{ 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+	{  69, 0x15 },
+	{  75, 0x5c },
+	{  91, 0x04 },
+	{  92, 0x03 },
+	{ 195, 0x23 }, { 196, 0x10 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+	{ 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+	{  75, 0x60 },
+	{  92, 0x02 },
+	{ 178, 0xff }, /* For CCK CH14 OBW */
+	{ 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+	{  69, 0x12 },
+	{  91, 0x07 },
+	{ 195, 0x23 }, { 196, 0x17 },
+	{ 195, 0x24 }, { 196, 0x06 },
+	{ 195, 0x81 }, { 196, 0x12 },
+	{ 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+	{  69, 0x15 },
+	{  91, 0x04 },
+	{ 195, 0x23 }, { 196, 0x12 },
+	{ 195, 0x24 }, { 196, 0x08 },
+	{ 195, 0x81 }, { 196, 0x15 },
+	{ 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
+static const struct reg_table {
+	const struct mt76_reg_pair *regs;
+	size_t n;
+} bbp_mode_table[3][3] = {
+	{
+		BBP_TABLE(bbp_normal_temp_bw20),
+		BBP_TABLE(bbp_normal_temp_bw40),
+		BBP_TABLE(bbp_normal_temp),
+	}, {
+		BBP_TABLE(bbp_high_temp_bw20),
+		BBP_TABLE(bbp_high_temp_bw40),
+		BBP_TABLE(bbp_high_temp),
+	}, {
+		BBP_TABLE(bbp_low_temp_bw20),
+		BBP_TABLE(bbp_low_temp_bw40),
+		BBP_TABLE(bbp_low_temp),
+	}
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644
index 0000000..cad5e81
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr)
+{
+	ether_addr_copy(dev->macaddr, addr);
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		txrate->idx = idx + 4;
+		return;
+	case MT_PHY_TYPE_CCK:
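+		/* hw indices 8-11 are the short preamble CCK rates */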
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+	if (rate & MT_TXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+			struct mt76_tx_status *st)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
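+	/* The status FIFO reports only the final rate and the retry count;
+	 * assume the rate dropped by one index per retry when back-filling
+	 * the rate table.
+	 */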
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + st->retry;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = 1;
+	info->status.ampdu_ack_len = st->success;
+
+	if (st->is_probe)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->chandef.chan->band;
+		u16 val;
+
+		r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return rateval;
+}
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+	struct mt76_tx_status stat = {};
+	u32 val;
+
+	val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+	stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+	stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+	stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+	stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+	stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+	stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val);
+	stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val);
+
+	return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	void *msta;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta,
+				   drv_priv);
+	}
+
+	mt76_mac_fill_tx_status(dev, &info, stat);
+
+	spin_lock_bh(&dev->mac_lock);
+	ieee80211_tx_status_noskb(dev->hw, sta, &info);
+	spin_unlock_bh(&dev->mac_lock);
+
+	rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+				int ht_mode)
+{
+	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+	u32 prot[6];
+	bool ht_rts[4] = {};
+	int i;
+
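+	/* prot[0..5] end up in the six consecutive protection registers
+	 * (CCK, OFDM, MM20, MM40, GF20, GF40).
+	 */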
+	prot[0] = MT_PROT_NAV_SHORT |
+		  MT_PROT_TXOP_ALLOW_ALL |
+		  MT_PROT_RTS_THR_EN;
+	prot[1] = prot[0];
+	if (legacy_prot)
+		prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+	prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+	prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+	if (legacy_prot) {
+		prot[2] |= MT_PROT_RATE_CCK_11;
+		prot[3] |= MT_PROT_RATE_CCK_11;
+		prot[4] |= MT_PROT_RATE_CCK_11;
+		prot[5] |= MT_PROT_RATE_CCK_11;
+	} else {
+		prot[2] |= MT_PROT_RATE_OFDM_24;
+		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+		prot[4] |= MT_PROT_RATE_OFDM_24;
+		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+	}
+
+	switch (mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		ht_rts[1] = ht_rts[3] = true;
+		break;
+
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+		break;
+	}
+
+	if (non_gf)
+		ht_rts[2] = ht_rts[3] = true;
+
+	for (i = 0; i < 4; i++)
+		if (ht_rts[i])
+			prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+	for (i = 0; i < 6; i++)
+		mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+	if (short_preamb)
+		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+	else
+		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+	u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+	val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+		 MT_BEACON_TIME_CFG_SYNC_MODE |
+		 MT_BEACON_TIME_CFG_TBTT_EN);
+
+	if (!enable) {
+		mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+		return;
+	}
+
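+	/* The interval field expects units of 1/16 TU, hence the << 4 shift
+	 * of the TU-based beacon interval.
+	 */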
+	val &= ~MT_BEACON_TIME_CFG_INTVAL;
+	val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+		MT_BEACON_TIME_CFG_TIMER_EN |
+		MT_BEACON_TIME_CFG_SYNC_MODE |
+		MT_BEACON_TIME_CFG_TBTT_EN;
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+	u32 val = mt7601u_rr(dev, 0x10f4);
+
+	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+		return;
+
+	dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+	udelay(10);
+	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       mac_work.work);
+	struct {
+		u32 addr_base;
+		u32 span;
+		u64 *stat_base;
+	} spans[] = {
+		{ MT_RX_STA_CNT0,	3,	dev->stats.rx_stat },
+		{ MT_TX_STA_CNT0,	3,	dev->stats.tx_stat },
+		{ MT_TX_AGG_STAT,	1,	dev->stats.aggr_stat },
+		{ MT_MPDU_DENSITY_CNT,	1,	dev->stats.zero_len_del },
+		{ MT_TX_AGG_CNT_BASE0,	8,	&dev->stats.aggr_n[0] },
+		{ MT_TX_AGG_CNT_BASE1,	8,	&dev->stats.aggr_n[16] },
+	};
+	u32 sum, n;
+	int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 *	 registers by hand.  The MCU takes ca. 20ms to complete a read
+	 *	 of 24 registers, while reading them one by one takes roughly
+	 *	 24 * 200us =~ 5ms.
+	 */
+
+	k = 0;
+	n = 0;
+	sum = 0;
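+	/* Each 32-bit counter register packs two 16-bit counters which are
+	 * accumulated into consecutive u64 slots.  The TX_AGG_CNT registers
+	 * count AMPDUs of length 2k+1 (low half) and 2k+2 (high half), which
+	 * the average-length calculation below relies on.
+	 */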
+	for (i = 0; i < ARRAY_SIZE(spans); i++)
+		for (j = 0; j < spans[i].span; j++) {
+			u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+			spans[i].stat_base[j * 2] += val & 0xffff;
+			spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+			/* Calculate average AMPDU length */
+			if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+			    spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+				continue;
+
+			n += (val >> 16) + (val & 0xffff);
+			sum += (val & 0xffff) * (1 + k * 2) +
+				(val >> 16) * (2 + k * 2);
+			k++;
+		}
+
+	atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+	mt7601u_check_mac_err(dev);
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	u8 zmac[ETH_ALEN] = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	if (mac)
+		memcpy(zmac, mac, sizeof(zmac));
+
+	mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+	struct ieee80211_sta *sta;
+	struct mt76_wcid *wcid;
+	void *msta;
+	u8 min_factor = 3;
+	int i;
+
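+	/* The AMPDU length limit is a single global setting, so use the
+	 * smallest ampdu_factor advertised by any associated station.
+	 */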
+	rcu_read_lock();
+	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+		wcid = rcu_dereference(dev->wcid[i]);
+		if (!wcid)
+			continue;
+
+		msta = container_of(wcid, struct mt76_sta, wcid);
+		sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+		min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+	}
+	rcu_read_unlock();
+
+	mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+		   FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (WARN_ON(idx >= 8))
+			idx = 0;
+		idx += 4;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (WARN_ON(idx >= 4))
+			idx = 0;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	if (rate & MT_RXWI_RATE_BW)
+		status->bw = RATE_INFO_BW_40;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+			  u16 rate, int rssi)
+{
+	dev->bcn_freq_off = rxwi->freq_off;
+	dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+	ewma_rssi_add(&dev->avg_rssi, -rssi);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+	return ieee80211_is_beacon(hdr->frame_control) &&
+		ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt7601u_rxwi *rxwi = rxi;
+	u32 len, ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	int rssi;
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	if (len < 10)
+		return 0;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_MMIC_STRIPPED;
+		status->flag |= RX_FLAG_MIC_STRIPPED;
+		status->flag |= RX_FLAG_ICV_STRIPPED;
+		status->flag |= RX_FLAG_IV_STRIPPED;
+	}
+	/* let mac80211 take care of PN validation since apparently
+	 * the hardware does not support it
+	 */
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_PN_LEN))
+		status->flag &= ~RX_FLAG_IV_STRIPPED;
+
+	status->chains = BIT(0);
+	rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+	status->chain_signal[0] = status->signal = rssi;
+	status->freq = dev->chandef.chan->center_freq;
+	status->band = dev->chandef.chan->band;
+
+	mt76_mac_process_rate(status, rate);
+
+	spin_lock_bh(&dev->con_mon_lock);
+	if (mt7601u_rx_is_our_beacon(dev, data))
+		mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+	else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+		ewma_rssi_add(&dev->avg_rssi, -rssi);
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_set_key(dev, idx);
+
+	mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
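+		/* Byte 3 of the IV carries the key index in its top two bits;
+		 * TKIP/CCMP additionally set the Extended IV bit (0x20).
+		 */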
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP) {
+			/* Note: start with 1 to comply with the spec
+			 *	 (see comment in common/cmm_wpa.c:4291).
+			 */
+			iv_data[0] |= 1;
+			iv_data[3] |= 0x20;
+		}
+	}
+	mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+	val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+	val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+	val &= ~MT_WCID_ATTR_PAIRWISE;
+	val |= MT_WCID_ATTR_PAIRWISE *
+		!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+	return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EINVAL;
+
+	trace_set_shared_key(dev, vif_idx, key_idx);
+
+	mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+			key_data, sizeof(key_data));
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644
index 0000000..54bd4fa
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mac.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 is_probe:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+/* Note: on MT7601U the original "RSSI" and "SNR" fields do not hold what
+ *	 their names suggest; the names used by this driver are educated
+ *	 guesses (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 frag_sn;
+	__le16 rate;
+
+	u8 unknown;
+	u8 zero[3];
+
+	u8 snr;
+	u8 ant;
+	u8 gain;
+	u8 freq_off;
+
+	__le32 resv2;
+	__le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_U2M			BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211		BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS	BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS		BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(27, 16)
+#define MT_RXWI_CTL_TID			GENMASK(31, 28)
+
+#define MT_RXWI_FRAG			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS		GENMASK(6, 0)
+#define MT_RXWI_RATE_BW			BIT(7)
+#define MT_RXWI_RATE_SGI		BIT(8)
+#define MT_RXWI_RATE_STBC		GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF		BIT(11)
+#define MT_RXWI_RATE_SND		BIT(12)
+#define MT_RXWI_RATE_ITXBF		BIT(13)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL		GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID	GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA		BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID		GENMASK(7, 0)
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+	__le16 flags;
+	__le16 rate_ctl;
+
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+
+	__le32 iv;
+
+	__le32 eiv;
+
+	u8 aid;
+	u8 txstream;
+	__le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN		GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK	BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_RATE_MCS		GENMASK(6, 0)
+#define MT_TXWI_RATE_BW			BIT(7)
+#define MT_TXWI_RATE_SGI		BIT(8)
+#define MT_TXWI_RATE_STBC		GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE		GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT		GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID		GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ	GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT	BIT(4)
+#define MT_TXWI_CTL_PIFS_REV		BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+			  struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+			    const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+			 const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/main.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644
index 0000000..72e608c
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt7601u_mac_start(dev);
+	if (ret)
+		goto out;
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+	mt7601u_mac_stop(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int idx = 0;
+	unsigned int wcid = GROUP_WCID(idx);
+
+	/* Note: to support AP mode, do the AP/STA handling mt76 does:
+	 *	- beacon offsets
+	 *	- MAC address tricks
+	 *	- shifting the vif idx
+	 */
+	mvif->idx = idx;
+
+	if (!ether_addr_equal(dev->macaddr, vif->addr))
+		mt7601u_set_macaddr(dev, vif->addr);
+
+	if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+		return -ENOSPC;
+	dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+	mvif->group_wcid.idx = wcid;
+	mvif->group_wcid.hw_key_idx = -1;
+
+	return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	unsigned int wcid = mvif->group_wcid.idx;
+
+	dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+		      unsigned int *total_flags, u64 multicast)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	u32 flags = 0;
+
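+/* The RX filter config bits are "drop" bits: set the hardware bit whenever
+ * the corresponding FIF_* flag is not requested by mac80211.
+ */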
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(OTHER_BSS, MT_RX_FILTR_CFG_PROMISC);
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt7601u_phy_con_cal_onoff(dev, info);
+
+	if (changed & BSS_CHANGED_BSSID) {
+		mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+		/* Note: this is a hack - beacon_int is not reset when leaving
+		 *	 a BSS and no more suitable event is generated.
+		 *	 rt2x00 doesn't seem to be bothered by this either.
+		 */
+		if (is_zero_ether_addr(info->bssid))
+			mt7601u_mac_config_tsf(dev, false, 0);
+	}
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+		mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+		mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+		mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+		mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+	}
+
+	if (changed & BSS_CHANGED_BEACON_INT)
+		mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+	if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+		mt7601u_mac_set_protection(dev, info->use_cts_prot,
+					   info->ht_operation_mode);
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE)
+		mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC)
+		mt7601u_phy_recalibrate_after_assoc(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+	int i, idx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+		idx = ffs(~dev->wcid_mask[i]);
+		if (!idx)
+			continue;
+
+		idx--;
+		dev->wcid_mask[i] |= BIT(idx);
+		break;
+	}
+
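+	/* The top of the WCID table is reserved for the per-vif group entries
+	 * and the last slot (see GROUP_WCID() and the WCID map in mt7601u.h),
+	 * so station entries are limited to the low indices.
+	 */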
+	idx = i * BITS_PER_LONG + idx;
+	if (idx > 119)
+		return -1;
+
+	return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+	mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+	dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+	mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+	mt7601u_mac_set_ampdu_factor(dev);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif,
+		const u8 *mac_addr)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt7601u_agc_save(dev);
+	set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt7601u_agc_restore(dev);
+	clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	if (dev->freq_cal.enabled)
+		ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+					     MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+	struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+	struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	/* fall back to sw encryption for unsupported ciphers */
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else {
+		if (idx == wcid->hw_key_idx)
+			wcid->hw_key_idx = -1;
+
+		key = NULL;
+	}
+
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct mt7601u_dev *dev = hw->priv;
+
+	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+	return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_ampdu_params *params)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+	u16 ssn = params->ssn;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+	WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		break;
+	case IEEE80211_AMPDU_TX_START:
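+		/* Keep the SSN shifted into the sequence-number position
+		 * (above the 4-bit fragment field), as used by the BAR sent
+		 * on IEEE80211_AMPDU_TX_OPERATIONAL.
+		 */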
+		msta->agg_ssn[tid] = ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates;
+	struct ieee80211_tx_rate rate = {};
+
+	rcu_read_lock();
+	rates = rcu_dereference(sta->rates);
+
+	if (!rates)
+		goto out;
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+	rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+	.tx = mt7601u_tx,
+	.start = mt7601u_start,
+	.stop = mt7601u_stop,
+	.add_interface = mt7601u_add_interface,
+	.remove_interface = mt7601u_remove_interface,
+	.config = mt7601u_config,
+	.configure_filter = mt76_configure_filter,
+	.bss_info_changed = mt7601u_bss_info_changed,
+	.sta_add = mt7601u_sta_add,
+	.sta_remove = mt7601u_sta_remove,
+	.sta_notify = mt7601u_sta_notify,
+	.set_key = mt7601u_set_key,
+	.conf_tx = mt7601u_conf_tx,
+	.sw_scan_start = mt7601u_sw_scan,
+	.sw_scan_complete = mt7601u_sw_scan_complete,
+	.ampdu_action = mt76_ampdu_action,
+	.sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+	.set_rts_threshold = mt7601u_set_rts_threshold,
+};
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644
index 0000000..af55ed8
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD		0x3800
+#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE		1024
+
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+	return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+	put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+					    u8 seq, enum mcu_cmd cmd)
+{
+	WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+				     FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
+				     FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+					    struct sk_buff *skb, bool need_resp)
+{
+	u32 i, csum = 0;
+
+	for (i = 0; i < skb->len / 4; i++)
+		csum ^= get_unaligned_le32(skb->data + i * 4);
+
+	trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (skb) {
+		skb_reserve(skb, MT_DMA_HDR_LEN);
+		skb_put_data(skb, data, len);
+	}
+
+	return skb;
+}
+
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+	struct urb *urb = dev->mcu.resp.urb;
+	u32 rxfce;
+	int urb_status, ret, i = 5;
+
+	while (i--) {
+		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+						 msecs_to_jiffies(300))) {
+			dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+			continue;
+		}
+
+		/* Make copies of important data before reusing the urb */
+		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+		urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+		ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+					     &dev->mcu.resp, GFP_KERNEL,
+					     mt7601u_complete_urb,
+					     &dev->mcu.resp_cmpl);
+		if (ret)
+			return ret;
+
+		if (urb_status)
+			dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+				urb_status);
+
+		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+			return 0;
+
+		dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+	}
+
+	dev_err(dev->dev, "Error: %s timed out\n", __func__);
+	return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+		     enum mcu_cmd cmd, bool wait_resp)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+					    dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+	int sent, ret;
+	u8 seq = 0;
+
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0;
+
+	mutex_lock(&dev->mcu.mutex);
+
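+	/* Sequence number 0 is left for commands that do not expect a
+	 * response, so the 4-bit sequence counter skips it.
+	 */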
+	if (wait_resp)
+		while (!seq)
+			seq = ++dev->mcu.msg_seq & 0xf;
+
+	mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+	if (dev->mcu.resp_cmpl.done)
+		dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+	trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+	trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+	if (ret) {
+		dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+		goto out;
+	}
+	if (sent != skb->len)
+		dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+	if (wait_resp)
+		ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+	mutex_unlock(&dev->mcu.mutex);
+
+	consume_skb(skb);
+
+	return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+				       enum mcu_function func, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+	int ret;
+
+	if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+		return 0;
+
+	ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+					  use_hvga);
+	if (ret) {
+		dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+		return ret;
+	}
+
+	dev->tssi_read_trig = true;
+
+	return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(cal),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
+	return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+			    const struct mt76_reg_pair *data, int n)
+{
+	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
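+	/* Split the request into chunks that fit into a single in-band
+	 * command and recurse; only the final chunk (cnt == n) waits for the
+	 * MCU response.
+	 */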
+	cnt = min(max_vals_per_cmd, n);
+
+	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	for (i = 0; i < cnt; i++) {
+		skb_put_le32(skb, base + data[i].reg);
+		skb_put_le32(skb, data[i].value);
+	}
+
+	ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+			     const u32 *data, int n)
+{
+	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+	struct sk_buff *skb;
+	int cnt, i, ret;
+
+	if (!n)
+		return 0;
+
+	cnt = min(max_regs_per_cmd, n);
+
+	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, MT_DMA_HDR_LEN);
+
+	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+	for (i = 0; i < cnt; i++)
+		skb_put_le32(skb, data[i]);
+
+	ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+	if (ret)
+		return ret;
+
+	return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+					data + cnt, n - cnt);
+}
+
+struct mt76_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76_fw {
+	struct mt76_fw_header hdr;
+	u8 ivb[MT_MCU_IVB_SIZE];
+	u8 ilm[];
+};
+
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+			    const struct mt7601u_dma_buf *dma_buf,
+			    const void *data, u32 len, u32 dst_addr)
+{
+	DECLARE_COMPLETION_ONSTACK(cmpl);
+	struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+	__le32 reg;
+	u32 val;
+	int ret;
+
+	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
+			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+			  FIELD_PREP(MT_TXD_INFO_LEN, len));
+	memcpy(buf.buf, &reg, sizeof(reg));
+	memcpy(buf.buf + sizeof(reg), data, len);
+	memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_ADDR, dst_addr);
+	if (ret)
+		return ret;
+	len = roundup(len, 4);
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_LEN, len << 16);
+	if (ret)
+		return ret;
+
+	buf.len = MT_DMA_HDR_LEN + len + 4;
+	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+				     &buf, GFP_KERNEL,
+				     mt7601u_complete_urb, &cmpl);
+	if (ret)
+		return ret;
+
+	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+		dev_err(dev->dev, "Error: firmware upload timed out\n");
+		usb_kill_urb(buf.urb);
+		return -ETIMEDOUT;
+	}
+	if (mt7601u_urb_has_error(buf.urb)) {
+		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+			buf.urb->status);
+		return buf.urb->status;
+	}
+
+	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+	       const void *data, int len, u32 dst_addr)
+{
+	int n, ret;
+
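+	/* Upload the image in MCU_FW_URB_MAX_PAYLOAD-sized chunks, waiting
+	 * for bit 31 of MT_MCU_COM_REG1 before sending the next one.
+	 */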
+	if (len == 0)
+		return 0;
+
+	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+	ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+	if (ret)
+		return ret;
+
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+		return -ETIMEDOUT;
+
+	return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+	struct mt7601u_dma_buf dma_buf;
+	void *ivb;
+	u32 ilm_len, dlm_len;
+	int i, ret;
+
+	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+	if (!ivb)
+		return -ENOMEM;
+	if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+	dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+		ilm_len, sizeof(fw->ivb));
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+	if (ret)
+		goto error;
+
+	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+	dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+			     dlm_len, MT_MCU_DLM_OFFSET);
+	if (ret)
+		goto error;
+
+	ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+				     0x12, 0, ivb, sizeof(fw->ivb));
+	if (ret < 0)
+		goto error;
+	ret = 0;
+
+	for (i = 100; i && !firmware_running(dev); i--)
+		msleep(10);
+	if (!i) {
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	dev_dbg(dev->dev, "Firmware running!\n");
+error:
+	kfree(ivb);
+	mt7601u_usb_free_buf(dev, &dma_buf);
+
+	return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76_fw_header *hdr;
+	int len, ret;
+	u32 val;
+
+	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+
+	if (firmware_running(dev))
+		return firmware_request_cache(dev->dev, MT7601U_FIRMWARE);
+
+	ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto err_inv_fw;
+
+	hdr = (const struct mt76_fw_header *) fw->data;
+
+	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+		goto err_inv_fw;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto err_inv_fw;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->dev,
+		 "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+		 le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt7601u_wr(dev, 0x94c, 0);
+	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+	mt7601u_vendor_reset(dev);
+	msleep(5);
+
+	mt7601u_wr(dev, 0xa44, 0);
+	mt7601u_wr(dev, 0x230, 0x84210);
+	mt7601u_wr(dev, 0x400, 0x80c00);
+	mt7601u_wr(dev, 0x800, 1);
+
+	mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+					 MT_PBF_CFG_TX1Q_EN |
+					 MT_PBF_CFG_TX2Q_EN |
+					 MT_PBF_CFG_TX3Q_EN));
+
+	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+	val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+	val &= ~MT_USB_DMA_CFG_TX_CLR;
+	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+	/* FCE tx_fs_base_ptr */
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+	/* FCE tx_fs_max_cnt */
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+	/* FCE pdma enable */
+	mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+	/* FCE skip_fs_en */
+	mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+	ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+	release_firmware(fw);
+
+	return ret;
+
+err_inv_fw:
+	dev_err(dev->dev, "Invalid firmware image\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	mutex_init(&dev->mcu.mutex);
+
+	ret = mt7601u_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+	return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+	if (ret)
+		return ret;
+
+	init_completion(&dev->mcu.resp_cmpl);
+	if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+		return -ENOMEM;
+	}
+
+	ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+				     &dev->mcu.resp, GFP_KERNEL,
+				     mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+	if (ret) {
+		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+		return ret;
+	}
+
+	return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+	usb_kill_urb(dev->mcu.resp.urb);
+	mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644
index 0000000..56cdd31
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mcu.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+
+#define MT_MCU_IVB_SIZE			0x40
+#define MT_MCU_DLM_OFFSET		0x80000
+
+#define MT_MCU_MEMMAP_WLAN		0x00410000
+#define MT_MCU_MEMMAP_BBP		0x40000000
+#define MT_MCU_MEMMAP_RF		0x80000000
+
+#define INBAND_PACKET_MAX_LEN		192
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+	MCU_CAL_R = 1,
+	MCU_CAL_DCOC,
+	MCU_CAL_LC,
+	MCU_CAL_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_BW,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQ,
+	MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644
index 0000000..a122f1d
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+#include <linux/average.h>
+
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL		(4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)
+
+#define MT_BBP_REG_VERSION		0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT		28 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT		0x80 /* * 33ns */
+#define MT_RX_ORDER			3
+#define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+	struct urb *urb;
+	void *buf;
+	dma_addr_t dma;
+	size_t len;
+};
+
+struct mt7601u_mcu {
+	struct mutex mutex;
+
+	u8 msg_seq;
+
+	struct mt7601u_dma_buf resp;
+	struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+	struct delayed_work work;
+	u8 freq;
+	bool enabled;
+	bool adjusting;
+};
+
+struct mac_stats {
+	u64 rx_stat[6];
+	u64 tx_stat[6];
+	u64 aggr_stat[2];
+	u64 aggr_n[32];
+	u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES	16
+struct mt7601u_rx_queue {
+	struct mt7601u_dev *dev;
+
+	struct mt7601u_dma_buf_rx {
+		struct urb *urb;
+		struct page *p;
+	} e[N_RX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int pending;
+};
+
+#define N_TX_ENTRIES	64
+
+struct mt7601u_tx_queue {
+	struct mt7601u_dev *dev;
+
+	struct mt7601u_dma_buf_tx {
+		struct urb *urb;
+		struct sk_buff *skb;
+	} e[N_TX_ENTRIES];
+
+	unsigned int start;
+	unsigned int end;
+	unsigned int entries;
+	unsigned int used;
+	unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ *     0: mcast wcid
+ *     1: bssid wcid
+ *  1...: STAs
+ * ...7e: group wcids
+ *    7f: reserved
+ */
+#define N_WCIDS		128
+#define GROUP_WCID(idx)	(N_WCIDS - 2 - idx)
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE		39
+#define MT_FREQ_OFFSET_INVALID		-128
+
+enum mt_temp_mode {
+	MT_TEMP_MODE_NORMAL,
+	MT_TEMP_MODE_HIGH,
+	MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+	MT_BW_20,
+	MT_BW_40,
+};
+
+enum {
+	MT7601U_STATE_INITIALIZED,
+	MT7601U_STATE_REMOVED,
+	MT7601U_STATE_WLAN_RUNNING,
+	MT7601U_STATE_MCU_RUNNING,
+	MT7601U_STATE_SCANNING,
+	MT7601U_STATE_READING_STATS,
+	MT7601U_STATE_MORE_STATS,
+};
+
+DECLARE_EWMA(rssi, 10, 4);
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock:		protects @wcid->tx_rate.
+ * @mac_lock:		locks out mac80211's tx status and rx paths.
+ * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
+ *			flags in @state.
+ * @rx_lock:		protects @rx_q.
+ * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex:		ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex:	protects @vend_buf, ensures atomicity of read/write
+ *			accesses
+ * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
+ *			(accesses to RF and BBP).
+ * @hw_atomic_mutex:	ensures exclusive access to HW during critical
+ *			operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+	struct ieee80211_hw *hw;
+	struct device *dev;
+
+	unsigned long state;
+
+	struct mutex mutex;
+
+	unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_supported_band *sband_2g;
+
+	struct mt7601u_mcu mcu;
+
+	struct delayed_work cal_work;
+	struct delayed_work mac_work;
+
+	struct workqueue_struct *stat_wq;
+	struct delayed_work stat_work;
+
+	struct mt76_wcid *mon_wcid;
+	struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+	spinlock_t lock;
+	spinlock_t mac_lock;
+
+	const u16 *beacon_offsets;
+
+	u8 macaddr[ETH_ALEN];
+	struct mt7601u_eeprom_params *ee;
+
+	struct mutex vendor_req_mutex;
+	void *vend_buf;
+
+	struct mutex reg_atomic_mutex;
+	struct mutex hw_atomic_mutex;
+
+	u32 rxfilter;
+	u32 debugfs_reg;
+
+	u8 out_eps[8];
+	u8 in_eps[8];
+	u16 out_max_packet;
+	u16 in_max_packet;
+
+	/* TX */
+	spinlock_t tx_lock;
+	struct tasklet_struct tx_tasklet;
+	struct mt7601u_tx_queue *tx_q;
+	struct sk_buff_head tx_skb_done;
+
+	atomic_t avg_ampdu_len;
+
+	/* RX */
+	spinlock_t rx_lock;
+	struct tasklet_struct rx_tasklet;
+	struct mt7601u_rx_queue rx_q;
+
+	/* Connection monitoring things */
+	spinlock_t con_mon_lock;
+	u8 ap_bssid[ETH_ALEN];
+
+	s8 bcn_freq_off;
+	u8 bcn_phy_mode;
+
+	struct ewma_rssi avg_rssi;
+
+	u8 agc_save;
+
+	struct mt7601u_freq_cal freq_cal;
+
+	bool tssi_read_trig;
+
+	s8 tssi_init;
+	s8 tssi_init_hvga;
+	s16 tssi_init_hvga_offset_db;
+
+	int prev_pwr_diff;
+
+	enum mt_temp_mode temp_mode;
+	int curr_temp;
+	int dpd_temp;
+	s8 raw_temp;
+	bool pll_lock_protect;
+
+	u8 bw;
+	bool chan_ext_below;
+
+	/* PA mode */
+	u32 rf_pa_mode[2];
+
+	struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+	char tssi0;
+	int trgt_power;
+};
+
+struct mt76_wcid {
+	u8 idx;
+	u8 hw_key_idx;
+
+	u16 tx_rate;
+	bool tx_rate_set;
+	u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+	u8 idx;
+
+	struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+	struct mt76_wcid wcid;
+	u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+		     const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+	       int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+		    int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+	return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+			    const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+			     const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+			    struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+			 struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+			       struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+				int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+	return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+			   struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/phy.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644
index 0000000..d863ab4
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -0,0 +1,1252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+	int ret = 0;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    WARN_ON(offset > 63))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	mt7601u_wr(dev, MT_RF_CSR_CFG,
+		   FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+		   MT_RF_CSR_CFG_WR |
+		   MT_RF_CSR_CFG_KICK);
+	trace_rf_write(dev, bank, offset, value);
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+			bank, offset, ret);
+
+	return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+	int ret = -ETIMEDOUT;
+	u32 val;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    WARN_ON(offset > 63))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0xff;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	mt7601u_wr(dev, MT_RF_CSR_CFG,
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+		   FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+		   MT_RF_CSR_CFG_KICK);
+
+	if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+		goto out;
+
+	val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+	if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+	    FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+		ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+		trace_rf_read(dev, bank, offset, ret);
+	}
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+			bank, offset, ret);
+
+	return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_rf_rr(dev, bank, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	ret = mt7601u_rf_wr(dev, bank, offset, val);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+	return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+	return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+	    test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+		dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+		goto out;
+	}
+
+	mt7601u_wr(dev, MT_BBP_CSR_CFG,
+		   FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) |
+		   FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
+		   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+	trace_bbp_write(dev, offset, val);
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+	u32 val;
+	int ret = -ETIMEDOUT;
+
+	if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+		return -EINVAL;
+	if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+		return 0xff;
+
+	mutex_lock(&dev->reg_atomic_mutex);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+		goto out;
+
+	mt7601u_wr(dev, MT_BBP_CSR_CFG,
+		   FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
+		   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+		   MT_BBP_CSR_CFG_READ);
+
+	if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+		goto out;
+
+	val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+	if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+		ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val);
+		trace_bbp_read(dev, offset, ret);
+	}
+out:
+	mutex_unlock(&dev->reg_atomic_mutex);
+
+	if (ret < 0)
+		dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+			offset, ret);
+
+	return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_bbp_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	mt7601u_bbp_wr(dev, offset, val);
+
+	return val;
+}
+
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+	int ret;
+
+	ret = mt7601u_bbp_rr(dev, offset);
+	if (ret < 0)
+		return ret;
+	val |= ret & ~mask;
+	if (ret != val)
+		mt7601u_bbp_wr(dev, offset, val);
+
+	return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+	int i = 20;
+	u8 val;
+
+	do {
+		val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+		if (val && val != 0xff)
+			break;
+	} while (--i);
+
+	if (!i) {
+		dev_err(dev->dev, "Error: BBP is not ready\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+	return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+			 struct mt7601u_rxwi *rxwi, u16 rate)
+{
+	static const s8 lna[2][2][3] = {
+		/* main LNA */ {
+			/* bw20 */ { -2, 15, 33 },
+			/* bw40 */ {  0, 16, 34 }
+		},
+		/*  aux LNA */ {
+			/* bw20 */ { -2, 15, 33 },
+			/* bw40 */ { -2, 16, 34 }
+		}
+	};
+	int bw = FIELD_GET(MT_RXWI_RATE_BW, rate);
+	int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+	int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+	int val;
+
+	if (lna_id) /* LNA id can be 0, 2, 3. */
+		lna_id--;
+
+	val = 8;
+	val -= lna[aux_lna][bw][lna_id];
+	val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+	val -= dev->ee->lna_gain;
+	val -= dev->ee->rssi_offset[0];
+
+	return val;
+}
+
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+	mt7601u_rf_wr(dev, 0, 4, 0x0a);
+	mt7601u_rf_wr(dev, 0, 5, 0x20);
+	mt7601u_rf_set(dev, 0, 4, BIT(7));
+	msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+	u32 filter = 0;
+	int ret;
+
+	if (!cal)
+		filter |= 0x10000;
+	if (dev->bw != MT_BW_20)
+		filter |= 0x00100;
+
+	/* TX */
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+	if (ret)
+		return ret;
+	/* RX */
+	return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+	const struct reg_table *t;
+
+	if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+		return -EINVAL;
+
+	t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+	const struct reg_table *t;
+	int ret;
+
+	if (dev->temp_mode == mode)
+		return 0;
+
+	dev->temp_mode = mode;
+	trace_temp_mode(dev, mode);
+
+	t = bbp_mode_table[dev->temp_mode];
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      t[2].regs, t[2].n);
+	if (ret)
+		return ret;
+
+	return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				       t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+	if (hw_chan != 14 || dev->bw != MT_BW_20) {
+		mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+		mt7601u_bbp_wr(dev, 178, 0xff);
+
+		t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+		t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+	} else { /* Apply CH14 OBW fixup */
+		mt7601u_bbp_wr(dev, 4, 0x60);
+		mt7601u_bbp_wr(dev, 178, 0);
+
+		/* Note: vendor code is buggy here for negative values */
+		t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+		t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+	}
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+				     struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS	4
+	static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+		{ 0x99,	0x99,	0x09,	0x50 },
+		{ 0x46,	0x44,	0x0a,	0x50 },
+		{ 0xec,	0xee,	0x0a,	0x50 },
+		{ 0x99,	0x99,	0x0b,	0x50 },
+		{ 0x46,	0x44,	0x08,	0x51 },
+		{ 0xec,	0xee,	0x08,	0x51 },
+		{ 0x99,	0x99,	0x09,	0x51 },
+		{ 0x46,	0x44,	0x0a,	0x51 },
+		{ 0xec,	0xee,	0x0a,	0x51 },
+		{ 0x99,	0x99,	0x0b,	0x51 },
+		{ 0x46,	0x44,	0x08,	0x52 },
+		{ 0xec,	0xee,	0x08,	0x52 },
+		{ 0x99,	0x99,	0x09,	0x52 },
+		{ 0x33,	0x33,	0x0b,	0x52 },
+	};
+	struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+		{ 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+	};
+	struct mt76_reg_pair bbp_settings[3] = {
+		{ 62, 0x37 - dev->ee->lna_gain },
+		{ 63, 0x37 - dev->ee->lna_gain },
+		{ 64, 0x37 - dev->ee->lna_gain },
+	};
+
+	struct ieee80211_channel *chan = chandef->chan;
+	enum nl80211_channel_type chan_type =
+		cfg80211_get_chandef_type(chandef);
+	struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+	int chan_idx;
+	bool chan_ext_below;
+	u8 bw;
+	int i, ret;
+
+	bw = MT_BW_20;
+	chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+	chan_idx = chan->hw_value - 1;
+
+	if (chandef->width == NL80211_CHAN_WIDTH_40) {
+		bw = MT_BW_40;
+
+		if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+			chan_idx -= 2;
+		else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+			chan_idx += 2;
+		else
+			dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+	}
+
+	if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+		dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+			bw, chan_ext_below);
+
+		mt7601u_bbp_set_bw(dev, bw);
+
+		mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+		mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+		dev->chan_ext_below = chan_ext_below;
+	}
+
+	for (i = 0; i < FREQ_PLAN_REGS; i++)
+		channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+				      channel_freq_plan, FREQ_PLAN_REGS);
+	if (ret)
+		return ret;
+
+	mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+		    dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      bbp_settings, ARRAY_SIZE(bbp_settings));
+	if (ret)
+		return ret;
+
+	mt7601u_vco_cal(dev);
+	mt7601u_bbp_set_bw(dev, bw);
+	ret = mt7601u_set_bw_filter(dev, false);
+	if (ret)
+		return ret;
+
+	mt7601u_apply_ch14_fixup(dev, chan->hw_value);
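+	/* MT_TX_PWR_CFG_0 packs four signed 6-bit (s6) per-rate power values,
+	 * one per byte: CCK groups 0/1 in the low half and OFDM groups 0/1 in
+	 * the high half, all for bw20.
+	 */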
+	mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+					 int_to_s6(t->ofdm[0].bw20) << 16 |
+					 int_to_s6(t->cck[1].bw20) << 8 |
+					 int_to_s6(t->cck[0].bw20));
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		mt7601u_agc_reset(dev);
+
+	dev->chandef = *chandef;
+
+	return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+			    struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->freq_cal.work);
+
+	mutex_lock(&dev->hw_atomic_mutex);
+	ret = __mt7601u_phy_set_channel(dev, chandef);
+	mutex_unlock(&dev->hw_atomic_mutex);
+	if (ret)
+		return ret;
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		return 0;
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	if (dev->freq_cal.enabled)
+		ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+					     MT_FREQ_CAL_INIT_DELAY);
+	return 0;
+}
+
+#define BBP_R47_FLAG		GENMASK(2, 0)
+#define BBP_R47_F_TSSI		0
+#define BBP_R47_F_PKT_T		1
+#define BBP_R47_F_TX_RATE	2
+#define BBP_R47_F_TEMP		4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev:	pointer to adapter structure
+ * @reg:	value of BBP R47 before the operation
+ * @flag:	one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through the BBP R47/R49 pair.
+ * Takes the old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+	flag |= reg & ~BBP_R47_FLAG;
+	mt7601u_bbp_wr(dev, 47, flag);
+	usleep_range(500, 700);
+	return mt7601u_bbp_rr(dev, 49);
+}
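+
+/* Typical use of the R47/R49 pair (a sketch; mt7601u_read_temp() below
+ * follows exactly this pattern):
+ *
+ *	bbp_r47 = mt7601u_bbp_rr(dev, 47);
+ *	(trigger the measurement and wait for it to finish)
+ *	temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+ */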
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+	u8 bbp_val, temp;
+	u32 rf_bp, rf_set;
+	int i;
+
+	rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+	rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+	bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+	mt7601u_bbp_wr(dev, 22, 0x40);
+
+	for (i = 100; i && (bbp_val & 0x10); i--)
+		bbp_val = mt7601u_bbp_rr(dev, 47);
+
+	temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+	mt7601u_bbp_wr(dev, 22, 0);
+
+	bbp_val = mt7601u_bbp_rr(dev, 21);
+	bbp_val |= 0x02;
+	mt7601u_bbp_wr(dev, 21, bbp_val);
+	bbp_val &= ~0x02;
+	mt7601u_bbp_wr(dev, 21, bbp_val);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+	trace_read_temp(dev, temp);
+	return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+	int i;
+	u8 val;
+	s8 temp;
+
+	val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+	/* Note: this rarely succeeds; temp can change even if it fails. */
+	for (i = 100; i && (val & 0x10); i--)
+		val = mt7601u_bbp_rr(dev, 47);
+
+	temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+	trace_read_temp(dev, temp);
+	return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+	static const struct mt76_reg_pair intro[] = {
+		{ 158, 0x8d }, { 159, 0xfc },
+		{ 158, 0x8c }, { 159, 0x4c },
+	}, outro[] = {
+		{ 158, 0x8d }, { 159, 0xe0 },
+	};
+	u32 mac_ctrl;
+	int i, ret;
+
+	mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      intro, ARRAY_SIZE(intro));
+	if (ret)
+		dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+	for (i = 20; i; i--) {
+		usleep_range(300, 500);
+
+		mt7601u_bbp_wr(dev, 158, 0x8c);
+		if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+			break;
+	}
+	if (!i)
+		dev_err(dev->dev, "%s timed out\n", __func__);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+				      outro, ARRAY_SIZE(outro));
+	if (ret)
+		dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+	mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+	mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
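+/* lin2dBd() appears to convert a linear reading into a logarithmic (dB-style)
+ * fixed-point value: the input is normalized to a 16-bit mantissa with fls(),
+ * the fractional part of log2 is estimated with a piecewise-linear fit, and
+ * the result is rescaled from a log2 base to dB units.
+ */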
+static s16 lin2dBd(u16 linear)
+{
+	short exp = 0;
+	unsigned int mantisa;
+	int app, dBd;
+
+	if (WARN_ON(!linear))
+		return -10000;
+
+	mantisa = linear;
+
+	exp = fls(mantisa) - 16;
+	if (exp > 0)
+		mantisa >>= exp;
+	else
+		mantisa <<= abs(exp);
+
+	if (mantisa <= 0xb800)
+		app = (mantisa + (mantisa >> 3) + (mantisa >> 4) - 0x9600);
+	else
+		app = (mantisa - (mantisa >> 3) - (mantisa >> 6) - 0x5a00);
+	if (app < 0)
+		app = 0;
+
+	dBd = ((15 + exp) << 15) + app;
+	dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+	dBd = (dBd >> 10);
+
+	return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+	struct tssi_data *d = &dev->ee->tssi_data;
+	int init_offset;
+
+	init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+	mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+		 int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+	u8 rf_vga, rf_mixer, bbp_r47;
+	int i, j;
+	s8 res[4];
+	s16 tssi_init_db, tssi_init_hvga_db;
+
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	mt7601u_bbp_wr(dev, 58, 0);
+	mt7601u_bbp_wr(dev, 241, 0x2);
+	mt7601u_bbp_wr(dev, 23, 0x8);
+	bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+	/* Set VGA gain */
+	rf_vga = mt7601u_rf_rr(dev, 5, 3);
+	mt7601u_rf_wr(dev, 5, 3, 8);
+
+	/* Mixer disable */
+	rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+	mt7601u_rf_wr(dev, 4, 39, 0);
+
+	for (i = 0; i < 4; i++) {
+		mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+		mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+		mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+		/* BBP TSSI initial and soft reset */
+		mt7601u_bbp_wr(dev, 22, 0);
+		mt7601u_bbp_wr(dev, 244, 0);
+
+		mt7601u_bbp_wr(dev, 21, 1);
+		udelay(1);
+		mt7601u_bbp_wr(dev, 21, 0);
+
+		/* TSSI measurement */
+		mt7601u_bbp_wr(dev, 47, 0x50);
+		mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+		for (j = 20; j; j--)
+			if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+				break;
+		if (!j)
+			dev_err(dev->dev, "%s timed out\n", __func__);
+
+		/* TSSI read */
+		mt7601u_bbp_wr(dev, 47, 0x40);
+		res[i] = mt7601u_bbp_rr(dev, 49);
+	}
+
+	tssi_init_db = lin2dBd((short)res[1] - res[0]);
+	tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+	dev->tssi_init = res[0];
+	dev->tssi_init_hvga = res[2];
+	dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+	dev_dbg(dev->dev,
+		"TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+		dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+		tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+	mt7601u_bbp_wr(dev, 22, 0);
+	mt7601u_bbp_wr(dev, 244, 0);
+
+	mt7601u_bbp_wr(dev, 21, 1);
+	udelay(1);
+	mt7601u_bbp_wr(dev, 21, 0);
+
+	mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+	mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+	mt7601u_rf_wr(dev, 5, 3, rf_vga);
+	mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+	mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+	mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+	int ret, temp, hi_temp = 400, lo_temp = -200;
+
+	temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+	dev->curr_temp = temp;
+
+	/* DPD Calibration */
+	if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+		dev->dpd_temp = temp;
+
+		ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+		if (ret)
+			return ret;
+
+		mt7601u_vco_cal(dev);
+
+		dev_dbg(dev->dev, "Recalibrate DPD\n");
+	}
+
+	/* PLL Lock Protect */
+	if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+		dev->pll_lock_protect =  true;
+
+		mt7601u_rf_wr(dev, 4, 4, 6);
+		mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+		dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+	} else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+		dev->pll_lock_protect = false;
+
+		mt7601u_rf_wr(dev, 4, 4, 0);
+		mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+		dev_dbg(dev->dev, "PLL lock protect off\n");
+	}
+
+	if (on) {
+		hi_temp -= 50;
+		lo_temp -= 50;
+	}
+
+	/* BBP CR for H, L, N temperature */
+	if (temp > hi_temp)
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+	else if (temp > lo_temp)
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+	else
+		return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI, we can just use trgt_pwr from eeprom. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+	return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+	return !(mt7601u_current_tx_power(dev) > 20);
+}
+
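+/* Each TX rate has a 2-bit PA mode field packed into MT_RF_PA_MODE_CFG0/1
+ * (CFG0 for CCK/OFDM, CFG1 for HT); decode_tb turns that field into a
+ * fixed-point correction which is added to the TSSI target power.
+ */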
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
+	static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+	u32 reg;
+
+	switch (phy_mode) {
+	case MT_PHY_TYPE_OFDM:
+		tx_rate += 4;
+		/* fall through */
+	case MT_PHY_TYPE_CCK:
+		reg = dev->rf_pa_mode[0];
+		break;
+	default:
+		reg = dev->rf_pa_mode[1];
+		break;
+	}
+
+	return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+	static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+	static const int static_power[4] = { 0, -49152, -98304, 49152 };
+	struct mt7601u_tssi_params p;
+	u8 bbp_r47, pkt_type, tx_rate;
+	struct power_per_rate *rate_table;
+
+	bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+	p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+	dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+	pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+	p.trgt_power = mt7601u_current_tx_power(dev);
+
+	switch (pkt_type & 0x03) {
+	case MT_PHY_TYPE_CCK:
+		tx_rate = (pkt_type >> 4) & 0x03;
+		rate_table = dev->ee->power_rate_table.cck;
+		break;
+
+	case MT_PHY_TYPE_OFDM:
+		tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+		rate_table = dev->ee->power_rate_table.ofdm;
+		break;
+
+	default:
+		tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+		tx_rate &= 0x7f;
+		rate_table = dev->ee->power_rate_table.ht;
+		break;
+	}
+
+	if (dev->bw == MT_BW_20)
+		p.trgt_power += rate_table[tx_rate / 2].bw20;
+	else
+		p.trgt_power += rate_table[tx_rate / 2].bw40;
+
+	p.trgt_power <<= 12;
+
+	dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+	p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+						   tx_rate);
+
+	/* Channel 14, cck, bw20 */
+	if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+		if (mt7601u_bbp_rr(dev, 4) & 0x20)
+			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+		else
+			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+	}
+
+	p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+	p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+	dev_dbg(dev->dev,
+		"tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+		p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+	return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+	return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
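+/* Closed-loop TSSI TX power control (rough flow): kick an MCU TSSI read and,
+ * once a reading is available, convert it to a dB-scale value, compare it
+ * against the per-rate target power, adjust the temperature-compensation
+ * field of MT_TX_ALC_CFG_1 accordingly and kick the next read.
+ */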
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+	struct mt7601u_tssi_params params;
+	int curr_pwr, diff_pwr;
+	char tssi_offset;
+	s8 tssi_init;
+	s16 tssi_m_dc, tssi_db;
+	bool hvga;
+	u32 val;
+
+	if (!dev->ee->tssi_enabled)
+		return 0;
+
+	hvga = mt7601u_use_hvga(dev);
+	if (!dev->tssi_read_trig)
+		return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+	if (!mt7601u_tssi_read_ready(dev))
+		return 0;
+
+	params = mt7601u_tssi_params_get(dev);
+
+	tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+	tssi_m_dc = params.tssi0 - tssi_init;
+	tssi_db = lin2dBd(tssi_m_dc);
+	dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+		tssi_m_dc, tssi_db, hvga);
+
+	if (dev->chandef.chan->hw_value < 5)
+		tssi_offset = dev->ee->tssi_data.offset[0];
+	else if (dev->chandef.chan->hw_value < 9)
+		tssi_offset = dev->ee->tssi_data.offset[1];
+	else
+		tssi_offset = dev->ee->tssi_data.offset[2];
+
+	if (hvga)
+		tssi_db -= dev->tssi_init_hvga_offset_db;
+
+	curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+	diff_pwr = params.trgt_power - curr_pwr;
+	dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+	if (params.tssi0 > 126 && diff_pwr > 0) {
+		dev_err(dev->dev, "Error: TSSI upper saturation\n");
+		diff_pwr = 0;
+	}
+	if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+		dev_err(dev->dev, "Error: TSSI lower saturation\n");
+		diff_pwr = 0;
+	}
+
+	if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+	    (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+	     (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+		diff_pwr = 0;
+	else
+		dev->prev_pwr_diff = diff_pwr;
+
+	diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+	diff_pwr /= 4096;
+
+	dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+	val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+	curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+	diff_pwr += curr_pwr;
+	val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+	mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+	return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+	return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+	u8 agc = mt7601u_agc_default(dev);
+
+	mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+	dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+	mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+	u8 val = mt7601u_agc_default(dev);
+	long avg_rssi;
+
+	if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+		return;
+
+	/* Note: only in STA mode and not dozing; perhaps do this only if
+	 *	 there have been enough RSSI updates since the last run?
+	 *	 RSSI updates only come from beacons and U2M frames, so this
+	 *	 should work...
+	 */
+	spin_lock_bh(&dev->con_mon_lock);
+	avg_rssi = ewma_rssi_read(&dev->avg_rssi);
+	spin_unlock_bh(&dev->con_mon_lock);
+	if (avg_rssi == 0)
+		return;
+
+	avg_rssi = -avg_rssi;
+	if (avg_rssi <= -70)
+		val -= 0x20;
+	else if (avg_rssi <= -60)
+		val -= 0x10;
+
+	if (val != mt7601u_bbp_rr(dev, 66))
+		mt7601u_bbp_wr(dev, 66, val);
+
+	/* TODO: also if lost a lot of beacons try resetting
+	 *       (see RTMPSetAGCInitValue() call in mlme.c).
+	 */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					    cal_work.work);
+
+	mt7601u_agc_tune(dev);
+	mt7601u_tssi_cal(dev);
+	/* If TSSI calibration was run, it already updated the temperature. */
+	if (!dev->ee->tssi_enabled)
+		dev->raw_temp = mt7601u_read_temp(dev);
+	mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
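+/* Beacon-based frequency calibration: compare the reported frequency offset
+ * against per-PHY-mode activate/deactivate thresholds (with hysteresis) and,
+ * while adjusting, step RF bank 0 register 12 by one and re-run the VCO
+ * calibration.  The return value is the delay until the next run.
+ */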
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+	u8 activate_threshold, deactivate_threshold;
+
+	trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+	/* No beacons received - reschedule soon */
+	if (last_offset == MT_FREQ_OFFSET_INVALID)
+		return MT_FREQ_CAL_ADJ_INTERVAL;
+
+	switch (phy_mode) {
+	case MT_PHY_TYPE_CCK:
+		activate_threshold = 19;
+		deactivate_threshold = 5;
+		break;
+	case MT_PHY_TYPE_OFDM:
+		activate_threshold = 102;
+		deactivate_threshold = 32;
+		break;
+	case MT_PHY_TYPE_HT:
+	case MT_PHY_TYPE_HT_GF:
+		activate_threshold = 82;
+		deactivate_threshold = 20;
+		break;
+	default:
+		WARN_ON(1);
+		return MT_FREQ_CAL_CHECK_INTERVAL;
+	}
+
+	if (abs(last_offset) >= activate_threshold)
+		dev->freq_cal.adjusting = true;
+	else if (abs(last_offset) <= deactivate_threshold)
+		dev->freq_cal.adjusting = false;
+
+	if (!dev->freq_cal.adjusting)
+		return MT_FREQ_CAL_CHECK_INTERVAL;
+
+	if (last_offset > deactivate_threshold) {
+		if (dev->freq_cal.freq > 0)
+			dev->freq_cal.freq--;
+		else
+			dev->freq_cal.adjusting = false;
+	} else if (last_offset < -deactivate_threshold) {
+		if (dev->freq_cal.freq < 0xbf)
+			dev->freq_cal.freq++;
+		else
+			dev->freq_cal.adjusting = false;
+	}
+
+	trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+	mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+	mt7601u_vco_cal(dev);
+
+	return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+					 MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       freq_cal.work.work);
+	s8 last_offset;
+	u8 phy_mode;
+	unsigned long delay;
+
+	spin_lock_bh(&dev->con_mon_lock);
+	last_offset = dev->bcn_freq_off;
+	phy_mode = dev->bcn_phy_mode;
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+	ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+	spin_lock_bh(&dev->con_mon_lock);
+	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+	spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+			       struct ieee80211_bss_conf *info)
+{
+	if (!info->assoc)
+		cancel_delayed_work_sync(&dev->freq_cal.work);
+
+	/* Start/stop collecting beacon data */
+	spin_lock_bh(&dev->con_mon_lock);
+	ether_addr_copy(dev->ap_bssid, info->bssid);
+	ewma_rssi_init(&dev->avg_rssi);
+	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+	spin_unlock_bh(&dev->con_mon_lock);
+
+	dev->freq_cal.freq = dev->ee->rf_freq_off;
+	dev->freq_cal.enabled = info->assoc;
+	dev->freq_cal.adjusting = false;
+
+	if (info->assoc)
+		ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+					     MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+	u32 mac_ctrl;
+	int ret;
+
+	dev->raw_temp = mt7601u_read_bootup_temp(dev);
+	dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+		MT_EE_TEMPERATURE_SLOPE;
+	dev->dpd_temp = dev->curr_temp;
+
+	mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+	if (ret)
+		return ret;
+
+	ret = mt7601u_rf_rr(dev, 0, 4);
+	if (ret < 0)
+		return ret;
+	ret |= 0x80;
+	ret = mt7601u_rf_wr(dev, 0, 4, ret);
+	if (ret)
+		return ret;
+	msleep(2);
+
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+	if (ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	ret = mt7601u_set_bw_filter(dev, true);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+	if (ret)
+		return ret;
+	ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+	if (ret)
+		return ret;
+
+	mt7601u_rxdc_cal(dev);
+
+	mt7601u_tssi_dc_gain_cal(dev);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+	mt7601u_temp_comp(dev, true);
+
+	return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+	u32 val, old;
+
+	if (bw == dev->bw) {
+		/* Vendor driver does the rmc even when no change is needed. */
+		mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+		return 0;
+	}
+	dev->bw = bw;
+
+	/* Stop the MAC for the duration of the bw change */
+	old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+	val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+	mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+		  0, 500000);
+
+	mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+	mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+	return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev:	pointer to adapter structure
+ * @path:	rx path to set; values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+	mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev:	pointer to adapter structure
+ * @dac:	DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+	mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+	int ret;
+
+	dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+	dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+	ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+				      ARRAY_SIZE(rf_central));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+				      ARRAY_SIZE(rf_channel));
+	if (ret)
+		return ret;
+	ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+	if (ret)
+		return ret;
+
+	ret = mt7601u_init_cal(dev);
+	if (ret)
+		return ret;
+
+	dev->prev_pwr_diff = 100;
+
+	INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/regs.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 0000000..e755227
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,627 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3		0x22
+#define MT76XX_REV_E4		0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT(_n + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN		0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN		0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX		0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR			0x0230
+#define MT_FCE_DMA_LEN			0x0234
+
+#define MT_USB_DMA_CFG			0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT	GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT	GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR		BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR		BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT	BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN	BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN	BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN	BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP	BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID	GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY		BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY		BIT(31)
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_US_CYC_CFG			0x02a4
+#define MT_US_CYC_CNT			GENMASK(7, 0)
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+#define MT_RING_SIZE			0x10
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA			0x0430
+#define MT_TXQ_STA			0x0434
+
+#define	MT_RF_CSR_CFG			0x0500
+#define MT_RF_CSR_CFG_DATA		GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID		GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK		GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR		BIT(30)
+#define MT_RF_CSR_CFG_KICK		BIT(31)
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR	0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT	0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF		0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_FCE_SKIP_FS			0x0a6c
+
+#define MT_MAC_CSR0			0x1000
+
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK	GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+#define MT_MAX_LEN_CFG_AMPDU		GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG			0x101c
+#define MT_BBP_CSR_CFG_VAL		GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM		GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ		BIT(16)
+#define MT_BBP_CSR_CFG_BUSY		BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR		BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE		BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TBTT_TIMER_CFG		0x1124
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+#define MT_TXOP_TRUN_EN			GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY		GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_TX_LINK_CFG			0x1350
+#define MT_HT_FBK_CFG0			0x1354
+#define MT_HT_FBK_CFG1			0x1358
+#define MT_LG_FBK_CFG0			0x135c
+#define MT_LG_FBK_CFG1			0x1360
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_PROT_RATE			GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS		BIT(16)
+#define MT_PROT_CTRL_CTS2SELF		BIT(17)
+#define MT_PROT_NAV_SHORT		BIT(18)
+#define MT_PROT_NAV_LONG		BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK		BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM		BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20		BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40		BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20		BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40		BIT(25)
+#define MT_PROT_RTS_THR_EN		BIT(26)
+#define MT_PROT_RATE_CCK_11		0x0003
+#define MT_PROT_RATE_OFDM_6		0x4000
+#define MT_PROT_RATE_OFDM_24		0x4004
+#define MT_PROT_RATE_DUP_OFDM_24	0x4084
+#define MT_PROT_TXOP_ALLOW_ALL		GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20		(MT_PROT_TXOP_ALLOW_ALL &	\
+					 ~MT_PROT_TXOP_ALLOW_MM40 &	\
+					 ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+#define MT_TX0_RF_GAIN_ATTEN		0x13a8
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN		0x13c0
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_AUTO_RSP_CFG			0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT	BIT(4)
+
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+
+#define MT_RX_PARSER_CFG		0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL	BIT(0)
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+
+#define MT_RX_STA_CNT0			0x1700
+#define MT_RX_STA_CNT1			0x1704
+#define MT_RX_STA_CNT2			0x1708
+#define MT_TX_STA_CNT0			0x170c
+#define MT_TX_STA_CNT1			0x1710
+#define MT_TX_STA_CNT2			0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ *	MT_TX_STAT_FIFO_RATE		GENMASK(26, 16)
+ *	MT_TX_STAT_FIFO_ETXBF		BIT(27)
+ *	MT_TX_STAT_FIFO_SND		BIT(28)
+ *	MT_TX_STAT_FIFO_ITXBF		BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl field, with the rate set to the rate at which the frame was
+ * acked.
+ */
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE	GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
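+
+/* Example decode of a STAT_FIFO word (a sketch, assuming <linux/bitfield.h>):
+ *
+ *	if (stat & MT_TX_STAT_FIFO_VALID) {
+ *		u8 wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat);
+ *		u16 rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat);
+ *	}
+ */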
+
+#define MT_TX_AGG_STAT			0x171c
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+
+#define MT_MPDU_DENSITY_CNT		0x1740
+
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN		GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		\
+	(MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx)		\
+	(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx)		\
+	((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		\
+	(MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		\
+	(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		\
+	((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+enum mt76_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644
index 0000000..42df68a
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644
index 0000000..5a6ba01
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/trace.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name,			\
+				wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s "
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	"%04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evtu,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evtu, reg_read,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evtu, reg_write,
+	TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt_submit_urb,
+	TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+	TP_ARGS(dev, u),
+	TP_STRUCT__entry(
+		DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = u->pipe;
+		__entry->len = u->transfer_buffer_length;
+	),
+	TP_printk(DEV_PR_FMT "p:%08x len:%u",
+		  DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({	\
+	struct urb u;					\
+	u.pipe = __pipe;				\
+	u.transfer_buffer_length = __len;		\
+	trace_mt_submit_urb(__dev, &u);			\
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+	TP_PROTO(struct mt7601u_dev *dev,
+		 struct sk_buff *skb, u32 csum, bool resp),
+	TP_ARGS(dev, skb, csum, resp),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, info)
+		__field(u32, csum)
+		__field(bool, resp)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->info = *(u32 *)skb->data;
+		__entry->csum = csum;
+		__entry->resp = resp;
+	),
+	TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+		  DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+	TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+		 u16 val, u16 offset, void *buf, size_t buflen, int ret),
+	TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+		__field(u16, val) __field(u16, offset) __field(void*, buf)
+		__field(int, buflen) __field(int, ret)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->pipe = pipe;
+		__entry->req = req;
+		__entry->req_type = req_type;
+		__entry->val = val;
+		__entry->offset = offset;
+		__entry->buf = buf;
+		__entry->buflen = buflen;
+		__entry->ret = ret;
+	),
+	TP_printk(DEV_PR_FMT
+		  "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+		  DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+		  __entry->req_type, __entry->val, __entry->offset,
+		  !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+	TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+	TP_ARGS(dev, offset, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(int, o) __field(u16, v)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->o = offset;
+		__entry->v = val;
+	),
+	TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, bank)
+		__field(u8, reg)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+		__entry->bank = bank;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+		DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+	TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+	TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, reg)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx=%02hhx",
+		DEV_PR_ARG, __entry->reg, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+	TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+	),
+	TP_printk(
+		DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+	)
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+	TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+	TP_ARGS(dev, phy_mode, freq_off),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, phy_mode)
+		__field(s8, freq_off)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->phy_mode = phy_mode;
+		__entry->freq_off = freq_off;
+	),
+	TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+		  DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+	TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+	TP_ARGS(dev, rxwi, f),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt7601u_rxwi, rxwi)
+		__field(u32, fce_info)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->rxwi = *rxwi;
+		__entry->fce_info = f;
+	),
+	TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+		  "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+		  "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+		  "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+		  le32_to_cpu(__entry->rxwi.rxinfo),
+		  le32_to_cpu(__entry->rxwi.ctl),
+		  le16_to_cpu(__entry->rxwi.frag_sn),
+		  le16_to_cpu(__entry->rxwi.rate),
+		  __entry->rxwi.unknown,
+		  __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+		  __entry->rxwi.zero[2],
+		  __entry->rxwi.snr, __entry->rxwi.ant,
+		  __entry->rxwi.gain, __entry->rxwi.freq_off,
+		  __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+		  __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+		 struct mt76_sta *sta, struct mt76_txwi *h),
+	TP_ARGS(dev, skb, sta, h),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field_struct(struct mt76_txwi, h)
+		__field(struct sk_buff *, skb)
+		__field(struct mt76_sta *, sta)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->h = *h;
+		__entry->skb = skb;
+		__entry->sta = sta;
+	),
+	TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate_ctl:%04hx "
+		  "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+		  __entry->skb, __entry->sta,
+		  le16_to_cpu(__entry->h.flags),
+		  le16_to_cpu(__entry->h.rate_ctl),
+		  __entry->h.ack_ctl, __entry->h.wcid,
+		  le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+	TP_ARGS(dev, skb),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(struct sk_buff *, skb)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->skb = skb;
+	),
+	TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+	TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+	TP_ARGS(dev, cleaned),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(int, cleaned)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cleaned = cleaned;
+	),
+	TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+	TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+	TP_ARGS(dev, stat1, stat2),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, stat1)	__field(u32, stat2)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->stat1 = stat1;
+		__entry->stat2 = stat2;
+	),
+	TP_printk(DEV_PR_FMT "%08x %08x",
+		  DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+	TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+	TP_ARGS(dev, cnt, paged),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, cnt)
+		__field(bool, paged)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cnt = cnt;
+		__entry->paged = paged;
+	),
+	TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+		  DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+	TP_PROTO(struct mt7601u_dev *dev, u8 val),
+	TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+	TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+	TP_ARGS(dev, vid, key),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u8, vid)
+		__field(u8, key)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->vid = vid;
+		__entry->key = key;
+	),
+	TP_printk(DEV_PR_FMT "vid:%02hhx key:%02hhx",
+		  DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/tx.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644
index 0000000..f3dff83
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+	MT_TXQ_VO = IEEE80211_AC_VO,
+	MT_TXQ_VI = IEEE80211_AC_VI,
+	MT_TXQ_BE = IEEE80211_AC_BE,
+	MT_TXQ_BK = IEEE80211_AC_BK,
+	MT_TXQ_PSD,
+	MT_TXQ_MCU,
+	__MT_TXQ_MAX
+};
+
+/* The hardware queue order is mirrored relative to mac80211: AC_VO (queue 0)
+ * maps to hardware queue 3, which has the highest priority.
+ */
+static u8 q2hwq(u8 q)
+{
+	return q ^ 0x3;
+}
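+
+/* A worked illustration of the mapping above (not part of the driver code):
+ * with mac80211's AC numbering (VO=0, VI=1, BE=2, BK=3), q ^ 0x3 yields
+ *
+ *	q2hwq(MT_TXQ_VO) == 3		q2hwq(MT_TXQ_VI) == 2
+ *	q2hwq(MT_TXQ_BE) == 1		q2hwq(MT_TXQ_BK) == 0
+ *
+ * so mac80211's highest-priority AC (VO) lands on hardware queue 3, matching
+ * the MT_EP_OUT_AC_BK..MT_EP_OUT_AC_VO bulk endpoint ordering in usb.h.
+ */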
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	return q2hwq(qid);
+}
+
+/* Note: TX retry reporting is a bit broken.
+ *	 Retries are reported only once per AMPDU and often come a frame early,
+ *	 i.e. they are reported in the last status preceding the AMPDU. Apart
+ *	 from the fact that it's hard to know the length of the AMPDU (which is
+ *	 needed to know how many consecutive frames the retries should be
+ *	 applied to), if the status comes early on a full FIFO it gets lost and
+ *	 the retries of the whole AMPDU become invisible.
+ *	 As a work-around, encode the desired rate in the PKT_ID of the TX
+ *	 descriptor and guess the retries from that (every rate is tried once).
+ *	 The only downside is that for MCS0 we have to rely solely on
+ *	 transmission failures, as no retries can ever be reported.
+ *	 Not having to read EXT_FIFO has the nice effect of doubling the number
+ *	 of reports which can be fetched.
+ *	 Also, the vendor driver never uses the EXT_FIFO register, so it may be
+ *	 undertested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+	u8 encoded = (rate + 1) + is_probe * 8;
+
+	/* Because PKT_ID 0 disables status reporting, only 15 values are
+	 * available but 16 are needed (8 MCS * 2 for encoding is_probe),
+	 * so we need to cram two rates together: MCS0 and MCS7 with is_probe
+	 * share PKT_ID 9.
+	 */
+	if (is_probe && rate == 7)
+		return encoded - 7;
+
+	return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+	u8 req_rate = stat->pktid;
+	u8 eff_rate = stat->rate & 0x7;
+
+	req_rate -= 1;
+
+	if (req_rate > 7) {
+		stat->is_probe = true;
+		req_rate -= 8;
+
+		/* Decide between MCS0 and MCS7 which share pktid 9 */
+		if (!req_rate && eff_rate)
+			req_rate = 7;
+	}
+
+	stat->retry = req_rate - eff_rate;
+}
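+
+/* Worked example for the PKT_ID round-trip above (illustrative only, not part
+ * of this patch): a rate-control probe sent at MCS5 is encoded as
+ *	mt7601u_tx_pktid_enc(dev, 5, true) == (5 + 1) + 8 == 14.
+ * If the status reports the frame actually went out at MCS3
+ * (stat->rate & 0x7 == 3), mt7601u_tx_pktid_dec() computes
+ *	req_rate = 14 - 1 - 8 == 5, stat->is_probe = true,
+ *	stat->retry = 5 - 3 == 2,
+ * i.e. two rate steps were retried before the frame got through.
+ */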
+
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+					       struct ieee80211_tx_info *info)
+{
+	int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+	skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+		mt76_remove_hdr_pad(skb);
+
+	skb_trim(skb, pkt_len);
+}
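+
+/* Illustrative note (not part of this patch): pkt_len here is the original
+ * skb->len stashed in info->status.status_driver_data[0] by mt7601u_tx(), so
+ * after stripping the TXWI, the 4-byte DMA header and any alignment pad,
+ * skb_trim() returns the frame to exactly the length mac80211 handed over.
+ */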
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+	ieee80211_tx_info_clear_status(info);
+	info->status.rates[0].idx = -1;
+	info->flags |= IEEE80211_TX_STAT_ACK;
+
+	spin_lock_bh(&dev->mac_lock);
+	ieee80211_tx_status(dev->hw, skb);
+	spin_unlock_bh(&dev->mac_lock);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	u32 need_head;
+
+	need_head = sizeof(struct mt76_txwi) + 4;
+	if (hdr_len % 4)
+		need_head += 2;
+
+	return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+		  int pkt_len)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	struct mt76_txwi *txwi;
+	unsigned long flags;
+	bool is_probe;
+	u32 pkt_id;
+	u16 rate_ctl;
+	u8 nss;
+
+	txwi = skb_push(skb, sizeof(struct mt76_txwi));
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (rate->idx < 0 || !rate->count)
+		rate_ctl = wcid->tx_rate;
+	else
+		rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi->flags =
+			cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+				    FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+					       sta->ht_cap.ampdu_density));
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			txwi->flags = 0;
+	}
+
+	txwi->wcid = wcid->idx;
+
+	is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+	pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+	pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
+	txwi->len_ctl = cpu_to_le16(pkt_len);
+
+	return txwi;
+}
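+
+/* Illustrative note (not part of this patch): for an aggregating peer with
+ * ht_cap.ampdu_factor == 3 the BA window above works out to
+ *	min(63, IEEE80211_MIN_AMPDU_BUF << 3) == min(63, 8 << 3) == 63,
+ * i.e. the full window, while rate-control probes force ba_size (and the
+ * AMPDU flags) to 0 so they are sent unaggregated.
+ */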
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+		struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt7601u_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ieee80211_sta *sta = control->sta;
+	struct mt76_sta *msta = NULL;
+	struct mt76_wcid *wcid = dev->mon_wcid;
+	struct mt76_txwi *txwi;
+	int pkt_len = skb->len;
+	int hw_q = skb2q(skb);
+
+	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+	if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return;
+	}
+
+	if (sta) {
+		msta = (struct mt76_sta *) sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif) {
+		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+		wcid = &mvif->group_wcid;
+	}
+
+	txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+	if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+		return;
+
+	trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+					       stat_work.work);
+	struct mt76_tx_status stat;
+	unsigned long flags;
+	int cleaned = 0;
+
+	while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+		stat = mt7601u_mac_fetch_tx_status(dev);
+		if (!stat.valid)
+			break;
+
+		mt7601u_tx_pktid_dec(dev, &stat);
+		mt76_send_tx_status(dev, &stat);
+
+		cleaned++;
+	}
+	trace_mt_tx_status_cleaned(dev, cleaned);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
+	if (cleaned)
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(10));
+	else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+		queue_delayed_work(dev->stat_wq, &dev->stat_work,
+				   msecs_to_jiffies(20));
+	else
+		clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct mt7601u_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+	u32 val;
+
+	/* TODO: should we do funny things with the parameters?
+	 *	 See what mt7601u_set_default_edca() used to do in init.c.
+	 */
+
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
+
+	WARN_ON(params->txop > 0xff);
+	WARN_ON(params->aifs > 0xf);
+	WARN_ON(cw_min > 0xf);
+	WARN_ON(cw_max > 0xf);
+
+	val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	/* TODO: based on the user-controlled EnableTxBurst variable the vendor
+	 *	 driver sets a really long TXOP on AC0 (see connect.c:2009), but
+	 *	 only on connect? When not connected it should be 0.
+	 */
+	if (!hw_q)
+		val |= 0x60;
+	else
+		val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
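+
+/* Illustrative note (not part of this patch): mac80211 hands over CW values
+ * in (2^n - 1) form while the EDCA/WMM fields written above appear to take
+ * the exponent n, hence the fls() conversion, e.g. params->cw_min == 15
+ * gives cw_min == fls(15) == 4.
+ */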
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644
index 0000000..cc77204
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt7601u_device_table[] = {
+	{ USB_DEVICE(0x0b05, 0x17d3) },
+	{ USB_DEVICE(0x0e8d, 0x760a) },
+	{ USB_DEVICE(0x0e8d, 0x760b) },
+	{ USB_DEVICE(0x13d3, 0x3431) },
+	{ USB_DEVICE(0x13d3, 0x3434) },
+	{ USB_DEVICE(0x148f, 0x7601) },
+	{ USB_DEVICE(0x148f, 0x760a) },
+	{ USB_DEVICE(0x148f, 0x760b) },
+	{ USB_DEVICE(0x148f, 0x760c) },
+	{ USB_DEVICE(0x148f, 0x760d) },
+	{ USB_DEVICE(0x2001, 0x3d04) },
+	{ USB_DEVICE(0x2717, 0x4106) },
+	{ USB_DEVICE(0x2955, 0x0001) },
+	{ USB_DEVICE(0x2955, 0x1001) },
+	{ USB_DEVICE(0x2955, 0x1003) },
+	{ USB_DEVICE(0x2a5f, 0x1000) },
+	{ USB_DEVICE(0x7392, 0x7710) },
+	{ 0, }
+};
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+			   struct mt7601u_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+	buf->len = len;
+	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+	buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+	return !buf->urb || !buf->buf;
+}
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+	usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+	usb_free_urb(buf->urb);
+}
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+			   struct mt7601u_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context)
+{
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	unsigned pipe;
+	int ret;
+
+	if (dir == USB_DIR_IN)
+		pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+	else
+		pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+	usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+			  complete_fn, context);
+	buf->urb->transfer_dma = buf->dma;
+	buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	trace_mt_submit_urb(dev, buf->urb);
+	ret = usb_submit_urb(buf->urb, gfp);
+	if (ret)
+		dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+			dir, ep_idx, ret);
+	return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+	struct completion *cmpl = urb->context;
+
+	complete(cmpl);
+}
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen)
+{
+	int i, ret;
+	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+	const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+	const unsigned int pipe = (direction == USB_DIR_IN) ?
+		usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+		ret = usb_control_msg(usb_dev, pipe, req, req_type,
+				      val, offset, buf, buflen,
+				      MT_VEND_REQ_TOUT_MS);
+		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+				  buf, buflen, ret);
+
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
+		if (ret >= 0 || ret == -ENODEV)
+			return ret;
+
+		msleep(5);
+	}
+
+	dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+		req, offset, ret);
+
+	return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+	mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+			       MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+/* should be called with vendor_req_mutex held */
+static u32 __mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+	int ret;
+	u32 val = ~0;
+
+	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+	ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+				     0, offset, dev->vend_buf, MT_VEND_BUF);
+	if (ret == MT_VEND_BUF)
+		val = get_unaligned_le32(dev->vend_buf);
+	else if (ret > 0)
+		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+			ret, offset);
+
+	trace_reg_read(dev, offset, val);
+	return val;
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+	u32 ret;
+
+	mutex_lock(&dev->vendor_req_mutex);
+	ret = __mt7601u_rr(dev, offset);
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
+}
+
+/* should be called with vendor_req_mutex held */
+static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+				      const u16 offset, const u32 val)
+{
+	int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+					 val & 0xffff, offset, NULL, 0);
+	if (!ret)
+		ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+					     val >> 16, offset + 2, NULL, 0);
+	trace_reg_write(dev, offset, val);
+	return ret;
+}
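+
+/* Illustrative note (not part of this patch): a control transfer carries only
+ * 16 bits in wValue, so the 32-bit write above is split in two, e.g. writing
+ * 0xdeadbeef to offset 0x1004 issues wValue=0xbeef/wIndex=0x1004 followed by
+ * wValue=0xdead/wIndex=0x1006.
+ */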
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+			     const u16 offset, const u32 val)
+{
+	int ret;
+
+	mutex_lock(&dev->vendor_req_mutex);
+	ret = __mt7601u_vendor_single_wr(dev, req, offset, val);
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
+}
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+	WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+	mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	mutex_lock(&dev->vendor_req_mutex);
+	val |= __mt7601u_rr(dev, offset) & ~mask;
+	__mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return val;
+}
+
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	u32 reg;
+
+	mutex_lock(&dev->vendor_req_mutex);
+	reg = __mt7601u_rr(dev, offset);
+	val |= reg & ~mask;
+	if (reg != val)
+		__mt7601u_vendor_single_wr(dev, MT_VEND_WRITE,
+					   offset, val);
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return val;
+}
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+		     const void *data, int len)
+{
+	WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+	WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+	mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+	mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+	mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+				struct mt7601u_dev *dev)
+{
+	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+	unsigned i, ep_i = 0, ep_o = 0;
+
+	BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+	BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc) &&
+		    ep_i++ < __MT_EP_IN_MAX) {
+			dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+			dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+			/* Note: this is ignored by usb sub-system but vendor
+			 *	 code does it. We can drop this at some point.
+			 */
+			dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   ep_o++ < __MT_EP_OUT_MAX) {
+			dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+			dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+		}
+	}
+
+	if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+		dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+			ep_i, ep_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+			 const struct usb_device_id *id)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+	struct mt7601u_dev *dev;
+	u32 asic_rev, mac_rev;
+	int ret;
+
+	dev = mt7601u_alloc_device(&usb_intf->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	usb_dev = usb_get_dev(usb_dev);
+	usb_reset_device(usb_dev);
+
+	usb_set_intfdata(usb_intf, dev);
+
+	dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+	if (!dev->vend_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = mt7601u_assign_pipes(usb_intf, dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_wait_asic_ready(dev);
+	if (ret)
+		goto err;
+
+	asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+	mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+	dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+		 asic_rev, mac_rev);
+	if ((asic_rev >> 16) != 0x7601) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* Note: vendor driver skips this check for MT7601U */
+	if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+		dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+	ret = mt7601u_init_hardware(dev);
+	if (ret)
+		goto err;
+	ret = mt7601u_register_device(dev);
+	if (ret)
+		goto err_hw;
+
+	set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+	return 0;
+err_hw:
+	mt7601u_cleanup(dev);
+err:
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->hw);
+	return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+	ieee80211_unregister_hw(dev->hw);
+	mt7601u_cleanup(dev);
+
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	destroy_workqueue(dev->stat_wq);
+	ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+	mt7601u_cleanup(dev);
+
+	return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+	struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+	int ret;
+
+	ret = mt7601u_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt7601u_device_table,
+	.probe		= mt7601u_probe,
+	.disconnect	= mt7601u_disconnect,
+	.suspend	= mt7601u_suspend,
+	.resume		= mt7601u_resume,
+	.reset_resume	= mt7601u_resume,
+	.soft_unbind	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.h b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644
index 0000000..9fdf359
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE	"mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY	10
+#define MT_VEND_REQ_TOUT_MS	300
+
+#define MT_VEND_DEV_MODE_RESET	1
+
+#define MT_VEND_BUF		sizeof(__le32)
+
+enum mt_vendor_req {
+	MT_VEND_DEV_MODE = 1,
+	MT_VEND_WRITE = 2,
+	MT_VEND_MULTI_READ = 7,
+	MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+	MT_EP_IN_PKT_RX,
+	MT_EP_IN_CMD_RESP,
+	__MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+	MT_EP_OUT_INBAND_CMD,
+	MT_EP_OUT_AC_BK,
+	MT_EP_OUT_AC_BE,
+	MT_EP_OUT_AC_VI,
+	MT_EP_OUT_AC_VO,
+	MT_EP_OUT_HCCA,
+	__MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+	return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+	return urb->status &&
+		urb->status != -ENOENT &&
+		urb->status != -ECONNRESET &&
+		urb->status != -ESHUTDOWN;
+}
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+			   struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+			   struct mt7601u_dma_buf *buf, gfp_t gfp,
+			   usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+			     const u16 offset, const u32 val);
+
+#endif
diff --git a/marvell/linux/drivers/net/wireless/mediatek/mt7601u/util.c b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644
index 0000000..050c2dd
--- /dev/null
+++ b/marvell/linux/drivers/net/wireless/mediatek/mt7601u/util.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#include "mt7601u.h"
+
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	memmove(skb->data + 2, skb->data, len);
+	skb_pull(skb, 2);
+}
+
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+	int ret;
+
+	if (len % 4 == 0)
+		return 0;
+
+	ret = skb_cow(skb, 2);
+	if (ret)
+		return ret;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 0;
+}
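+
+/* Worked example for the helpers above (illustrative, not part of this
+ * patch): a QoS data header is 26 bytes, which is not a multiple of 4, so
+ * mt76_insert_hdr_pad() grows the headroom by 2, shifts the header to the
+ * new front and zeroes the two bytes between header and payload; the TX
+ * status path undoes this with mt76_remove_hdr_pad() before the skb is
+ * returned to mac80211.
+ */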