[Feature] Merge MR0.5 from MTK
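
High-level summary of this drop, derived from the diff below (vendor
driver internals are described only approximately):

* mt635x-auxadc: change the third MT635x_AUXADC_CHANNEL() argument for
  the TYPEL and DRDI channels from 12 to 15.
* emi-mpu, medmcu_excep: guard the AEE exception-report calls with
  CONFIG_MTK_AEE_FEATURE so the drivers build without AEE.
* wakeup_dtr: hold the wakeup source while DTR is low and release it
  when DTR goes high (previously inverted), flip the RI GPIO level on
  suspend/resume, call enable_irq_wake()/disable_irq_wake() only when
  device_may_wakeup() is set, and add a write() handler that parses
  "wakeup_enable=<0|1>" to toggle wakeup capability at runtime, e.g.
  (device node name shown for illustration only):
      echo "wakeup_enable=0" > /dev/<wakeup_dtr node>
* mtk_eth_soc, stmmac: call the HW-NAT PPE hooks only when
  CONFIG_HW_NAT is enabled; dwmac-mediatek passes a fixed OPP index
  (21) instead of vcorefs_get_num_opp() in the non-force path.
* phy_device: add probe/registration prints.
* bcmdhd: sync with the 2021 Cypress drop: updated copyright headers,
  Kconfig defaults, reworked Makefile feature flags (FT roaming/WBTEXT
  on, NAN and vendor extensions off, RTT gated on !CONFIG_QT), QT/Zebu
  emulation support, a PCI-SPI host-controller path, Cypress FPGA SDIO
  host support, and SDIO retuning/UHS-I fixes in bcmsdstd.
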
Change-Id: I77f817d2d58dfa7a69b4af32123f2929c724356e
diff --git a/src/kernel/linux/v4.19/drivers/iio/adc/mt635x-auxadc.c b/src/kernel/linux/v4.19/drivers/iio/adc/mt635x-auxadc.c
index 5ae3e3b..30a1cdd 100644
--- a/src/kernel/linux/v4.19/drivers/iio/adc/mt635x-auxadc.c
+++ b/src/kernel/linux/v4.19/drivers/iio/adc/mt635x-auxadc.c
@@ -132,8 +132,8 @@
.ch_name = "IMIX_R",
.has_regs = false,
},
- MT635x_AUXADC_CHANNEL(TYPEL, 13, 12, true),
- MT635x_AUXADC_CHANNEL(DRDI, 13, 12, true),
+ MT635x_AUXADC_CHANNEL(TYPEL, 13, 15, true),
+ MT635x_AUXADC_CHANNEL(DRDI, 13, 15, true),
};
struct auxadc_regs {
diff --git a/src/kernel/linux/v4.19/drivers/memory/mediatek/emi-mpu.c b/src/kernel/linux/v4.19/drivers/memory/mediatek/emi-mpu.c
index 6146ea5..28ab63f 100644
--- a/src/kernel/linux/v4.19/drivers/memory/mediatek/emi-mpu.c
+++ b/src/kernel/linux/v4.19/drivers/memory/mediatek/emi-mpu.c
@@ -111,10 +111,10 @@
curr_dbg_cb = curr_dbg_cb->next_dbg_cb)
curr_dbg_cb->func();
- /* FIXME: un-comment after AEE is ready */
+#ifdef CONFIG_MTK_AEE_FEATURE
if (mpu->vio_msg)
aee_kernel_exception("EMIMPU", mpu->vio_msg);
-
+#endif
mpu->in_msg_dump = 0;
}
static DECLARE_WORK(emimpu_work, emimpu_vio_dump);
diff --git a/src/kernel/linux/v4.19/drivers/misc/mediatek/medmcu/rv33/v02/medmcu_excep.c b/src/kernel/linux/v4.19/drivers/misc/mediatek/medmcu/rv33/v02/medmcu_excep.c
index 4f44c8f..9b71920 100644
--- a/src/kernel/linux/v4.19/drivers/misc/mediatek/medmcu/rv33/v02/medmcu_excep.c
+++ b/src/kernel/linux/v4.19/drivers/misc/mediatek/medmcu/rv33/v02/medmcu_excep.c
@@ -337,10 +337,12 @@
scp_prepare_aed_dump(scp_aed_title, id);
+#ifdef CONFIG_MTK_AEE_FEATURE
/* scp aed api, only detail information available*/
aed_common_exception_api("scp", NULL, 0, NULL, 0,
scp_dump.detail_buff, DB_OPT_DEFAULT);
pr_notice("[SCP] scp exception dump is done\n");
+#endif
mutex_unlock(&scp_excep_mutex);
}
diff --git a/src/kernel/linux/v4.19/drivers/misc/mediatek/wakeup_dtr/wakeup_dtr.c b/src/kernel/linux/v4.19/drivers/misc/mediatek/wakeup_dtr/wakeup_dtr.c
index 3138b7b..d361274 100644
--- a/src/kernel/linux/v4.19/drivers/misc/mediatek/wakeup_dtr/wakeup_dtr.c
+++ b/src/kernel/linux/v4.19/drivers/misc/mediatek/wakeup_dtr/wakeup_dtr.c
@@ -32,6 +32,7 @@
static int wakeup_irq;
static int wakeup_irq_status;//current irq status,1:TRIGGER_RISING ,0:TRIGGER_FALLING
static int wakeup_irq_occurs;//Whether an interrupt occurs,1:occurs ,0:not occurs
+static int wakeup_enable = 1;
static struct fasync_struct *wakeup_dtr_fasync;
static wait_queue_head_t wakeup_dtr_wqhead;
@@ -45,11 +46,11 @@
wakeup_irq_status = gpio_get_value(wakeup_dtr);
if (!wakeup_irq_status) {
printk( "low, keep waklock\n");
- pm_relax(dev_wake);
+ pm_stay_awake(dev_wake);
} else {
printk( "high, free wakelock\n");
- pm_stay_awake(dev_wake);
+ pm_relax(dev_wake);
}
wakeup_irq_occurs = 1;
@@ -84,6 +85,35 @@
return size;
}
+static ssize_t wakeup_dtr_write(struct file *file, const char __user *usr, size_t sz, loff_t *loff)
+{
+ int ret = 0;
+ char *buffer = NULL;
+
+ buffer = kzalloc(sz + 1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = copy_from_user(buffer, usr, sz);
+ if (ret) {
+ printk("%s copy_from_user err\n", __func__);
+ kfree(buffer);
+ return -EINVAL;
+ }
+
+ ret = sscanf(buffer, "wakeup_enable=%d", &wakeup_enable);
+ if (wakeup_enable) {
+ printk("wakeup:enable dtr wakeup\n");
+ device_set_wakeup_capable(dev_wake, true);
+ } else {
+ printk("wakeup:disable dtr wakeup\n");
+ device_set_wakeup_capable(dev_wake, false);
+ }
+
+ kfree(buffer);
+ return sz;
+}
+
static int wakeup_dtr_fasync_fun(int fd, struct file *file, int on)
{
printk("wakeup_dtr_fasync_fun\n");
@@ -93,6 +123,7 @@
static struct file_operations wakeup_dtr_chdev_ops = {
.read = wakeup_dtr_read,
+ .write = wakeup_dtr_write,
.fasync = wakeup_dtr_fasync_fun,
};
@@ -158,7 +189,7 @@
return ret;
}
- ret = gpio_direction_output(wakeup_ri,1);
+ ret = gpio_direction_output(wakeup_ri,0);
if (ret < 0) {
printk( "fail to set gpio%d as output pin,ret=%d\n",wakeup_ri, ret);
return ret;
@@ -184,7 +215,7 @@
return ret;
}
- enable_irq_wake(wakeup_irq);
+ //enable_irq_wake(wakeup_irq);
init_waitqueue_head(&wakeup_dtr_wqhead);
ret = wakeup_dtr_cdev_init();
@@ -204,8 +235,11 @@
static int wakeup_dtr_suspend(struct platform_device *dev,pm_message_t state)
{
printk("wakeup_dtr_suspend\n");
- gpio_set_value(wakeup_ri,0);
- enable_irq_wake(wakeup_irq);
+ gpio_set_value(wakeup_ri, 1);
+ if (device_may_wakeup(&dev->dev)) {
+ enable_irq_wake(wakeup_irq);
+ }
+
return 0;
}
@@ -213,7 +247,11 @@
static int wakeup_dtr_resume(struct platform_device *dev)
{
printk("wakeup_dtr_resume\n");
- gpio_set_value(wakeup_ri,1);
+ gpio_set_value(wakeup_ri, 0);
+ if (device_may_wakeup(&dev->dev)) {
+ printk("wakeup_dtr_resume: disable irq wake\n");
+ disable_irq_wake(wakeup_irq);
+ }
return 0;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/src/kernel/linux/v4.19/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b46e312..e634475 100755
--- a/src/kernel/linux/v4.19/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/src/kernel/linux/v4.19/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3103,8 +3103,10 @@
/* default setup the forward port to send frame to PDMA */
val &= ~0xffff;
+#if defined(CONFIG_HW_NAT)
if (ppe_hook_tx_eth)
val |= MTK_GDMA_PPE;
+#endif
val |= config;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
diff --git a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 106b120..7d0d374 100644
--- a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -244,7 +244,7 @@
if (set)
mtk_dvfsrc_force_opp(0);
else
- mtk_dvfsrc_force_opp(vcorefs_get_num_opp());
+ mtk_dvfsrc_force_opp(21);
}
static bool __loopback_test(struct stmmac_priv *priv, int num, int delay,
diff --git a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 03d1c57..301c01d 100644
--- a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,6 +31,11 @@
#include <linux/net_tstamp.h>
#include <linux/reset.h>
+#if defined(CONFIG_HW_NAT)
+extern int (*ppe_hook_rx_snps)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_snps)(struct sk_buff *skb);
+#endif
+
struct stmmac_resources {
void __iomem *addr;
const char *mac;
diff --git a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1a96e69..ba75fdd 100644
--- a/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/src/kernel/linux/v4.19/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -53,6 +53,9 @@
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"
+#if defined(CONFIG_HW_NAT)
+#include <net/ra_nat.h>
+#endif
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -3111,6 +3114,15 @@
int entry, first_tx;
dma_addr_t des;
+#if defined(CONFIG_HW_NAT)
+ if (ppe_hook_tx_snps) {
+ if (ppe_hook_tx_snps(skb) == 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+
tx_q = &priv->tx_queue[queue];
first_tx = tx_q->cur_tx;
@@ -3610,7 +3622,17 @@
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&ch->rx_napi, skb);
+#if defined(CONFIG_HW_NAT)
+ if (ppe_hook_rx_snps) {
+ FOE_MAGIC_TAG(skb) = FOE_MAGIC_SNPS;
+ if (ppe_hook_rx_snps(skb))
+ napi_gro_receive(&ch->rx_napi, skb);
+ } else {
+#endif
+ napi_gro_receive(&ch->rx_napi, skb);
+#if defined(CONFIG_HW_NAT)
+ }
+#endif
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
diff --git a/src/kernel/linux/v4.19/drivers/net/phy/phy_device.c b/src/kernel/linux/v4.19/drivers/net/phy/phy_device.c
index 5cdd75a..2560bbb 100644
--- a/src/kernel/linux/v4.19/drivers/net/phy/phy_device.c
+++ b/src/kernel/linux/v4.19/drivers/net/phy/phy_device.c
@@ -630,9 +630,9 @@
memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
+ printk("phy id = %x\n",phy_id);
if (r)
return ERR_PTR(r);
-
/* If the phy_id is mostly Fs, there is no device there */
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return ERR_PTR(-ENODEV);
@@ -2067,7 +2067,8 @@
return retval;
}
-
+
+ printk("%s: Registered new driver\n", new_driver->name);
pr_debug("%s: Registered new driver\n", new_driver->name);
return 0;
@@ -2126,7 +2127,7 @@
int rc;
rc = mdio_bus_init();
- if (rc)
+ if (rc)
return rc;
rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
@@ -2136,6 +2137,7 @@
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc) {
phy_driver_unregister(&genphy_10g_driver);
+
err_10g:
mdio_bus_exit();
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Kconfig b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Kconfig
index dba05f6..767d58d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Kconfig
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Kconfig
@@ -39,6 +39,7 @@
config BCM4359
tristate "BCM4359 support"
depends on BCMDHD
+ default n
config BCM4361
tristate "BCM4361 support"
@@ -48,6 +49,7 @@
config CYW89570
tristate "CYW89570 support"
depends on BCMDHD
+ default n
config DHD_OF_SUPPORT
bool "Use in-drive platform device"
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Makefile b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Makefile
index cf2bb88..7f176b4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Makefile
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/Makefile
@@ -1,6 +1,8 @@
# bcmdhd
#
-# Copyright (C) 1999-2020, Broadcom.
+# Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+#
+# Copyright (C) 1999-2017, Broadcom Corporation
#
# Unless you and Broadcom execute a separate written software license
# agreement governing use of this software, this software is licensed to you
@@ -39,13 +41,17 @@
-DDHD_RND_DEBUG -DDHD_DUMP_FILE_WRITE_FROM_KERNEL \
-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
-# In case not supported by compiler, remove the below option
+GCCVERSIONGTEQ9 := $(shell expr `$(CROSS_COMPILE)gcc -dumpversion | cut -f1 -d.` \>= 9)
+ifeq "$(GCCVERSIONGTEQ9)" "1"
+ DHDCFLAGS += -Wno-error=date-time
+endif
DHDCFLAGS += $(call cc-disable-warning, date-time)
DHDCFLAGS += $(call cc-disable-warning, stringop-overflow)
#################
# Common feature
#################
+DHDCFLAGS += -DAUTOMOTIVE_FEATURE
DHDCFLAGS += -DWL_VIRTUAL_APSTA
# Dongle init fail
DHDCFLAGS += -DDEBUG_DNGL_INIT_FAIL
@@ -69,15 +75,12 @@
# Enable Register access via dhd IOVAR
DHDCFLAGS += -DDHD_PCIE_REG_ACCESS -DDHD_BUS_MEM_ACCESS
-# Disable ASPM to avoid CTO in Interposer
-DHDCFLAGS += -DDHD_DISABLE_ASPM
-
# CUSTOMER4 flags
DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
-DHDCFLAGS += -DSUPPORT_HIDDEN_AP -DDHD_LOSSLESS_ROAMING
+DHDCFLAGS += -DSUPPORT_HIDDEN_AP
DHDCFLAGS += -DPASS_ALL_MCAST_PKTS -DESCAN_BUF_OVERFLOW_MGMT -DPKTPRIO_OVERRIDE
DHDCFLAGS += -DUSE_INITIAL_SHORT_DWELL_TIME
-DHDCFLAGS += -DENABLE_BCN_LI_BCN_WAKEUP -DSOFTAP_UAPSD_OFF
+DHDCFLAGS += -DSOFTAP_UAPSD_OFF
DHDCFLAGS += -DSUPPORT_LTECX -DSUPPORT_2G_VHT
DHDCFLAGS += -DSUPPORT_WL_TXPOWER -DBLOCK_IPV6_PACKET -DSUPPORT_DEEP_SLEEP
DHDCFLAGS += -DSUPPORT_AMPDU_MPDU_CMD -DSOFTAP_SEND_HANGEVT -DNUM_SCB_MAX_PROBE=3
@@ -87,6 +90,7 @@
DHDCFLAGS += -DSUPPORT_2G_VHT -DSUPPORT_5G_1024QAM_VHT
DHDCFLAGS += -DEWP_ECNTRS_LOGGING
DHDCFLAGS += -DEWP_ETD_PRSRV_LOGS
+DHDCFLAGS += -DCFI_CHECK
# Runtime PM feature
# DHDCFLAGS += -DDHD_PCIE_RUNTIMEPM -DMAX_IDLE_COUNT=11 -DCUSTOM_DHD_RUNTIME_MS=100
@@ -135,19 +139,23 @@
# Set PM 2 always regardless suspend/resume
DHDCFLAGS += -DSUPPORT_PM2_ONLY
+# FT roam
+DHDCFLAGS += -DWLFBT -DGTK_OFFLOAD_SUPPORT
+DHDCFLAGS += -DBCMCCX
+DHDCFLAGS += -DWBTEXT
+DHDCFLAGS += -DDHD_LOSSLESS_ROAMING
+
# For special PNO Event keep wake lock for 10sec
DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
DHDCFLAGS += -DMIRACAST_AMPDU_SIZE=8
#Vendor Extension
-DHDCFLAGS += -DWL_VENDOR_EXT_SUPPORT
+#DHDCFLAGS += -DWL_VENDOR_EXT_SUPPORT
#Gscan
DHDCFLAGS += -DGSCAN_SUPPORT
#PNO Scan Result Version
DHDCFLAGS += -DPFN_SCANRESULT_2
#RSSI Monitor
DHDCFLAGS += -DRSSI_MONITOR_SUPPORT
-#RTT
-DHDCFLAGS += -DRTT_SUPPORT -DRTT_DEBUG
#APF
DHDCFLAGS += -DAPF
#NDOffload
@@ -172,7 +180,7 @@
DHDCFLAGS += -DESCAN_RESULT_PATCH
# NAN
-DHDCFLAGS += -DWL_NAN -DWL_NAN_DISC_CACHE
+#DHDCFLAGS += -DWL_NAN -DWL_NAN_DISC_CACHE
# For Static Buffer
ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
@@ -183,9 +191,6 @@
DHDCFLAGS += -DWL_SCHED_SCAN
endif
-# Ioctl timeout 5000ms
-DHDCFLAGS += -DIOCTL_RESP_TIMEOUT=5000
-
# Prevent rx thread monopolize
DHDCFLAGS += -DWAIT_DEQUEUE
@@ -205,6 +210,7 @@
DHDCFLAGS += -DCUSTOM_FORCE_NODFS_FLAG
ifeq ($(CONFIG_ANDROID),y)
DHDCFLAGS += -DKEEP_WIFION_OPTION
+ DHDCFLAGS += -Wno-date-time
endif
# SoftAP
@@ -212,6 +218,12 @@
DHDCFLAGS += -DWL_CFG80211_ACL
ifeq ($(CONFIG_WL_AP_IF),y)
DHDCFLAGS += -DWL_STATIC_IF
+ DHDCFLAGS += -DDHD_NUM_STATIC_IFACES=2
+endif
+
+# QT support
+ifeq ($(CONFIG_QT), y)
+ DHDCFLAGS += -DBCMQT_HW -DBCMSLTGT -DBCMQT
endif
# Disable FRAMEBURST on VSDB
@@ -244,53 +256,54 @@
#############################
# Bring up WARs for CYW89570.
-# To be removed later
-#############################
-ifneq ($(filter y, ) $(CONFIG_CYW89570),)
- DHDCFLAGS += -DH2_BRING_UP
- DHDCFLAGS += -DDISABLE_BUILTIN_ROAM
- DHDCFLAGS += -DBCMTRXV3
+# # To be removed later
+# #############################
+ifneq ($(CONFIG_WIRELESS_EXT),)
+ DHDOFILES += wl_iw.o bcm_app_utils.o
+ DHDCFLAGS += -DWL_WIRELESS_EXT -DWLLXIW -DUSE_IW
+endif
+
+ifeq ($(CONFIG_TRXHDR_V3),y)
+ DHDCFLAGS += -DBCMTRXV3
+else
+ DHDCFLAGS += -DBCMTRXV4
endif
#########################
# Chip dependent feature
#########################
-ifneq ($(filter y, ) $(CONFIG_BCM4358) $(CONFIG_BCM4359) $(CONFIG_BCM4361) $(CONFIG_BCM4373) $(CONFIG_BCM89273) $(CONFIG_CYW89570),)
DHDCFLAGS += -DUSE_WL_TXBF
DHDCFLAGS += -DCUSTOM_DPC_CPUCORE=0
# New Features
DHDCFLAGS += -DWL11U
DHDCFLAGS += -DMFP
+# Disabling LPC for 89570 for now.
+# DHDCFLAGS += -DDHD_ENABLE_LPC
DHDCFLAGS += -DCUSTOM_COUNTRY_CODE
- DHDCFLAGS += -DRTT_SUPPORT -DRTT_DEBUG
DHDCFLAGS += -DNDO_CONFIG_SUPPORT
DHDCFLAGS += -DIPV6_NDO_SUPPORT
DHDCFLAGS += -DIGUANA_LEGACY_CHIPS
-# Disable LPC temporarily. To be enabled later.
-# DHDCFLAGS += -DDHD_ENABLE_LPC
-
-ifneq ($(CONFIG_BCMDHD_SDMMC),)
+ifeq ($(CONFIG_BCMDHD_SDMMC),y)
DHDCFLAGS += -DBCMLXSDMMC -DCUSTOM_TXGLOM=1
ifneq ($(CONFIG_HAVE_IMX8_SOC),)
DHDCFLAGS += -DCONFIG_DTS -DPLATFORM_IMX
endif
endif
-ifneq ($(CONFIG_BCMDHD_SDIOH_STD),)
+ifeq ($(CONFIG_BCMDHD_SDIOH_STD),y)
DHDCFLAGS += -DBCMSDIOH_STD
endif
-ifneq ($(CONFIG_BCMDHD_SDIO),)
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
DHDCFLAGS += -DBDC -DHW_OOB -DDHD_BCMEVENTS -DMMC_SDIO_ABORT
DHDCFLAGS += -DBCMSDIO -DUSE_SDIOFIFO_IOVAR
DHDCFLAGS += -DPROP_TXSTATUS -DLIMIT_BORROW
DHDCFLAGS += -DPROP_TXSTATUS_VSDB
DHDCFLAGS += -DUSE_WL_FRAMEBURST
DHDCFLAGS += -DCUSTOM_AMPDU_MPDU=16
- DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
# tput enhancement
DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
@@ -301,6 +314,12 @@
DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=40
DHDCFLAGS += -DMAX_HDR_READ=128
DHDCFLAGS += -DDHD_FIRSTREAD=64
+ifneq ($(CONFIG_BCM4373),)
+ DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=32
+else
+ DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
+
+endif
# bcn_timeout
DHDCFLAGS += -DCUSTOM_BCN_TIMEOUT_SETTING=5
@@ -350,7 +369,7 @@
# DHDCFLAGS += -DDHD_PCIE_RUNTIMEPM -DMAX_IDLE_COUNT=11 -DCUSTOM_DHD_RUNTIME_MS=100
# debug info
- DHDCFLAGS += -DDHD_WAKE_STATUS -DDHD_WAKE_RX_STATUS -DDHD_WAKE_EVENT_STATUS
+ DHDCFLAGS += -DDHD_WAKE_STATUS -DDHD_WAKE_RX_STATUS
# Enable SSSR Dump
DHDCFLAGS += -DDHD_SSSR_DUMP
@@ -364,7 +383,8 @@
# Enable MSI interrupt Support
DHDCFLAGS += -DDHD_MSI_SUPPORT
-endif
+# Enabled temporarily to avoid a build failure
+ DHDCFLAGS += -DDHD_PKTTS -DTX_STATUS_LATENCY_STATS
ifneq ($(filter y, $(CONFIG_BCM4354) $(CONFIG_BCM4356)),)
DHDCFLAGS += -DUSE_WL_TXBF
@@ -382,7 +402,7 @@
DHDCFLAGS += -DSAR_SUPPORT
# debug info
DHDCFLAGS += -DDHD_WAKE_STATUS
-ifneq ($(CONFIG_BCMDHD_SDIO),)
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
DHDCFLAGS += -DBDC -DOOB_INTR_ONLY -DHW_OOB -DDHD_BCMEVENTS -DMMC_SDIO_ABORT
DHDCFLAGS += -DBCMSDIO -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR
DHDCFLAGS += -DPROP_TXSTATUS
@@ -448,25 +468,26 @@
endif
#EXTRA_LDFLAGS += --strip-debug
+# tianyan: silence additional compiler warnings in this driver
+DHDCFLAGS += -Wno-array-bounds -Wno-sizeof-pointer-memaccess -Wno-unused-function
-#tianyan@2021.7.27 modify for add wifi6 module start
-DHDCFLAGS += -DDONGLE_ENABLE_ISOLATION
+
ifeq ($(DRIVER_TYPE),y)
-# DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
-#tianyan@2021.7.27 modify for add wifi6 module end
+ DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
DHDCFLAGS += -DUSE_LATE_INITCALL_SYNC
# Use kernel strlcpy() implementation instead of one, defined in bcmstdlib_s.c
DHDCFLAGS += -DBCM_USE_PLATFORM_STRLCPY
endif
-DHDOFILES := dhd_pno.o dhd_common.o dhd_ip.o dhd_custom_gpio.o \
+DHDOFILES += dhd_pno.o dhd_common.o dhd_ip.o dhd_custom_gpio.o \
dhd_linux.o dhd_linux_sched.o dhd_cfg80211.o dhd_linux_wq.o aiutils.o \
bcmevent.o bcmutils.o bcmwifi_channels.o hndpmu.o linux_osl.o linux_pkt.o \
- sbutils.o siutils.o wl_android.o wl_roam.o wl_cfg80211.o wl_cfgp2p.o \
+ sbutils.o siutils.o wl_android.o wl_roam.o wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o \
wl_cfg_btcoex.o wldev_common.o wl_linux_mon.o dhd_linux_platdev.o \
- dhd_pno.o dhd_rtt.o wl_cfg_btcoex.o hnd_pktq.o \
+ dhd_pno.o dhd_linux_pktdump.o wl_cfg_btcoex.o hnd_pktq.o \
hnd_pktpool.o wl_cfgvendor.o bcmxtlv.o bcm_app_utils.o dhd_debug.o \
- dhd_debug_linux.o wl_cfgnan.o dhd_mschdbg.o bcmbloom.o dhd_dbg_ring.o \
+ dhd_debug_linux.o dhd_mschdbg.o bcmbloom.o dhd_dbg_ring.o bcmstdlib_s.o \
+ dhd_linux_exportfs.o
ifneq ($(CONFIG_DHD_OF_SUPPORT),)
DHDCFLAGS += -DDHD_OF_SUPPORT
@@ -476,16 +497,16 @@
endif
endif
-ifneq ($(CONFIG_BCMDHD_SDMMC),)
+ifeq ($(CONFIG_BCMDHD_SDMMC),y)
DHDOFILES += bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o
endif
-ifneq ($(CONFIG_BCMDHD_SDIO),)
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
DHDOFILES += bcmsdh.o bcmsdh_linux.o
DHDOFILES += dhd_cdc.o dhd_wlfc.o dhd_sdio.o
endif
-ifneq ($(CONFIG_BCMDHD_SDIOH_STD),)
+ifeq ($(CONFIG_BCMDHD_SDIOH_STD),y)
DHDOFILES += bcmsdstd.o bcmsdstd_linux.o
endif
@@ -495,17 +516,16 @@
endif
ifneq ($(filter -DDHD_LB, $(DHDCFLAGS)),)
-#DHDOFILES += dhd_linux_lb.o
+DHDOFILES += dhd_linux_lb.o
endif
ifneq ($(filter -DDHD_BANDSTEER, $(DHDCFLAGS)),)
DHDOFILES += dhd_bandsteer.o
endif
-ifneq ($(CONFIG_WIRELESS_EXT),)
- DHDOFILES += wl_iw.o bcm_app_utils.o
- DHDCFLAGS += -DWL_WIRELESS_EXT -DWLLXIW -DUSE_IW
+ifneq ($(CONFIG_QT),y)
+ DHDCFLAGS += -DRTT_SUPPORT -DRTT_DEBUG
+ DHDOFILES += dhd_rtt.o
endif
-
EXTRA_CFLAGS += $(DHDCFLAGS) -DDHD_DEBUG
EXTRA_CFLAGS += -DSRCBASE=\"$(src)\"
EXTRA_CFLAGS += -I$(src)/include/ -I$(src)/
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/aiutils.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/aiutils.c
index 520f026..4d2bcd0 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/aiutils.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/aiutils.c
@@ -2,7 +2,9 @@
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -55,7 +57,11 @@
/* EROM parsing */
+#ifdef BCMQT
+#define SPINWAIT_TIME_US 3000
+#else
#define SPINWAIT_TIME_US 300
+#endif /* BCMQT */
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
@@ -252,12 +258,16 @@
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
&addrl, &addrh, &sizel, &sizeh);
if (asd != 0) {
- sii->oob_router = addrl;
+ if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
+ sii->oob_router1 = addrl;
+ } else {
+ sii->oob_router = addrl;
+ }
}
}
if (cid != NS_CCB_CORE_ID &&
cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
- cid != HUB_CORE_ID)
+ cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
continue;
}
@@ -951,6 +961,97 @@
}
/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w = 0;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
+
+/*
* If there is no need for fiddling with interrupts or core switches (typically silicon
* back plane registers, pci registers and chipcommon registers), this function
* returns the register offset on this core to a mapped address. This address can
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcm_app_utils.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcm_app_utils.c
index 7f244a6..ec4b9dc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcm_app_utils.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcm_app_utils.c
@@ -3,7 +3,9 @@
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmbloom.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmbloom.c
index cea32a9..94e0e3c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmbloom.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmbloom.c
@@ -1,7 +1,9 @@
/*
* Bloom filter support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -189,7 +191,9 @@
pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
/* all bits must be set for a match */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) {
+ CLANG_DIAGNOSTIC_POP()
err = BCME_NOTFOUND;
break;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmevent.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmevent.c
index 4c23f5e..a631a12 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmevent.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -1,7 +1,9 @@
/*
* bcmevent read-only data shared by kernel or app layers
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -72,6 +74,7 @@
BCMEVENT_NAME(WLC_E_AUTOAUTH),
BCMEVENT_NAME(WLC_E_EAPOL_MSG),
BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_IND_DOS_STATUS),
BCMEVENT_NAME(WLC_E_ADDTS_IND),
BCMEVENT_NAME(WLC_E_DELTS_IND),
BCMEVENT_NAME(WLC_E_BCNSENT_IND),
@@ -149,7 +152,7 @@
#ifdef WLWNM
BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
#endif /* WLWNM */
-#if defined(WL_PROXDETECT)
+#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT)
BCMEVENT_NAME(WLC_E_PROXD),
#endif // endif
BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
@@ -200,6 +203,8 @@
BCMEVENT_NAME(WLC_E_NAN),
#endif /* WL_NAN */
BCMEVENT_NAME(WLC_E_RPSNOA),
+ BCMEVENT_NAME(WLC_E_PHY_CAL),
+ BCMEVENT_NAME(WLC_E_WA_LQM),
};
const char *bcmevent_get_name(uint event_type)
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh.c
index b43ef06..b453875 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -2,7 +2,9 @@
* BCMSDH interface glue
* implement bcmsdh API for SDIOH driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
index c5d0b0d..e639a6f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -1,7 +1,9 @@
/*
* SDIO access interface for drivers - linux specific (pci only)
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -96,6 +98,9 @@
if (vendor == VENDOR_SI_IMAGE) {
return (TRUE);
}
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_CYPRESS) {
+ return (TRUE);
+ }
/* Check for BRCM 27XX Standard host controller */
if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
return (TRUE);
@@ -157,10 +162,13 @@
bcmsdh_osinfo->dev = dev;
osl_set_bus_handle(osh, bcmsdh);
-#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#if (!defined(CONFIG_PM_WAKELOCKS) || !defined(CONFIG_HAS_WAKELOCK)) && \
+ (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
if (dev && device_init_wakeup(dev, true) == 0)
bcmsdh_osinfo->dev_wake_enabled = TRUE;
-#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+#endif /* (!CONFIG_PM_WAKELOCKS || !CONFIG_HAS_WAKELOCK) &&
+ * (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ */
#if defined(OOB_INTR_ONLY)
spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
@@ -175,9 +183,15 @@
/* Read the vendor/device ID from the CIS */
vendevid = bcmsdh_query_device(bcmsdh);
+
/* try to attach to the target device */
+#if defined(BCMSPI) && defined(BCMPCISPIHOST)
+ bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+ slot_num, 0, bus_type, (void *)regs, NULL, bcmsdh);
+#else
bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+#endif /* BCMSPI && BCMPCISPIHOST */
if (bcmsdh_osinfo->context == NULL) {
SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
goto err;
@@ -198,11 +212,14 @@
{
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
-#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#if (!defined(CONFIG_PM_WAKELOCKS) || !defined(CONFIG_HAS_WAKELOCK)) && \
+ (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
if (bcmsdh_osinfo->dev)
device_init_wakeup(bcmsdh_osinfo->dev, false);
bcmsdh_osinfo->dev_wake_enabled = FALSE;
-#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+#endif /* (!CONFIG_PM_WAKELOCKS || !CONFIG_HAS_WAKELOCK) &&
+ * (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ */
drvinfo.remove(bcmsdh_osinfo->context);
MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
@@ -296,18 +313,24 @@
void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
{
-#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#if (!defined(CONFIG_PM_WAKELOCKS) || !defined(CONFIG_HAS_WAKELOCK)) && \
+ (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
pm_stay_awake(bcmsdh_osinfo->dev);
-#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+#endif /* (!CONFIG_PM_WAKELOCKS || !CONFIG_HAS_WAKELOCK) &&
+ * (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ */
}
void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
{
-#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#if (!defined(CONFIG_PM_WAKELOCKS) || !defined(CONFIG_HAS_WAKELOCK)) && \
+ (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
pm_relax(bcmsdh_osinfo->dev);
-#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+#endif /* (!CONFIG_PM_WAKELOCKS || !CONFIG_HAS_WAKELOCK) &&
+ * (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ */
}
bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
index 5f0e5dc..ede9593 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -1,7 +1,9 @@
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -38,22 +40,7 @@
#include <sdiovar.h> /* ioctl/iovars */
#include <linux/mmc/core.h>
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0))
-#include <drivers/mmc/core/host.h>
-void
-mmc_host_clk_hold(struct mmc_host *host)
-{
- BCM_REFERENCE(host);
- return;
-}
-
-void
-mmc_host_clk_release(struct mmc_host *host)
-{
- BCM_REFERENCE(host);
- return;
-}
-#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
#include <drivers/mmc/core/host.h>
#else
#include <linux/mmc/host.h>
@@ -72,6 +59,29 @@
#endif // endif
#include "bcmsdh_sdmmc.h"
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(4, 4, 0))
+static inline void
+mmc_host_clk_hold(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+
+static inline void
+mmc_host_clk_release(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+
+static inline unsigned int
+mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
+
#ifndef BCMSDH_MODULE
extern int sdio_function_init(void);
extern void sdio_function_cleanup(void);
@@ -82,7 +92,14 @@
static void IRQHandlerF2(struct sdio_func *func);
#endif /* !defined(OOB_INTR_ONLY) */
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+#if defined(OEM_ANDROID) && !defined(CONFIG_SOC_S5E5515)
+static int sdio_reset_comm(struct mmc_card *card)
+{
+ return 0;
+}
+#else
extern int sdio_reset_comm(struct mmc_card *card);
+#endif /* OEM_ANDROID && !CONFIG_SOC_S5E5515 */
#define DEFAULT_SDIO_F2_BLKSIZE 512
#ifndef CUSTOM_SDIO_F2_BLKSIZE
@@ -109,7 +126,7 @@
uint sd_power = 1; /* Default to SD Slot powered ON */
uint sd_clock = 1; /* Default to SD Clock turned ON */
uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
-uint sd_msglevel = 0x01;
+uint sd_msglevel = SDH_ERROR_VAL;
uint sd_use_dma = TRUE;
#ifndef CUSTOM_RXCHAIN
@@ -121,6 +138,9 @@
DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+#if !defined(ARCH_DMA_MINALIGN)
+#define ARCH_DMA_MINALIGN 128
+#endif /* !defined(ARCH_DMA_MINALIGN) */
#define DMA_ALIGN_MASK 0x03
#define MMC_SDIO_ABORT_RETRY_LIMIT 5
@@ -139,9 +159,6 @@
uint sdmmc_get_clock_rate(sdioh_info_t *sd);
void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
-#ifdef CONFIG_HAVE_IMX8_SOC
-#define SD_CLOCK_UHS_IMX8 104000000
-#endif // endif
static int
sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
{
@@ -242,10 +259,8 @@
sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
DHD_ERROR(("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate));
+
sdioh_sdmmc_card_enablefuncs(sd);
-#ifdef CONFIG_HAVE_IMX8_SOC
- sdmmc_set_clock_rate(sd, SD_CLOCK_UHS_IMX8);
-#endif // endif
sd_trace(("%s: Done\n", __FUNCTION__));
return sd;
@@ -1069,7 +1084,9 @@
* a restriction on max tx/glom count (based on host->max_segs).
*/
if (sg_count >= ARRAYSIZE(sd->sg_list)) {
- sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
+ sd_err(("%s: sg list entries(%u) exceed limit(%u),"
+ " sd blk_size=%u\n",
+ __FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size));
return (SDIOH_API_RC_FAIL);
}
pdata += pkt_offset;
@@ -1081,8 +1098,9 @@
* DMA descriptor, use multiple sg buffers when xfer_size is bigger than
* max_seg_size
*/
- if (sg_data_size > host->max_seg_size)
+ if (sg_data_size > host->max_seg_size) {
sg_data_size = host->max_seg_size;
+ }
sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
ttl_len += sg_data_size;
@@ -1218,7 +1236,7 @@
ASSERT(buffer);
/* buffer and length are aligned, use it directly so we can avoid memory copy */
- if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+ if (((ulong)buffer & (ARCH_DMA_MINALIGN - 1)) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
@@ -1558,11 +1576,6 @@
DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
hz = host->f_max;
}
-#ifdef CONFIG_HAVE_IMX8_SOC
- if (hz >= SD_CLOCK_UHS_IMX8) {
- ios->timing = MMC_TIMING_UHS_SDR104;
- }
-#endif // endif
ios->clock = hz;
host->ops->set_ios(host, ios);
DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
index 4dab751..149f2da 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -1,7 +1,9 @@
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -43,6 +45,7 @@
#include <dhd_linux.h>
#include <bcmsdh_sdmmc.h>
#include <dhd_dbg.h>
+#include <bcmdevs.h>
#if !defined(SDIO_VENDOR_ID_BROADCOM)
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
@@ -180,9 +183,17 @@
/* devices we support, null terminated */
static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
- { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
- { 0, 0, 0, 0 /* end: all zeroes */
- },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4373_CHIP_ID) },
+ /* { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_ANY_ID) }, */
+ /* end: all zeroes */
+ { 0, 0, 0, 0},
};
MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
@@ -255,6 +266,15 @@
static int dummy_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
+ sd_err(("%s: enter\n", __FUNCTION__));
+ if (func)
+ sd_err(("%s: func->num=0x%x; \n", __FUNCTION__, func->num));
+ if (id) {
+ sd_err(("%s: class=0x%x; vendor=0x%x; device=0x%x\n", __FUNCTION__,
+ id->class, id->vendor, id->device));
+ if (id->vendor != SDIO_VENDOR_ID_BROADCOM)
+ return -ENODEV;
+ }
if (func && (func->num != 2)) {
return 0;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
index 267cc16..d75e1e7 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
@@ -1,7 +1,9 @@
/*
* Broadcom SPI Host Controller Driver - Linux Per-port
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -42,6 +44,7 @@
#include <sdio.h> /* SDIO Device and Protocol Specs */
#include <linux/sched.h> /* request_irq(), free_irq() */
#include <bcmsdspi.h>
+#include <bcmdevs.h>
#include <bcmspi.h>
#endif /* BCMSPI_ANDROID */
@@ -69,6 +72,200 @@
#define BLOCKABLE() (!in_interrupt())
#endif // endif
+/* For Broadcom PCI-SPI Host controller (Raggedstone) */
+#if defined(BCMSPI) && defined(BCMPCISPIHOST)
+#ifndef SDLX_MSG
+#define SDLX_MSG(x) printf x
+#endif // endif
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info,
+ uint bus_type, uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+ { vendor: PCI_ANY_ID,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * PCI-SPI Host Controller: pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif // endif
+ suspend: NULL,
+ resume: NULL,
+};
+
+/* Force detection to a particular PCI */
+/* slot only . Allows for having multiple */
+/* WL devices at once in a PC */
+/* Only one instance of dhd will be */
+/* usable at a time */
+/* Upper word is bus number, */
+/* lower word is slot number */
+/* Default value of 0xffffffff turns this off */
+extern uint sd_pci_slot;
+module_param(sd_pci_slot, uint, 0);
+
+/**
+ * Detect supported Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported PCI Host
+ * Controller. If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ sdioh_info_t *sdioh = NULL;
+
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFFFFF) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Found compatible SDIOHC"
+ :"Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+ pdev->device));
+ return -ENODEV;
+ }
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Using compatible SDIOHC"
+ :"WARNING, forced use of unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+ * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ if (pdev->vendor == VENDOR_BROADCOM) {
+ SDLX_MSG(("%s: Unknown Broadcom device (vendor: %#x, device: %#x).\n",
+ __FUNCTION__, pdev->vendor, pdev->device));
+ }
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+
+ if (!(sdioh = sdioh_attach(osh, (void *)(ulong)pci_resource_start(pdev, 0), pdev->irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdioh->bcmsdh = bcmsdh_probe(osh, &pdev->dev, sdioh, NULL, PCI_BUS, -1, -1);
+ if (sdioh->bcmsdh == NULL) {
+ sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ pci_set_drvdata(pdev, sdioh);
+ return 0;
+
+ /* error handling */
+err:
+ if (sdioh != NULL)
+ sdioh_detach(osh, sdioh);
+ if (osh != NULL)
+ osl_detach(osh);
+ return -ENOMEM;
+
+}
+
+/**
+ * Detach from target devices and PCI-SPI Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+
+ sdioh_info_t *sdioh = NULL;
+ osl_t *osh;
+
+ sdioh = pci_get_drvdata(pdev);
+ if (sdioh == NULL) {
+ sd_err(("%s: error, no sdh handler found\n", __FUNCTION__));
+ return;
+ }
+
+ osh = sdioh->osh;
+ bcmsdh_remove(sdioh->bcmsdh);
+ sdioh_detach(osh, sdioh);
+ osl_detach(osh);
+
+}
+#endif /* BCMSPI && BCMPCISPIHOST */
/* Interrupt handler */
static irqreturn_t
sdspi_isr(int irq, void *dev_id
@@ -149,6 +350,7 @@
.owner = THIS_MODULE,
},
};
+#endif /* BCMSPI_ANDROID */
/*
* module init
@@ -157,8 +359,11 @@
{
int error = 0;
sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__));
-
+#if defined(BCMSPI) && defined(BCMPCISPIHOST)
+ error = pci_module_init(&bcmsdh_pci_driver);
+#else
error = spi_register_driver(&bcmsdh_spi_driver);
+#endif /* BCMSPI && BCMPCISPIHOST */
return error;
}
@@ -169,9 +374,12 @@
void bcmsdh_unregister_client_driver(void)
{
sd_trace(("%s Enter\n", __FUNCTION__));
+#if defined(BCMSPI) && defined(BCMPCISPIHOST)
+ pci_unregister_driver(&bcmsdh_pci_driver);
+#else
spi_unregister_driver(&bcmsdh_spi_driver);
+#endif /* BCMSPI && BCMPCISPIHOST */
}
-#endif /* BCMSPI_ANDROID */
/* Register with Linux for interrupts */
int
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd.c
index f4e2126..104b3c3 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd.c
@@ -1,7 +1,9 @@
/*
* 'Standard' SDIO HOST CONTROLLER driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -52,6 +54,7 @@
*/
#define SDH_GPIO16 16
#define SDH_GPIO_ENABLE 0xffff
+#define DEFAULT_F2_BLKSIZE 256
#include <bcmsdstd.h>
#include <sbsdio.h> /* SDIOH (host controller) core hardware definitions */
@@ -61,7 +64,7 @@
uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
-uint sd_f2_blocksize = 64; /* Default blocksize */
+uint sd_f2_blocksize; /* Default blocksize */
uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */
#define sd3_trace(x)
@@ -89,11 +92,11 @@
* see sd3_autoselect_uhsi_max
*/
int sd_uhsimode = SD3CLKMODE_DISABLED;
-uint sd_tuning_period = 0;
+uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
uint sd_delay_value = 500000;
-/* Enables host to dongle glomming. Also increases the
- * dma buffer size. This will increase the rx throughput
- * as there will be lesser CMD53 transactions
+/* Enables host to dongle glomming. Also increases the
+ * dma buffer size. This will increase the rx throughput
+ * as there will be lesser CMD53 transactions
*/
#ifdef BCMSDIOH_TXGLOM
uint sd_txglom;
@@ -140,13 +143,13 @@
0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
* SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
*/
- 0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
+ 0x0002, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
* SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
*/
0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
*/
- 0x0002 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
+ 0x0004 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
*/
};
@@ -156,7 +159,11 @@
Voltage switch: not supported
Clock Tuning: not supported
*/
+#ifdef BCMQT
+bool sd3_sw_override1 = TRUE;
+#else
bool sd3_sw_override1 = FALSE;
+#endif // endif
bool sd3_sw_read_magic_bytes = FALSE;
#define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
@@ -176,6 +183,10 @@
#define F1_SLEEPCSR_ADDR 0x1001F
+#ifdef BCMQT
+#define SDSTD_WAIT_TIME_MS 1
+#endif /* BCMQT */
+
uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz
:might get changed in code for 208
*/
@@ -406,6 +417,7 @@
sdstd_3_osclean_tuning(sd);
sd->sd3_tuning_reqd = FALSE;
}
+ sd->sd3_tuning_disable = FALSE;
sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
sdstd_free_irq(sd->irq, sd);
if (sd->card_init_done)
@@ -488,7 +500,8 @@
IOV_FORCERB,
IOV_CLOCK,
IOV_UHSIMOD,
- IOV_TUNEMOD
+ IOV_TUNEMOD,
+ IOV_TUNEDIS
};
const bcm_iovar_t sdioh_iovars[] = {
@@ -514,6 +527,7 @@
{"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
{"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0},
{"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0},
+ {"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0},
{NULL, 0, 0, 0, 0, 0 }
};
@@ -913,6 +927,10 @@
break;
}
+ case IOV_SVAL(IOV_TUNEDIS):
+ si->sd3_tuning_disable = (bool)int_val;
+ break;
+
default:
bcmerror = BCME_UNSUPPORTED;
break;
@@ -994,7 +1012,7 @@
if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
/* Change to DATA_TRANSFER_IDLE */
- sd->sd3_dat_state = DATA_TRANSFER_IDLE;
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
sdstd_unlock(sd);
return status;
}
@@ -1456,6 +1474,11 @@
/* Otherwise, wait for the command to complete */
retries = RETRIES_LARGE;
do {
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
} while (--retries &&
(GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
@@ -1471,6 +1494,11 @@
sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
retries = RETRIES_LARGE;
do {
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
SW_RESET_CMD)) && retries--);
@@ -1502,6 +1530,11 @@
sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
retries = RETRIES_LARGE;
do {
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
SW_RESET_DAT)) && retries--);
@@ -1562,6 +1595,11 @@
rflags = sdstd_rreg8(sd, SD_SoftwareReset);
if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
break;
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while (--retries);
if (!retries) {
@@ -1615,6 +1653,14 @@
if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
return SUCCESS;
+#ifdef BCMQT
+ if (regval == 0xFFFF) {
+ sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval));
+ sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
+ return SUCCESS;
+ }
+#endif // endif
+
sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
__FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
sdstd_rreg(sdioh_info, SD_PresentState)));
@@ -1633,6 +1679,11 @@
for (retries = RETRIES_LARGE; retries; retries--) {
if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
break;
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
}
if (!retries) {
sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
@@ -1649,6 +1700,11 @@
for (retries = RETRIES_LARGE; retries; retries--) {
if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
break;
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
}
if (!retries) {
sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
@@ -1702,6 +1758,11 @@
sdstd_wreg8(sd, SD_SoftwareReset, regval);
do {
sd_trace(("%s: waiting for reset\n", __FUNCTION__));
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
if (!retries) {
@@ -1797,6 +1858,10 @@
detect_slots = TRUE;
/* Controller supports SDMA, so turn it on here. */
sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_CYPRESS) {
+ sd_info(("%s: Found Cypress 27xx Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_BCM27XX;
+ detect_slots = FALSE;
} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
sd->controller_type = SDIOH_TYPE_BCM27XX;
@@ -1842,6 +1907,19 @@
/* Controller supports ADMA2, so turn it on here. */
sd->sd_dma_mode = DMA_MODE_ADMA2;
+ } else if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
+ ((SDIOH_FPGA_ID << 16) | VENDOR_CYPRESS)) {
+ sd_err(("%s: Found Cypress Standard SDIO Host Controller FPGA\n", __FUNCTION__));
+ /* Set BAR0 Window to SDIOSTH core */
+ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
+
+ /* Set defaults particular to this controller. */
+ detect_slots = TRUE;
+ num_slots = 1;
+ first_bar = 0;
+
+ /* Controller supports ADMA2, so turn it on here. */
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
}
/* Map in each slot on the board and query it to see if a
@@ -1856,6 +1934,10 @@
for (slot = 0; slot < num_slots; slot++) {
bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
+#ifdef BCMQT
+ /* PCIe 64-bit alignment */
+ bar &= 0xfffffff8;
+#endif // endif
sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
(uintptr)bar, SDIOH_REG_WINSZ);
@@ -1887,13 +1969,26 @@
}
bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
+#ifdef BCMQT
+ /* PCIe 64-bit alignment */
+ bar &= 0xfffffff8;
+#endif // endif
sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
+#ifdef BCMQT
+ sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
+ full_slot,
+ (full_slot + first_bar),
+ OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4) &
+ 0xfffffff8,
+ sd->mem_space));
+#else
sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
full_slot,
(full_slot + first_bar),
OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
sd->mem_space));
+#endif /* BCMQT */
sd->adapter_slot = full_slot;
@@ -1968,6 +2063,17 @@
sd->host_UHSISupported = 1;
}
+#ifdef BCMQT
+ {
+ uint32 intmask;
+
+ /* FIX: force interrupts with QT sdio20 host */
+ /* pci cw [expr $def(configbase) +0x95] 1 2 */
+ intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4);
+ intmask |= 0x0200;
+ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask);
+ }
+#endif // endif
return (SUCCESS);
}
#define CMD5_RETRIES 1000
@@ -1990,6 +2096,11 @@
sdstd_cmd_getrsp(sd, cmd_rsp, 1);
if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
+#ifdef BCMQT
+ if (retries != CMD5_RETRIES) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
if (!retries)
return ERROR;
@@ -2135,7 +2246,7 @@
fn_ints = INTR_CTL_FUNC1_EN;
if (sd->num_funcs >= 2) {
- set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+ set_client_block_size(sd, 2, DEFAULT_F2_BLKSIZE);
fn_ints |= INTR_CTL_FUNC2_EN;
}
@@ -2272,6 +2383,7 @@
sd_err(("%s: CMD19 failed\n", __FUNCTION__));
val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
sdstd_wreg16(sd, SD3_HostCntrl2, val1);
return status;
}
@@ -2296,6 +2408,7 @@
__FUNCTION__));
val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
sdstd_wreg16(sd, SD3_HostCntrl2, val1);
return ERROR;
}
@@ -2336,6 +2449,7 @@
"exceeded!\n", __FUNCTION__));
val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
sdstd_wreg16(sd, SD3_HostCntrl2, val1);
return ERROR;
}
@@ -2357,6 +2471,7 @@
sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
sdstd_wreg16(sd, SD3_HostCntrl2, val1);
return ERROR;
}
@@ -2538,7 +2653,7 @@
{
int retries = 0;
- if (sd->sd3_tuning_reqd) {
+ if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
if (tuning_param == CHECK_TUNING_PRE_DATA) {
if (sd->sd3_tun_state == TUNING_ONGOING) {
@@ -2616,10 +2731,11 @@
uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
{
- if (sd_tuning_period == 0)
+ if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
return GFIELD(sd->caps3, CAP3_RETUNING_TC);
- else
+ } else {
return (uint8)sd_tuning_period;
+ }
}
uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
@@ -2834,12 +2950,16 @@
val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+#ifdef BCMQT
+ /* wait 10s for Zebu */
+ OSL_DELAY(10 * 1000 * 1000);
+#else
/* wait 5ms */
- OSL_DELAY(5000);
+ OSL_DELAY(5 * 1000);
+#endif /* BCMQT */
/* check 1.8V sig enable in HC2. if cleared, error */
val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
-
if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
return ERROR;
@@ -2849,8 +2969,13 @@
val1 = sdstd_rreg16(sd, SD_ClockCntrl);
sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);
+#ifdef BCMQT
+ /* wait 5s for Zebu */
+ OSL_DELAY(5 * 1000 * 1000);
+#else
/* wait 1ms */
- OSL_DELAY(1000);
+ OSL_DELAY(1 * 1000);
+#endif /* BCMQT */
/* check DAT[3..0] using Present State Reg. If not 0b1111, error */
presst = sdstd_rreg(sd, SD_PresentState);
@@ -3139,6 +3264,9 @@
/* Set timeout control (adjust default value based on divisor).
* Disabling timeout interrupts during setting is advised by host spec.
*/
+#ifdef BCMQT
+ if (GFIELD(sd->caps, CAP_BASECLK) < 50)
+#endif // endif
{
uint toval;
@@ -3155,6 +3283,13 @@
sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
}
+#ifdef BCMQT
+ else {
+ sd_info(("%s: REsetting err int control\n", __FUNCTION__));
+ regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
+ }
+#endif // endif
OSL_DELAY(2);
sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
@@ -3451,6 +3586,7 @@
{
sd_trace(("%s\n", __FUNCTION__));
sd->sd3_tuning_reqd = FALSE;
+ sd->sd3_tuning_disable = FALSE;
if ((sdstd_host_init(sd)) != SUCCESS) {
return ERROR;
}
@@ -3613,6 +3749,11 @@
do {
sdstd_os_yield(sd);
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
if (!retries) {
@@ -3639,6 +3780,11 @@
retries = RETRIES_LARGE;
do {
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
/* Check for any errors from the data phase */
@@ -3719,6 +3865,11 @@
do {
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while (--retries && !(int_reg & norm) && !(err_reg & err));
norm |= sd->intmask;
@@ -3784,6 +3935,11 @@
/* Wait for Write Buffer to become ready */
do {
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
@@ -3810,6 +3966,11 @@
/* Wait for Transfer Complete */
retries = RETRIES_LARGE;
do {
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
@@ -3870,6 +4031,11 @@
if (retries == RETRIES_SMALL)
sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
__FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
+#ifdef BCMQT
+ else {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
}
if (!retries) {
sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
@@ -4079,7 +4245,7 @@
xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
if (sdioh_info->client_block_size[func] != blocksize)
- set_client_block_size(sdioh_info, 1, blocksize);
+ set_client_block_size(sdioh_info, func, blocksize);
if (blockcount > 1) {
xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
@@ -4101,6 +4267,11 @@
PRES_DAT_INHIBIT) && --retries) {
sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
__FUNCTION__, cmd));
+#ifdef BCMQT
+ if (retries != RETRIES_SMALL) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
}
if (!retries) {
sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
@@ -4210,6 +4381,11 @@
do {
int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
sdstd_os_yield(sdioh_info);
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while (--retries &&
(GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
(GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
@@ -4226,6 +4402,11 @@
retries = RETRIES_LARGE;
do {
sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+#ifdef BCMQT
+ if (retries != RETRIES_LARGE) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ }
+#endif /* BCMQT */
} while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
SW_RESET_CMD)) && retries--);
@@ -4268,6 +4449,9 @@
uint32 i;
uint8 *localbuf = NULL;
#endif // endif
+#ifdef BCMQT
+ int retries;
+#endif // endif
ASSERT(nbytes);
@@ -4419,6 +4603,13 @@
/* If not on, wait for it (or for xfer error) */
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+#ifdef BCMQT
+ retries = RETRIES_LARGE;
+ while (!(int_reg & int_bit) && --retries) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ }
+#endif // endif
if (!(int_reg & int_bit)) {
status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
local_yield, &int_reg);
@@ -4508,6 +4699,13 @@
/* If not on, wait for it (or for xfer error) */
int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+#ifdef BCMQT
+ retries = RETRIES_LARGE;
+ while (!(int_reg & int_bit) && --retries) {
+ OSL_SLEEP(SDSTD_WAIT_TIME_MS);
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ }
+#endif // endif
if (!(int_reg & int_bit)) {
status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
switch (status) {
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd_linux.c
index 4c32400..0d39c6d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmsdstd_linux.c
@@ -1,7 +1,9 @@
/*
* 'Standard' SDIO HOST CONTROLLER driver - linux portion
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -52,7 +54,7 @@
sdioh_info_t *sd;
spinlock_t lock;
wait_queue_head_t intr_wait_queue;
- struct timer_list tuning_timer;
+ timer_list_compat_t tuning_timer;
int tuning_timer_exp;
atomic_t timer_enab;
struct tasklet_struct tuning_tasklet;
@@ -68,6 +70,7 @@
static void
sdstd_3_ostasklet(ulong data);
+
static void
sdstd_3_tuning_timer(ulong data);
@@ -176,16 +179,24 @@
sdstd_3_osinit_tuning(sdioh_info_t *sd)
{
struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ uint8 timer_count = sdstd_3_get_tuning_exp(sdos->sd);
sd_trace(("%s Enter\n", __FUNCTION__));
/* initialize timer and tasklet for tuning */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
init_timer(&sdos->tuning_timer);
sdos->tuning_timer.data = (ulong)sdos;
sdos->tuning_timer.function = sdstd_3_tuning_timer;
- sdos->tuning_timer_exp = 2 * (sdstd_3_get_tuning_exp(sdos->sd));
+#else
+ init_timer_compat(&sdos->tuning_timer, sdstd_3_tuning_timer, sdos);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */
+ if (timer_count == CAP3_RETUNING_TC_DISABLED || timer_count > CAP3_RETUNING_TC_1024S) {
+ sdos->tuning_timer_exp = 0;
+ } else {
+ sdos->tuning_timer_exp = 1 << (timer_count - 1);
+ }
tasklet_init(&sdos->tuning_tasklet, sdstd_3_ostasklet, (ulong)sdos);
if (sdos->tuning_timer_exp) {
- sdos->tuning_timer.expires = jiffies + sdos->tuning_timer_exp * HZ;
add_timer(&sdos->tuning_timer);
atomic_set(&sdos->timer_enab, TRUE);
}
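
Note on the hunk above: the expression 1 << (timer_count - 1) follows the way the host
controller's CAP3 re-tuning timer-count field is encoded — the field value is an exponent,
so a value of n corresponds to a 2^(n-1) second re-tuning period. A field value of 4, for
example, now yields an 8 second tuning_timer_exp (the removed code used a linear
2 * timer_count), while CAP3_RETUNING_TC_DISABLED or any value above
CAP3_RETUNING_TC_1024S leaves tuning_timer_exp at 0, so the periodic timer is never armed.
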
@@ -261,6 +272,13 @@
/* * enable ISR's */
local_irq_restore(int_flags);
sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd));
+#ifdef BCMSDIOH_STD_TUNING_WAR
+ /*
+ * Observed intermittent SDIO command error after re-tuning done
+ * successfully. Re-tuning twice is giving much reliable results.
+ */
+ sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd));
+#endif /* BCMSDIOH_STD_TUNING_WAR */
/* * disable ISR's */
local_irq_save(int_flags);
sdstd_3_set_tune_state(sd, TUNING_IDLE);
@@ -273,8 +291,7 @@
/* start retuning timer if enabled */
if ((sdos->tuning_timer_exp) && (timer_enab)) {
if (sd->sd3_tuning_reqd) {
- sdos->tuning_timer.expires = jiffies + sdos->tuning_timer_exp * HZ;
- mod_timer(&sdos->tuning_timer, sdos->tuning_timer.expires);
+ mod_timer(&sdos->tuning_timer, jiffies + sdos->tuning_timer_exp * HZ);
}
}
exit:
@@ -413,6 +430,16 @@
}
}
+#ifdef BCMQT
+void
+sdstd_os_yield(sdioh_info_t *sd)
+{
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 29))
+ yield();
+#endif // endif
+}
+#endif /* BCMQT */
+
/* Returns 0 for success, -1 for interrupted, -2 for timeout */
int
sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool local_yield, uint16 *bits)
@@ -466,8 +493,7 @@
if (val == SD_DHD_ENABLE_PERIODIC_TUNING) {
/* start of tuning timer */
- sdos->tuning_timer.expires = jiffies + sdos->tuning_timer_exp * HZ;
- mod_timer(&sdos->tuning_timer, sdos->tuning_timer.expires);
+ mod_timer(&sdos->tuning_timer, jiffies + sdos->tuning_timer_exp * HZ);
}
if (val == SD_DHD_DISABLE_PERIODIC_TUNING) {
/* stop periodic timer */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmspibrcm.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmspibrcm.c
index bb6fbef..4490d07 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmspibrcm.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmspibrcm.c
@@ -1,7 +1,9 @@
/*
* Broadcom BCMSDH to gSPI Protocol Conversion Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -75,9 +77,10 @@
uint sd_hiok = FALSE; /* Use hi-speed mode if available? */
uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */
+uint sd_f1_blocksize = 64; /* Default blocksize */
uint sd_f2_blocksize = 64; /* Default blocksize */
-uint sd_divisor = 2;
+uint sd_divisor = 1;
uint sd_power = 1; /* Default to SD Slot powered ON */
uint sd_clock = 1; /* Default to SD Clock turned ON */
uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */
@@ -336,22 +339,22 @@
};
const bcm_iovar_t sdioh_iovars[] = {
- {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
- {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
- {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
- {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
- {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
- {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
- {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
- {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
- {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
- {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
- {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
- {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
- {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
- {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
- {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 },
- {NULL, 0, 0, 0, 0 }
+ {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
+ {"spi_errstats", IOV_SPIERRSTATS, 0, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+ {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0, 0 }
};
int
@@ -362,7 +365,6 @@
int bcmerror = 0;
int val_size;
int32 int_val = 0;
- bool bool_val;
uint32 actionid;
/*
sdioh_regs_t *regs;
@@ -401,8 +403,6 @@
if (plen >= (int)sizeof(int_val))
bcopy(params, &int_val, sizeof(int_val));
- bool_val = (int_val != 0) ? TRUE : FALSE;
-
actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
switch (actionid) {
case IOV_GVAL(IOV_MSGLEVEL):
@@ -617,7 +617,7 @@
int offset;
uint32 cis_byte;
uint16 *cis = (uint16 *)cisd;
- uint bar0 = SI_ENUM_BASE(sd->sih);
+ uint bar0 = SI_ENUM_BASE_DEFAULT;
int status;
uint8 data;
@@ -1542,9 +1542,8 @@
uint8 resp_delay = 0;
int err = SUCCESS;
uint32 hostlen;
- uint32 spilen = 0;
uint32 dstatus_idx = 0;
- uint16 templen, buslen, len, *ptr = NULL;
+// uint16 templen, buslen, len;
sd_trace(("spi cmd = 0x%x\n", cmd_arg));
@@ -1753,12 +1752,6 @@
return status;
}
- /* gSPI expects that hw-header-len is equal to spi-command-len */
- if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
- ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
- ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
- }
-
if ((nbytes > 2000) && !write) {
sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmstdlib_s.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmstdlib_s.c
new file mode 100644
index 0000000..12d8984
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmstdlib_s.c
@@ -0,0 +1,308 @@
+/*
+ * Broadcom Secure Standard Library.
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* BCMDRIVER */
+#include <stddef.h>
+#include <string.h>
+#endif /* else BCMDRIVER */
+
+#include <bcmstdlib_s.h>
+#include <bcmutils.h>
+
+/*
+ * The __SIZE_MAX__ value depends on the platform:
+ * Firmware Dongle: RAMSIZE (Dongle Specific Limit).
+ * LINUX NIC/Windows/MACOSX/Application: OS Native or
+ * 0xFFFFFFFFu if not defined.
+ */
+#ifndef SIZE_MAX
+#ifndef __SIZE_MAX__
+#define __SIZE_MAX__ 0xFFFFFFFFu
+#endif /* __SIZE_MAX__ */
+#define SIZE_MAX __SIZE_MAX__
+#endif /* SIZE_MAX */
+#define RSIZE_MAX (SIZE_MAX >> 1u)
+
+#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \
+ defined(__STDC_WANT_LIB_EXT1__))
+/*
+ * memmove_s - secure memmove
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
+ */
+int
+memmove_s(void *dest, size_t destsz, const void *src, size_t n)
+{
+ int err = BCME_OK;
+
+ if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ memset(dest, 0, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if ((!src) || (((const char *)src + n) < (const char *)src)) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ memmove(dest, src, n);
+exit:
+ return err;
+}
+
+/*
+ * memcpy_s - secure memcpy
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
+ */
+int
+memcpy_s(void *dest, size_t destsz, const void *src, size_t n)
+{
+ int err = BCME_OK;
+ char *d = dest;
+ const char *s = src;
+
+ if ((!d) || ((d + destsz) < d)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ memset(dest, 0, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if ((!s) || ((s + n) < s)) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ /* overlap checking between dest and src */
+ if (!(((d + destsz) <= s) || (d >= (s + n)))) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ (void)memcpy(dest, src, n);
+exit:
+ return err;
+}
+
+/*
+ * memset_s - secure memset
+ * dest : pointer to the object to be set
+ * destsz : size of the destination buffer
+ * c : byte value
+ * n : number of bytes to be set
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz bytes with value c into the dest object.
+ */
+int
+memset_s(void *dest, size_t destsz, int c, size_t n)
+{
+ int err = BCME_OK;
+ if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ (void)memset(dest, c, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ (void)memset(dest, c, n);
+exit:
+ return err;
+}
+#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
+
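
A minimal hedged sketch of how a caller might exercise the three secure routines above; the
wrapper function, buffers, and printf logging are illustrative only, while BCME_OK, BCME_BADLEN
and BCME_BADARG are the return codes used by the implementations above.

	static void bcmstdlib_s_usage_sketch(void)
	{
		char dst[8];
		const char src[] = "abcdef";

		/* n (7, including the NUL) fits in destsz (8): data is copied
		 * and BCME_OK is returned.
		 */
		if (memcpy_s(dst, sizeof(dst), src, sizeof(src)) != BCME_OK)
			printf("memcpy_s unexpectedly failed\n");

		/* n (9) exceeds destsz (8): nothing is copied, dst is
		 * zero-filled, and BCME_BADLEN is returned.
		 */
		if (memcpy_s(dst, sizeof(dst), src, sizeof(dst) + 1) == BCME_BADLEN)
			printf("oversized copy rejected, dst zeroed\n");

		/* overlapping ranges are allowed by memmove_s but would be
		 * rejected (BCME_BADARG) by memcpy_s.
		 */
		(void)memmove_s(dst + 1, sizeof(dst) - 1, dst, 4);

		/* set all eight bytes of dst to 0xA5 */
		(void)memset_s(dst, sizeof(dst), 0xA5, sizeof(dst));
	}
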
+#if !defined(FREEBSD) && !defined(BCM_USE_PLATFORM_STRLCPY)
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of src (assumes src is NUL terminated), or 0 if input parameters are NOK
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ const char *s = src;
+ size_t n;
+
+ if (dest == NULL) {
+ return 0;
+ }
+
+ /* terminate dest if src is NULL and return 0 as only the NUL terminator was added */
+ if (s == NULL) {
+ *dest = '\0';
+ return 0;
+ }
+
+ /* allows us to handle size 0 */
+ if (size == 0) {
+ n = 0;
+ } else {
+ n = size - 1u;
+ }
+
+ /* perform copy */
+ while (*s && n != 0) {
+ *dest++ = *s++;
+ n--;
+ }
+
+ *dest = '\0';
+
+ /* count to end of s or compensate for NULL */
+ if (n == 0) {
+ while (*s++)
+ ;
+ } else {
+ s++;
+ }
+
+ /* return bytes copied not accounting NUL */
+ return (s - src - 1u);
+}
+#endif // endif
+
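
strlcpy() above returns the full length of src even when the copy is truncated, so a caller can
detect truncation by comparing the return value with the destination size. A hedged sketch (the
buffer and log message are illustrative):

	char small[4];
	size_t need = strlcpy(small, "truncate me", sizeof(small));

	/* small now holds "tru"; need is 11 (the length of src), so
	 * need >= sizeof(small) signals that truncation occurred.
	 */
	if (need >= sizeof(small))
		printf("truncated, %zu bytes needed\n", need + 1);
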
+/**
+ * strlcat_s - Concatenate a %NUL terminated string with a sized buffer
+ * @dest: Where to concatenate the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of created string (i.e. the initial length of dest plus the length of src)
+ * not including the NUL char, up until size
+ *
+ * Unlike strncat(), strlcat() takes the full size of the buffer (not just the number of bytes to
+ * copy) and guarantees to NUL-terminate the result (even when there's nothing to concat).
+ * If the length of the dest string concatenated with the src string is >= size, truncation occurs.
+ *
+ * Compatible with *BSD: the result is always a valid NUL-terminated string that fits in the buffer
+ * (unless, of course, the buffer size is zero).
+ *
+ * If either src or dest is not NUL-terminated, dest[size-1] will be set to NUL.
+ * If size < strlen(dest) + strlen(src), dest[size-1] will be set to NUL.
+ * If size == 0, dest[0] will be set to NUL.
+ */
+size_t
+strlcat_s(char *dest, const char *src, size_t size)
+{
+ char *d = dest;
+ const char *s = src; /* point to the start of the src string */
+ size_t n = size;
+ size_t dlen;
+ size_t bytes_to_copy = 0;
+
+ if (dest == NULL) {
+ return 0;
+ }
+
+ /* set d to point to the end of dest string (up to size) */
+ while (n != 0 && *d != '\0') {
+ d++;
+ n--;
+ }
+ dlen = (size_t)(d - dest);
+
+ if (s != NULL) {
+ size_t slen = 0;
+
+ /* calculate src len in case it's not null-terminated */
+ n = size;
+ while (n-- != 0 && *(s + slen) != '\0') {
+ ++slen;
+ }
+
+ n = size - dlen; /* maximum num of chars to copy */
+ if (n != 0) {
+ /* copy relevant chars (until end of src buf or given size is reached) */
+ bytes_to_copy = MIN(slen - (size_t)(s - src), n - 1);
+ (void)memcpy(d, s, bytes_to_copy);
+ d += bytes_to_copy;
+ }
+ }
+ if (n == 0 && dlen != 0) {
+ --d; /* nothing to copy, but NUL-terminate dest anyway */
+ }
+ *d = '\0'; /* NUL-terminate dest */
+
+ return (dlen + bytes_to_copy);
+}
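
A hedged usage sketch for strlcat_s() above, covering both the normal case and truncation; the
buffer contents are illustrative:

	char buf[8] = "foo";
	size_t len;

	/* enough room: buf becomes "foobar" and len is 6 */
	len = strlcat_s(buf, "bar", sizeof(buf));

	/* only one byte of "12345" fits before the terminator: buf
	 * becomes "foobar1", len is 7, and buf[7] is still NUL.
	 */
	len = strlcat_s(buf, "12345", sizeof(buf));
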
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmutils.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmutils.c
index c9750c1..9021aea 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmutils.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -1,7 +1,9 @@
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -59,6 +61,7 @@
#define ASSERT(exp)
#endif /* WL_UNITTEST */
+#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <ethernet.h>
@@ -82,7 +85,7 @@
total = 0;
for (; p; p = PKTNEXT(osh, p)) {
len = PKTLEN(osh, p);
- total += len;
+ total += (uint)len;
#ifdef BCMLFRAG
if (BCMLFRAG_ENAB()) {
if (PKTISFRAG(osh, p)) {
@@ -138,7 +141,7 @@
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= PKTLEN(osh, p);
+ offset -= (uint)PKTLEN(osh, p);
}
if (!p)
@@ -167,7 +170,7 @@
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= PKTLEN(osh, p);
+ offset -= (uint)PKTLEN(osh, p);
}
if (!p)
@@ -199,7 +202,7 @@
for (; p; p = PKTNEXT(osh, p)) {
pdata = (uint8 *) PKTDATA(osh, p);
pkt_off = offset - len;
- len += PKTLEN(osh, p);
+ len += (uint)PKTLEN(osh, p);
if (len > offset)
break;
}
@@ -217,7 +220,7 @@
return NULL;
for (; p; p = PKTNEXT(osh, p)) {
- len += PKTLEN(osh, p);
+ len += (uint)PKTLEN(osh, p);
if (len > offset)
break;
}
@@ -245,7 +248,7 @@
printf("%s:\n", msg);
for (p = p0; p; p = PKTNEXT(osh, p))
- prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+ prhex(NULL, PKTDATA(osh, p), (uint)PKTLEN(osh, p));
}
#endif // endif
@@ -259,8 +262,8 @@
struct ether_header *eh;
struct ethervlan_header *evh;
uint8 *pktdata;
- int priority = 0;
- int rc = 0;
+ uint priority = 0;
+ uint rc = 0;
pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
@@ -269,18 +272,18 @@
if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
uint16 vlan_tag;
- int vlan_prio, dscp_prio = 0;
+ uint vlan_prio, dscp_prio = 0;
evh = (struct ethervlan_header *)eh;
vlan_tag = ntoh16(evh->vlan_tag);
- vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+ vlan_prio = (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
- uint8 tos_tc = IP_TOS46(ip_body);
- dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ uint8 tos_tc = (uint8)IP_TOS46(ip_body);
+ dscp_prio = tos_tc >> IPV4_TOS_PREC_SHIFT;
}
/* DSCP priority gets precedence over 802.1P (vlan tag) */
@@ -312,35 +315,44 @@
} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ether_header);
- uint8 tos_tc = IP_TOS46(ip_body);
+ uint8 tos_tc = (uint8)IP_TOS46(ip_body);
uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
switch (dscp) {
case DSCP_EF:
+ case DSCP_VA:
priority = PRIO_8021D_VO;
break;
case DSCP_AF31:
case DSCP_AF32:
case DSCP_AF33:
+ case DSCP_CS3:
priority = PRIO_8021D_CL;
break;
case DSCP_AF21:
case DSCP_AF22:
case DSCP_AF23:
+ priority = PRIO_8021D_EE;
+ break;
case DSCP_AF11:
case DSCP_AF12:
case DSCP_AF13:
- priority = PRIO_8021D_EE;
+ case DSCP_CS2:
+ priority = PRIO_8021D_BE;
+ break;
+ case DSCP_CS6:
+ case DSCP_CS7:
+ priority = PRIO_8021D_NC;
break;
default:
- priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ priority = tos_tc >> IPV4_TOS_PREC_SHIFT;
break;
}
rc |= PKTPRIO_DSCP;
}
- ASSERT(priority >= 0 && priority <= MAXPRIO);
- PKTSETPRIO(pkt, priority);
+ ASSERT(priority <= MAXPRIO);
+ PKTSETPRIO(pkt, (int)priority);
return (rc | priority);
}
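
A worked example of the DSCP remapping above: DSCP is the upper six bits of the IPv4 TOS byte, so
a packet with TOS 0x28 carries DSCP 10 (AF11) and is now classified as PRIO_8021D_BE instead of
being grouped with AF2x as PRIO_8021D_EE, while the new DSCP_CS6/DSCP_CS7 cases send
network-control traffic to PRIO_8021D_NC rather than the precedence-derived default.
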
@@ -375,12 +387,12 @@
uint rc = 0;
pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
- pktlen = PKTLEN(OSH_NULL, pkt);
+ pktlen = (uint)PKTLEN(OSH_NULL, pkt);
if (pktgetdscp(pktdata, pktlen, &dscp)) {
rc = PKTPRIO_DSCP;
user_priority = dscp2up(up_table, dscp);
- PKTSETPRIO(pkt, user_priority);
+ PKTSETPRIO(pkt, (int)user_priority);
}
return (rc | user_priority);
@@ -407,7 +419,7 @@
if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
ip_body = pktdata + sizeof(struct ether_header);
- *dscp = IP_DSCP46(ip_body);
+ *dscp = (uint8)IP_DSCP46(ip_body);
rc = TRUE;
}
else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
@@ -417,7 +429,7 @@
if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
evh->ether_type == HTON16(ETHER_TYPE_IP)) {
ip_body = pktdata + sizeof(struct ethervlan_header);
- *dscp = IP_DSCP46(ip_body);
+ *dscp = (uint8)IP_DSCP46(ip_body);
rc = TRUE;
}
}
@@ -462,7 +474,7 @@
uint8 *except_ptr = (uint8 *)qos_map_ie->data;
uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
uint8 *range_ptr = except_ptr + except_len;
- int i;
+ uint8 i;
/* fill in ranges */
for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
@@ -502,7 +514,7 @@
BCMRAMFN(bcmerrorstr)(int bcmerror)
{
/* check if someone added a bcmerror code but forgot to add errorstring */
- ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+ ASSERT((uint)ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
if (bcmerror > 0 || bcmerror < BCME_LAST) {
snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
@@ -775,7 +787,7 @@
MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
bcm_count_leading_zeros(bitmap));
bitix = (BCM_MWBMAP_BITS_WORD - 1)
- - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
/* Clear bit if wd count is 0, without conditional branch */
@@ -783,14 +795,14 @@
count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
mwbmap_p->wd_count[wordix]--;
- count = mwbmap_p->wd_count[wordix];
+ count = (uint32)mwbmap_p->wd_count[wordix];
MWBMAP_ASSERT(count ==
(bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
MWBMAP_ASSERT(count >= 0);
/* clear wd_bitmap bit if id_map count is 0 */
- bitmap = (count == 0) << bitix;
+ bitmap = ((uint32)(count == 0)) << BCM_MWBMAP_MODOP(bitix);
MWBMAP_DBG((
"Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
@@ -810,7 +822,7 @@
bcm_count_leading_zeros(bitmap));
bitix = BCM_MWBMAP_MULOP(wordix)
+ (BCM_MWBMAP_BITS_WORD - 1)
- - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
mwbmap_p->ifree--; /* decrement system wide free count */
MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
@@ -869,12 +881,12 @@
count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
mwbmap_p->wd_count[bitix]--;
- count = mwbmap_p->wd_count[bitix];
+ count = (uint32)mwbmap_p->wd_count[bitix];
MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
MWBMAP_ASSERT(count >= 0);
- bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+ bitmap = (uint32)(count == 0) << BCM_MWBMAP_MODOP(bitix);
MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
@@ -958,7 +970,7 @@
ASSERT(mwbmap_p->ifree >= 0);
- return mwbmap_p->ifree;
+ return (uint32)mwbmap_p->ifree;
}
/* Determine whether an index is inuse or free */
@@ -1001,7 +1013,7 @@
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = mwbmap_p->wd_count[ix];
+ count = (uint32)mwbmap_p->wd_count[ix];
MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
@@ -1031,7 +1043,7 @@
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = mwbmap_p->wd_count[idmap_ix];
+ count = (uint32)mwbmap_p->wd_count[idmap_ix];
ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
ASSERT(count != 0U);
@@ -1212,7 +1224,9 @@
id16_map_t * id16_map;
ASSERT(id16_map_hndl != NULL);
-
+ if (!id16_map_hndl) {
+ return ID16_INVALID;
+ }
id16_map = (id16_map_t *)id16_map_hndl;
ASSERT(id16_map->total > 0);
@@ -1280,7 +1294,9 @@
id16_map_t * id16_map;
ASSERT(id16_map_hndl != NULL);
-
+ if (!id16_map_hndl) {
+ goto done;
+ }
id16_map = (id16_map_t *)id16_map_hndl;
ASSERT(id16_map->stack_idx >= -1);
@@ -1330,23 +1346,23 @@
void
dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
{
- uint32 mem_size;
- mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ uint32 memsize;
+ memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
if (pool)
- MFREE(osh, pool, mem_size);
+ MFREE(osh, pool, memsize);
}
dll_pool_t *
dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
{
- uint32 mem_size, i;
+ uint32 memsize, i;
dll_pool_t * dll_pool_p;
dll_t * elem_p;
ASSERT(elem_size > sizeof(dll_t));
- mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
- if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+ if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, memsize)) == NULL) {
printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
elems_max, elem_size);
ASSERT(0);
@@ -1446,7 +1462,7 @@
if ((r == -1) || (r >= (int)b->size)) {
b->size = 0;
} else {
- b->size -= r;
+ b->size -= (uint)r;
b->buf += r;
}
@@ -1514,7 +1530,7 @@
/* Look for vendor-specific IE with specified OUI and optional type */
bcm_tlv_t *
-bcm_find_vendor_ie(const void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, uint8 *type, uint type_len)
{
const bcm_tlv_t *ie;
uint8 ie_len;
@@ -1567,7 +1583,7 @@
} else if (bcm_isprint((uchar)c)) {
*p++ = (char)c;
} else {
- p += snprintf(p, (endp - p), "\\x%02X", c);
+ p += snprintf(p, (size_t)(endp - p), "\\x%02X", c);
}
}
*p = '\0';
@@ -1752,7 +1768,7 @@
result = 0;
while (bcm_isxdigit(*cp) &&
- (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ (value = (uint64)(bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10)) < base) {
result = result*base + value;
/* Detected overflow */
if (result < last_result && !minus) {
@@ -1804,7 +1820,7 @@
len = (int)strlen(haystack) - nlen + 1;
for (i = 0; i < len; i++)
- if (memcmp(needle, &haystack[i], nlen) == 0)
+ if (memcmp(needle, &haystack[i], (size_t)nlen) == 0)
return DISCARD_QUAL(&haystack[i], char);
return (NULL);
}
@@ -1908,7 +1924,7 @@
for (; *str; str++) {
if (map[*str >> 5] & (1 << (*str & 31))) {
if (tokdelim != NULL) {
- *tokdelim = *str;
+ *tokdelim = (char)*str;
}
*str++ = '\0';
@@ -2004,7 +2020,7 @@
char *ep;
for (;;) {
- ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ ea->octet[i++] = (uint8) bcm_strtoul(p, &ep, 16);
p = ep;
if (!*p++ || i == 6)
break;
@@ -2504,7 +2520,7 @@
*/
if (datalen > 0) {
- memcpy(dst_tlv->data, data, datalen);
+ memcpy(dst_tlv->data, data, (size_t)datalen);
}
/* update the output destination poitner to point past
@@ -2574,7 +2590,7 @@
/* if len + tlv hdr len is more than destlen, don't do anything
* just return the buffer untouched
*/
- if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+ if ((int)(datalen + (int)BCM_TLV_HDR_SIZE) <= dst_maxlen) {
new_dst = bcm_write_tlv(type, data, datalen, dst);
}
@@ -2889,9 +2905,9 @@
* by the TLV parameter's length if it is valid.
*/
bcm_tlv_t *
-bcm_next_tlv(const bcm_tlv_t *elt, int *buflen)
+bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen)
{
- int len;
+ uint len;
/* validate current elt */
if (!bcm_valid_tlv(elt, *buflen)) {
@@ -2913,13 +2929,116 @@
GCC_DIAGNOSTIC_POP();
}
+/**
+ * Advance a const tlv buffer pointer and length up to the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter
+ * will be set to NULL and *buflen parameter will be set to zero. Otherwise,
+ * *buffer will point to elt, and *buflen will have been adjusted by the
+ * difference between *buffer and elt.
+ */
+void
+bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ uint new_buflen;
+ const uint8 *new_buffer;
+
+ new_buffer = (const uint8*)elt;
+
+ /* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap,
+ * and that the elt pointer is in the range of [buffer, buffer + buflen]
+ */
+ if ((*buffer != NULL) &&
+ ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) &&
+ (new_buffer >= *buffer) &&
+ (new_buffer < (*buffer + *buflen))) {
+ /* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint
+ * from ptrdiff is ok
+ */
+ uint delta = (uint)(new_buffer - *buffer);
+
+ /* New buffer length is old len minus the delta from the buffer start to elt.
+ * The check just above guarantees that the subtraction does not underflow.
+ */
+ new_buflen = *buflen - delta;
+
+ /* validate current elt */
+ if (bcm_valid_tlv(elt, new_buflen)) {
+ /* All good, so update the input/output parameters */
+ *buffer = new_buffer;
+ *buflen = new_buflen;
+ return;
+ }
+ }
+
+ /* something did not check out, clear out the buffer info */
+ *buffer = NULL;
+ *buflen = 0;
+
+ return;
+}
+
+/**
+ * Advance a const tlv buffer pointer and length past the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length. The function also checks that the
+ * remaining buffer starts with a valid tlv.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer
+ * following the elt does not begin with a tlv in the buffer bounds, the *buffer
+ * parameter will be set to NULL and *buflen parameter will be set to zero.
+ * Otherwise, *buffer will point to the first byte past elt, and *buflen will
+ * have the remaining buffer length.
+ */
+void
+bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ /* Start by advancing the buffer up to the given elt */
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ /* if that did not work, bail out */
+ if (*buflen == 0) {
+ return;
+ }
+
+#if defined(__COVERITY__)
+ /* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element,
+ * so its elt->len is in the bounds of the buffer. The following check prevents
+ * Coverity from flagging the (elt->data + elt->len) statement below as using a
+ * tainted elt->len to index into array 'elt->data'.
+ */
+ if (elt->len > *buflen) {
+ return;
+ }
+#endif /* __COVERITY__ */
+
+ /* We know we have advanced up to a good tlv.
+ * Now just advance to the following tlv.
+ */
+ elt = (const bcm_tlv_t*)(elt->data + elt->len);
+
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ return;
+}
+
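
A hedged sketch of how the two advance helpers above combine with bcm_parse_tlvs() to walk every
element of one type in an IE buffer; the ies/ies_len variables and the EXAMPLE_ID constant are
placeholders rather than names from the original source:

	const uint8 *buf = ies;     /* assumed: start of a TLV/IE buffer */
	uint buflen = ies_len;      /* assumed: its length in bytes */
	const bcm_tlv_t *elt;

	while ((elt = bcm_parse_tlvs(buf, buflen, EXAMPLE_ID)) != NULL) {
		/* ... consume elt->data / elt->len here ... */

		/* advance the (buf, buflen) window past this element;
		 * on any bounds problem both are cleared, which also makes
		 * the next bcm_parse_tlvs() call return NULL.
		 */
		bcm_tlv_buffer_advance_past(elt, &buf, &buflen);
	}
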
/*
* Traverse a string of 1-byte tag/1-byte length/variable-length value
* triples, returning a pointer to the substring whose first element
* matches tag
*/
bcm_tlv_t *
-bcm_parse_tlvs(const void *buf, int buflen, uint key)
+bcm_parse_tlvs(const void *buf, uint buflen, uint key)
{
const bcm_tlv_t *elt;
int totlen;
@@ -2927,17 +3046,17 @@
if ((elt = (const bcm_tlv_t*)buf) == NULL) {
return NULL;
}
- totlen = buflen;
+ totlen = (int)buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
- int len = elt->len;
+ uint len = elt->len;
/* validate remaining totlen */
if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- return (bcm_tlv_t *)(elt);
- GCC_DIAGNOSTIC_POP();
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ return (bcm_tlv_t *)(elt);
+ GCC_DIAGNOSTIC_POP();
}
elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
@@ -2999,7 +3118,7 @@
bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen)
{
bcm_tlv_t * ret;
- ret = bcm_parse_tlvs(buf, buflen, key);
+ ret = bcm_parse_tlvs(buf, (uint)buflen, key);
if (ret == NULL || ret->len < min_bodylen) {
return NULL;
}
@@ -3061,8 +3180,8 @@
bit = bd->bitfield[i].bit;
if ((flags & mask) == bit) {
if (len > (int)strlen(name)) {
- slen = strlen(name);
- strncpy(buf, name, slen+1);
+ slen = (int)strlen(name);
+ strncpy(buf, name, (size_t)slen+1);
}
break;
}
@@ -3096,7 +3215,7 @@
} else if ((flags & bit) == 0)
continue;
flags &= ~bit;
- nlen = strlen(name);
+ nlen = (int)strlen(name);
slen += nlen;
/* count btwn flag space */
if (flags != 0)
@@ -3105,7 +3224,7 @@
if (len <= slen)
break;
/* copy NULL char but don't count it */
- strncpy(p, name, nlen + 1);
+ strncpy(p, name, (size_t)nlen + 1);
p += nlen;
/* copy btwn flag space and NULL char */
if (flags != 0)
@@ -3142,8 +3261,10 @@
for (i = 0; i < bdsz; i++) {
bit = bd[i].bit;
name = bd[i].name;
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (isset(addr, bit)) {
- nlen = strlen(name);
+ CLANG_DIAGNOSTIC_POP();
+ nlen = (int)strlen(name);
slen += nlen;
/* need SPACE - for simplicity */
slen += 1;
@@ -3152,7 +3273,7 @@
more = TRUE;
break;
}
- memcpy(p, name, nlen);
+ memcpy(p, name, (size_t)nlen);
p += nlen;
p[0] = ' ';
p += 1;
@@ -3200,12 +3321,12 @@
p = line;
for (i = 0; i < nbytes; i++) {
if (i % 16 == 0) {
- nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
+ nchar = snprintf(p, (size_t)len, " %04x: ", i); /* line prefix */
p += nchar;
len -= nchar;
}
if (len > 0) {
- nchar = snprintf(p, len, "%02x ", buf[i]);
+ nchar = snprintf(p, (size_t)len, "%02x ", buf[i]);
p += nchar;
len -= nchar;
}
@@ -3269,7 +3390,18 @@
const char *fmt;
fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ /*
+ * The following call to snprintf generates a compiler warning
+ * due to -Wformat-nonliteral. However, the format string is coming
+ * from internal callers rather than external data input, and is a
+ * useful debugging tool serving a variety of diagnostics. Rather
+ * than expand code size by replicating multiple functions with different
+ * argument lists, or disabling the warning globally, we simply
+ * disable the warning for this one instance.
+ */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
snprintf(buf, len, fmt, chipid);
+ CLANG_DIAGNOSTIC_POP()
return buf;
}
@@ -3327,14 +3459,26 @@
while (bufsize > 1) {
if (cur_ptr->nameandfmt == NULL)
break;
+
+ /*
+ * The following call to snprintf generates a compiler warning
+ * due to -Wformat-nonliteral. However, the format string is coming
+ * from internal callers rather than external data input, and is a
+ * useful debugging tool serving a variety of diagnostics. Rather
+ * than expand code size by replicating multiple functions with different
+ * argument lists, or disabling the warning globally, we simply
+ * disable the warning for this one instance.
+ */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
- read_rtn(arg0, arg1, cur_ptr->offset));
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ CLANG_DIAGNOSTIC_POP()
/* check for snprintf overflow or error */
if (len < 0 || (uint32)len >= bufsize)
- len = bufsize - 1;
+ len = (int)(bufsize - 1);
buf += len;
- bufsize -= len;
- filled_len += len;
+ bufsize -= (uint32)len;
+ filled_len += (uint32)len;
cur_ptr++;
}
return filled_len;
@@ -3412,7 +3556,7 @@
/* return the mW value scaled down to the correct factor of 10,
* adding in factor/2 to get proper rounding.
*/
- return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+ return (uint16)((nqdBm_to_mW_map[idx] + factor/2) / factor);
}
uint8
@@ -3598,13 +3742,13 @@
bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
{
while (len > 1) {
- sum += (buf[0] << 8) | buf[1];
+ sum += (uint32)((buf[0] << 8) | buf[1]);
buf += 2;
len -= 2;
}
if (len > 0) {
- sum += (*buf) << 8;
+ sum += (uint32)((*buf) << 8);
}
while (sum >> 16) {
@@ -3710,7 +3854,7 @@
ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2;
/* return calculated chksum */
- return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip));
+ return ip_cksum(sum, ptr, (uint32)((uint)ip_len - OFFSETOF(struct ipv4_hdr, src_ip)));
}
/* calculate TCP header checksum using partial sum */
@@ -3753,6 +3897,8 @@
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+ if (!ip || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
+ return 0;
/* pseudo header cksum */
memset(&tcp_ps, 0, sizeof(tcp_ps));
memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN);
@@ -3789,6 +3935,8 @@
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+ if (!ipv6 || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
+ return 0;
/* pseudo header cksum */
memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo));
memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr,
@@ -3860,7 +4008,7 @@
/* all bits are in the same byte */
if (fbyte == lbyte) {
- mask = ((1 << nbits) - 1) << fbit;
+ mask = (uint8)(((1 << nbits) - 1) << fbit);
addr[fbyte] &= ~mask;
addr[fbyte] |= (uint8)(val << fbit);
return;
@@ -3868,7 +4016,7 @@
/* first partial byte */
if (fbit > 0) {
- mask = (0xff << fbit);
+ mask = (uint8)(0xff << fbit);
addr[fbyte] &= ~mask;
addr[fbyte] |= (uint8)(val << fbit);
val >>= (8 - fbit);
@@ -3878,7 +4026,7 @@
/* last partial byte */
if (rbits > 0) {
- mask = (1 << rbits) - 1;
+ mask = (uint8)((1 << rbits) - 1);
addr[lbyte] &= ~mask;
addr[lbyte] |= (uint8)(val >> (nbits - rbits));
lbyte --; /* last full byte */
@@ -3913,7 +4061,7 @@
/* all bits are in the same byte */
if (fbyte == lbyte) {
- mask = ((1 << nbits) - 1) << fbit;
+ mask = (uint8)(((1 << nbits) - 1) << fbit);
val = (addr[fbyte] & mask) >> fbit;
return val;
}
@@ -3921,21 +4069,21 @@
/* first partial byte */
if (fbit > 0) {
bits = 8 - fbit;
- mask = (0xff << fbit);
+ mask = (uint8)(0xFFu << fbit);
val |= (addr[fbyte] & mask) >> fbit;
fbyte ++; /* first full byte */
}
/* last partial byte */
if (rbits > 0) {
- mask = (1 << rbits) - 1;
- val |= (addr[lbyte] & mask) << (nbits - rbits);
+ mask = (uint8)((1 << rbits) - 1);
+ val |= (uint32)((addr[lbyte] & mask) << (nbits - rbits));
lbyte --; /* last full byte */
}
/* remaining full byte(s) */
for (byte = fbyte; byte <= lbyte; byte ++) {
- val |= (addr[byte] << (((byte - fbyte) << 3) + bits));
+ val |= (uint32)((addr[byte] << (((byte - fbyte) << 3) + bits)));
}
return val;
@@ -4156,3 +4304,48 @@
}
return ret;
}
+
+/* Adds/replaces NVRAM variable with given value
+ * varbuf[in,out] - Buffer with NVRAM variables (sequence of zero-terminated 'name=value' records,
+ * terminated with additional zero)
+ * buflen[in] - Length of buffer (may, even should, have some unused space)
+ * variable[in] - Variable to add/replace in 'name=value' form
+ * datalen[out,opt] - Optional output parameter - resulting length of data in buffer
+ * Returns TRUE on success, FALSE if buffer too short or variable specified incorrectly
+ */
+bool
+replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
+ unsigned int *datalen)
+{
+ char *p;
+ int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1;
+ char *buf_end = varbuf + buflen;
+ p = strchr(variable, '=');
+ if (!p) {
+ return FALSE;
+ }
+ /* Length of given variable name, followed by '=' */
+ variable_heading_len = (int)((const char *)(p + 1) - variable);
+ /* Scanning NVRAM, record by record up to trailing 0 */
+ for (p = varbuf; *p; p += strlen(p) + 1) {
+ /* If given variable found - remove it */
+ if (!strncmp(p, variable, (size_t)variable_heading_len)) {
+ record_len = (int)strlen(p) + 1;
+ memmove_s(p, buf_end - p, p + record_len,
+ (size_t)(buf_end - (p + record_len)));
+ }
+ }
+ /* If buffer does not have space for given variable - return FALSE */
+ if ((p + variable_record_len + 1) > buf_end) {
+ return FALSE;
+ }
+ /* Copy given variable to end of buffer */
+ memmove_s(p, buf_end - p, variable, (size_t)variable_record_len);
+ /* Adding trailing 0 */
+ p[variable_record_len] = 0;
+ /* Setting optional output parameter - length of data in buffer */
+ if (datalen) {
+ *datalen = (unsigned int)(p + variable_record_len + 1 - varbuf);
+ }
+ return TRUE;
+}
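
A hedged usage sketch for replace_nvram_variable() above. The NVRAM image is a run of
NUL-terminated "name=value" records followed by one extra NUL; the buffer contents and sizes
here are illustrative:

	char nvram[64] = "boardtype=0x1234";   /* zero fill supplies the image terminator */
	unsigned int used = 0;

	/* no existing "macaddr=" record, so a new one is appended */
	if (replace_nvram_variable(nvram, sizeof(nvram),
	                           "macaddr=00:90:4c:aa:bb:cc", &used)) {
		/* nvram now holds both records plus the trailing NUL,
		 * and 'used' reports the occupied length (44 bytes here).
		 */
	}
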
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
index f0e36bf..2afa029 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -3,7 +3,9 @@
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -256,7 +258,7 @@
return -1;
}
- return sb;
+ return (int)sb;
}
/* return primary20 channel given center channel and side band */
@@ -275,7 +277,7 @@
uint i;
for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
if (ch == wf_5g_80m_chans[i])
- return i;
+ return (int)i;
}
return -1;
@@ -379,10 +381,11 @@
}
/* given a chanspec string, convert to a chanspec.
+ * If the bandwidth is not specified in the chanspec input string, default_bw is used.
* On error return 0
*/
chanspec_t
-wf_chspec_aton(const char *a)
+wf_chspec_aton_ex(const char *a, const uint default_bw)
{
chanspec_t chspec;
uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
@@ -399,7 +402,7 @@
if (!read_uint(&a, &num))
return 0;
/* if we are looking at a 'g', then the first number was a band */
- c = tolower((int)a[0]);
+ c = tolower(a[0]);
if (c == 'g') {
a++; /* consume the char */
@@ -415,7 +418,7 @@
if (!read_uint(&a, &pri_ch))
return 0;
- c = tolower((int)a[0]);
+ c = tolower(a[0]);
}
else {
/* first number is channel, use default for band */
@@ -425,8 +428,9 @@
}
if (c == '\0') {
- /* default BW of 20MHz */
- chspec_bw = WL_CHANSPEC_BW_20;
+ /* bandwidth not specified in chanspec input string, so use default_bw bandwidth */
+ chspec_bw = default_bw;
+ bw = wf_bw_chspec_to_mhz(default_bw);
goto done_read;
}
@@ -469,7 +473,7 @@
* or '+80' if bw = 80, to make '80+80' bw.
*/
- c = tolower((int)a[0]);
+ c = (char)tolower((int)a[0]);
/* if we have a 2g/40 channel, we should have a l/u spec now */
if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
@@ -573,7 +577,7 @@
sb = channel_to_sb(center_ch[i], pri_ch, bw);
if (sb >= 0) {
chspec_ch = center_ch[i];
- chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
break;
}
}
@@ -609,10 +613,10 @@
return 0;
}
- chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
}
- chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+ chspec = (chanspec_t)(chspec_ch | chspec_band | chspec_bw | chspec_sb);
if (wf_chspec_malformed(chspec))
return 0;
@@ -620,6 +624,15 @@
return chspec;
}
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ return wf_chspec_aton_ex(a, WL_CHANSPEC_BW_20);
+}
+
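
A hedged sketch of what the new default_bw parameter changes for a bare channel string; the
comments describe the intended outcome rather than encodings verified against the full parser,
most of which lies outside this hunk:

	/* "36" carries no bandwidth, so default_bw decides the result */
	chanspec_t cs20 = wf_chspec_aton_ex("36", WL_CHANSPEC_BW_20);
	chanspec_t cs80 = wf_chspec_aton_ex("36", WL_CHANSPEC_BW_80);

	/* cs20 is a plain 20MHz channel 36 chanspec; cs80 is expected to
	 * be an 80MHz chanspec built around channel 36. The legacy entry
	 * point is unchanged, so wf_chspec_aton("36") matches cs20.
	 */
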
/*
* Verify the chanspec is using a legal set of parameters, i.e. that the
* chanspec specified a band, bw, pri_sb and channel and that the
@@ -797,6 +810,35 @@
}
/**
+ * Create a 20MHz chanspec for the given band.
+ *
+ * This function returns a 20MHz chanspec in the given band.
+ *
+ * @param channel 20MHz channel number
+ * @param band a chanspec band (e.g. WL_CHANSPEC_BAND_2G)
+ *
+ * @return Returns a 20MHz chanspec, or INVCHANSPEC in case of error.
+ */
+chanspec_t
+wf_create_20MHz_chspec(uint channel, chanspec_band_t band)
+{
+ chanspec_t chspec;
+
+ if (channel <= WL_CHANSPEC_CHAN_MASK &&
+ (band == WL_CHANSPEC_BAND_2G ||
+ band == WL_CHANSPEC_BAND_5G)) {
+ chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel;
+ if (!wf_chspec_valid(chspec)) {
+ chspec = INVCHANSPEC;
+ }
+ } else {
+ chspec = INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
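
A hedged sketch of the new helper above; the channel numbers are illustrative:

	chanspec_t ch2g = wf_create_20MHz_chspec(1, WL_CHANSPEC_BAND_2G);
	chanspec_t ch5g = wf_create_20MHz_chspec(36, WL_CHANSPEC_BAND_5G);

	/* a channel above WL_CHANSPEC_CHAN_MASK, or any band other than
	 * 2G/5G, yields INVCHANSPEC.
	 */
	chanspec_t bad_chan = wf_create_20MHz_chspec(300, WL_CHANSPEC_BAND_5G);
	chanspec_t bad_band = wf_create_20MHz_chspec(1, WL_CHANSPEC_BAND_3G);
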
+/**
* Return the primary 20MHz channel.
*
* This function returns the channel number of the primary 20MHz channel. For
@@ -956,7 +998,7 @@
}
/* Create primary 40MHz chanspec */
- chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+ chspec40 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
sb | center_chan);
}
@@ -1006,7 +1048,7 @@
if ((freq < base) || (freq > base + 1000))
return -1;
- offset = freq - base;
+ offset = (int)(freq - base);
ch = offset / 5;
/* check that frequency is a 5MHz multiple from the base */
@@ -1047,7 +1089,7 @@
else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
freq = 2484;
else
- freq = ch * 5 + start_factor / 2;
+ freq = (int)(ch * 5 + start_factor / 2);
return freq;
}
@@ -1140,7 +1182,7 @@
seg1 = chan0_id;
}
- chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+ chanspec = (uint16)((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
(seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
WL_CHANSPEC_BW_8080 |
@@ -1223,7 +1265,7 @@
center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
/* Create primary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else if (CHSPEC_IS160(chspec)) {
center_chan = CHSPEC_CHANNEL(chspec);
@@ -1240,7 +1282,7 @@
}
/* Create primary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else {
chspec80 = INVCHANSPEC;
@@ -1265,7 +1307,7 @@
center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec));
/* Create secondary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G |
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
WL_CHANSPEC_BW_80 |
WL_CHANSPEC_CTL_SB_LL |
center_chan);
@@ -1283,7 +1325,7 @@
}
/* Create secondary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G |
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
WL_CHANSPEC_BW_80 |
WL_CHANSPEC_CTL_SB_LL |
center_chan);
@@ -1316,7 +1358,7 @@
else {
/* for 20, 40, and 80 Mhz */
ch[0] = CHSPEC_CHANNEL(chspec);
- ch[1] = -1;
+ ch[1] = 0xFFu;
}
return;
@@ -1414,11 +1456,11 @@
uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t);
if (CHSPEC_IS20(chspec)) return; /* nothing more to do since 20MHz chspec */
/* 20MHz EXT */
- (pext)[1] = pri_ch + (IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART);
+ (pext)[1] = pri_ch + (uint8)(IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART);
if (CHSPEC_IS40(chspec)) return; /* nothing more to do since 40MHz chspec */
/* center 40MHz EXT */
- t = wf_channel2chspec(pri_ch + (IS_CTL_IN_L40(chspec) ?
- CH_40MHZ_APART : -CH_40MHZ_APART), WL_CHANSPEC_BW_40);
+ t = wf_channel2chspec((uint)(pri_ch + (IS_CTL_IN_L40(chspec) ?
+ CH_40MHZ_APART : -CH_40MHZ_APART)), WL_CHANSPEC_BW_40);
GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */
if (CHSPEC_IS80(chspec)) return; /* nothing more to do since 80MHz chspec */
t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
index e29cbfe..9e1bff0 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -3,7 +3,9 @@
* This header file housing the define and function prototype use by
* both the wl driver, tools & Apps.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -34,6 +36,9 @@
/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sideband */
typedef uint16 chanspec_t;
+typedef uint16 chanspec_band_t;
+typedef uint16 chanspec_bw_t;
+typedef uint16 chanspec_subband_t;
/* channel defines */
#define CH_80MHZ_APART 16
@@ -41,7 +46,11 @@
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
-#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+
+#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */
+#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */
+#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */
+#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */
/* maximum # channels the s/w supports */
#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above,
@@ -94,24 +103,24 @@
#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
-#define WL_CHANSPEC_BW_MASK 0x3800
-#define WL_CHANSPEC_BW_SHIFT 11
-#define WL_CHANSPEC_BW_5 0x0000
-#define WL_CHANSPEC_BW_10 0x0800
-#define WL_CHANSPEC_BW_20 0x1000
-#define WL_CHANSPEC_BW_40 0x1800
-#define WL_CHANSPEC_BW_80 0x2000
-#define WL_CHANSPEC_BW_160 0x2800
-#define WL_CHANSPEC_BW_8080 0x3000
+#define WL_CHANSPEC_BW_MASK 0x3800u
+#define WL_CHANSPEC_BW_SHIFT 11u
+#define WL_CHANSPEC_BW_5 0x0000u
+#define WL_CHANSPEC_BW_10 0x0800u
+#define WL_CHANSPEC_BW_20 0x1000u
+#define WL_CHANSPEC_BW_40 0x1800u
+#define WL_CHANSPEC_BW_80 0x2000u
+#define WL_CHANSPEC_BW_160 0x2800u
+#define WL_CHANSPEC_BW_8080 0x3000u
-#define WL_CHANSPEC_BAND_MASK 0xc000
-#define WL_CHANSPEC_BAND_SHIFT 14
-#define WL_CHANSPEC_BAND_2G 0x0000
-#define WL_CHANSPEC_BAND_3G 0x4000
-#define WL_CHANSPEC_BAND_4G 0x8000
-#define WL_CHANSPEC_BAND_5G 0xc000
-#define INVCHANSPEC 255
-#define MAX_CHANSPEC 0xFFFF
+#define WL_CHANSPEC_BAND_MASK 0xc000u
+#define WL_CHANSPEC_BAND_SHIFT 14u
+#define WL_CHANSPEC_BAND_2G 0x0000u
+#define WL_CHANSPEC_BAND_3G 0x4000u
+#define WL_CHANSPEC_BAND_4G 0x8000u
+#define WL_CHANSPEC_BAND_5G 0xc000u
+#define INVCHANSPEC 255u
+#define MAX_CHANSPEC 0xFFFFu
#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)
@@ -546,6 +555,16 @@
extern chanspec_t wf_chspec_aton(const char *a);
/**
+ * Convert ascii string to chanspec
+ *
+ * @param a pointer to chanspec input string
+ * @param default_bw default bandwidth to use if not specified in chanspec input string
+ *
+ * @return a valid chanspec if successful, or 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton_ex(const char *a, const uint default_bw);
+
+/**
* Verify the chanspec fields are valid.
*
* Verify the chanspec is using a legal set field values, i.e. that the chanspec
@@ -596,6 +615,11 @@
extern const char *wf_chspec_to_bw_str(chanspec_t chspec);
/**
+ * Create a 20MHz chanspec for the given band.
+ */
+chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band);
+
+/**
* Return the primary 20MHz chanspec.
*
* This function returns the chanspec of the primary 20MHz channel. For 20MHz
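A chanspec_t packs the channel number, primary-20MHz sideband, bandwidth and band into one 16-bit word using the masks above; the new chanspec_band_t/chanspec_bw_t typedefs and wf_create_20MHz_chspec() presumably wrap the same packing for the 20MHz case. A minimal standalone sketch of that packing, assuming the WL_CHANSPEC_CTL_SB_LL sideband value (not shown in this hunk) is 0x0000u and using center channel 42 purely as an example:

#include <stdio.h>

typedef unsigned short uint16;
typedef uint16 chanspec_t;

#define WL_CHANSPEC_BAND_5G	0xc000u
#define WL_CHANSPEC_BW_80	0x2000u
#define WL_CHANSPEC_CTL_SB_LL	0x0000u	/* assumed value, for illustration only */

/* band | bandwidth | primary-20 sideband | center channel */
static chanspec_t make_80mhz_chspec(uint16 center_chan, uint16 sb)
{
	return (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}

int main(void)
{
	chanspec_t cs = make_80mhz_chspec(42, WL_CHANSPEC_CTL_SB_LL);	/* 5G, 80MHz, center 42 */
	printf("chanspec = 0x%04x\n", (unsigned)cs);	/* prints 0xe02a with the assumed values */
	return 0;
}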
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
index 9afaf2d..8603d61 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
@@ -1,7 +1,9 @@
/*
* Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rspec.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rspec.h
index 40da854..76ac971 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rspec.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmwifi_rspec.h
@@ -1,7 +1,9 @@
/*
* Common OS-independent driver header for rate management.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmxtlv.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmxtlv.c
index 59d5943..509fb2f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmxtlv.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/bcmxtlv.c
@@ -1,7 +1,9 @@
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -95,12 +97,17 @@
int len;
lenp = (const uint8 *)&elt->len; /* nominal */
- if (opts & BCM_XTLV_OPTION_IDU8) --lenp;
+ if (opts & BCM_XTLV_OPTION_IDU8) {
+ --lenp;
+ }
- if (opts & BCM_XTLV_OPTION_LENU8)
+ if (opts & BCM_XTLV_OPTION_LENU8) {
len = *lenp;
- else
+ } else if (opts & BCM_XTLV_OPTION_LENBE) {
+ len = (uint32)hton16(elt->len);
+ } else {
len = ltoh16_ua(lenp);
+ }
return len;
}
@@ -109,10 +116,13 @@
bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
{
int id = 0;
- if (opts & BCM_XTLV_OPTION_IDU8)
+ if (opts & BCM_XTLV_OPTION_IDU8) {
id = *(const uint8 *)elt;
- else
+ } else if (opts & BCM_XTLV_OPTION_IDBE) {
+ id = (uint32)hton16(elt->id);
+ } else {
id = ltoh16_ua((const uint8 *)elt);
+ }
return id;
}
@@ -217,7 +227,9 @@
*lenp = (uint8)len;
data_buf = lenp + sizeof(uint8);
} else {
- ASSERT(!"Unexpected xtlv option");
+ bool Unexpected_xtlv_option = TRUE;
+ BCM_REFERENCE(Unexpected_xtlv_option);
+ ASSERT(!Unexpected_xtlv_option);
return;
}
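The reworked bcm_xtlv_len()/bcm_xtlv_id() above select between a one-byte length/ID (the *U8 options), a big-endian 16-bit value (the new *BE options), and the default little-endian 16-bit encoding. A minimal standalone sketch of the length decode, with names local to the sketch rather than the driver's:

#include <stdint.h>
#include <stdio.h>

enum { OPT_LENU8 = 1u << 0, OPT_LENBE = 1u << 1 };

static unsigned xtlv_len(const uint8_t *lenp, unsigned opts)
{
	if (opts & OPT_LENU8)
		return lenp[0];					/* single-byte length */
	if (opts & OPT_LENBE)
		return ((unsigned)lenp[0] << 8) | lenp[1];	/* big-endian uint16 */
	return ((unsigned)lenp[1] << 8) | lenp[0];		/* little-endian uint16 (default) */
}

int main(void)
{
	const uint8_t buf[2] = { 0x01, 0x02 };

	printf("u8=%u be=%u le=%u\n",
	       xtlv_len(buf, OPT_LENU8), xtlv_len(buf, OPT_LENBE), xtlv_len(buf, 0));
	return 0;	/* prints u8=1 be=258 le=513 */
}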
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd.h
index 9f19022..c1be625 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd.h
@@ -4,7 +4,9 @@
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -37,6 +39,8 @@
#ifndef _dhd_h_
#define _dhd_h_
+#undef CONFIG_PM_WAKELOCKS
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -50,9 +54,9 @@
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#include <linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
@@ -68,7 +72,13 @@
#define ALL_INTERFACES 0xff
+/* H2D and D2H ring dumps are enabled by default */
+#ifdef PCIE_FULL_DONGLE
+#define DHD_DUMP_PCIE_RINGS
+#endif /* PCIE_FULL_DONGLE */
+
#include <wlioctl.h>
+#include <bcmstdlib_s.h>
#include <dhdioctl.h>
#include <wlfc_proto.h>
#include <hnd_armtrap.h>
@@ -89,21 +99,27 @@
#include <pom.h>
#endif /* DHD_ERPOM */
+#include <dngl_stats.h>
+
#ifdef DEBUG_DPC_THREAD_WATCHDOG
#define MAX_RESCHED_CNT 600
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
- KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
-#define WL_VENDOR_EXT_SUPPORT
-#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
-
#if defined(KEEP_ALIVE)
/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
#define KEEP_ALIVE_PERIOD 55000
#define NULL_PKT_STR "null_pkt"
#endif /* KEEP_ALIVE */
+/* Enabled by default from here; the workqueue (WQ) code will be removed later */
+#define DHD_USE_KTHREAD_FOR_LOGTRACE
+
+/*
+ * Earlier, DHD had its own time stamp for printk and the
+ * dongle had its own time stamp for console messages.
+ * With this flag, DHD and dongle console messages share the same time base.
+ */
+#define DHD_H2D_LOG_TIME_SYNC
/* Forward decls */
struct dhd_bus;
struct dhd_prot;
@@ -111,6 +127,9 @@
struct dhd_ioctl;
struct dhd_dbg;
struct dhd_ts;
+#ifdef DNGL_AXI_ERROR_LOGGING
+struct dhd_axi_error_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* The level of bus communication with the dongle */
enum dhd_bus_state {
@@ -153,6 +172,7 @@
#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000
#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000
#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000
+#define DHD_BUS_BUSY_IN_HALDUMP 0x8000
#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
@@ -179,11 +199,13 @@
#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
@@ -215,6 +237,8 @@
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP
#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
@@ -248,6 +272,8 @@
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP)
#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP)
#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
((dhdp)->dhd_bus_busy_state == 0)
@@ -258,18 +284,43 @@
(DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
+#define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \
+ ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp))
+
#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \
((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS)
-#ifndef USEC_PER_SEC
-#define USEC_PER_SEC (1000 * 1000)
-#endif // endif
+/* IOVar flags for common error checks */
+#define DHD_IOVF_PWRREQ_BYPASS	(1<<0) /* flag to prevent backplane access during host sleep state */
+
+#define MAX_MTU_SZ (1600u)
+
+/* (u64)result = (u64)dividend / (u64)divisor */
+#define DIV_U64_BY_U64(dividend, divisor) div64_u64(dividend, divisor)
+
+/* (u64)result = (u64)dividend / (u32)divisor */
+#define DIV_U64_BY_U32(dividend, divisor) div_u64(dividend, divisor)
+
+/* Be careful when using this, as it also modifies the dividend:
+ * (u32)remainder = (u64)dividend % (u32)divisor
+ * (u64)dividend = (u64)dividend / (u32)divisor
+ */
+#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) do_div(dividend, divisor)
+
+/* (u32)remainder = (u64)dividend % (u32)divisor */
+#define MOD_U64_BY_U32(dividend, divisor) ({ \
+ uint64 temp_dividend = (dividend); \
+ uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor)); \
+ rem; \
+})
#define SEC_USEC_FMT \
- "%llu.%6llu"
+ "%5llu.%06u"
+/* t: time in nanoseconds */
#define GET_SEC_USEC(t) \
- (t) / USEC_PER_SEC, (t) % USEC_PER_SEC
+ DIV_U64_BY_U32(t, NSEC_PER_SEC), \
+ ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))
/* Download Types */
typedef enum download_type {
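MOD_U64_BY_U32 copies the dividend into a temporary before calling DIV_AND_MOD_U64_BY_U32 because the kernel's do_div() updates the dividend in place and evaluates to the remainder; GET_SEC_USEC then splits a nanosecond timestamp into the seconds/microseconds pair expected by SEC_USEC_FMT. A minimal userspace sketch of that behaviour, with emulate_do_div() standing in for do_div() purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000u
#define NSEC_PER_USEC	1000u

/* Emulates do_div(): divides the dividend in place and returns the remainder. */
static uint32_t emulate_do_div(uint64_t *dividend, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*dividend % divisor);

	*dividend /= divisor;
	return rem;
}

int main(void)
{
	uint64_t ts_ns = 12345678912345ull;	/* example timestamp in nanoseconds */
	uint64_t secs = ts_ns;			/* copy, so the caller keeps ts_ns intact */
	uint32_t rem_ns = emulate_do_div(&secs, NSEC_PER_SEC);

	/* mirrors the "%5llu.%06u" layout of SEC_USEC_FMT */
	printf("%5llu.%06u\n", (unsigned long long)secs, rem_ns / NSEC_PER_USEC);
	return 0;	/* prints "12345.678912" */
}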
@@ -281,7 +332,7 @@
/* For supporting multiple interfaces */
#define DHD_MAX_IFS 16
-#define DHD_MAX_STATIC_IFS 1
+#define DHD_MAX_STATIC_IFS 5
#define DHD_DEL_IF -0xE
#define DHD_BAD_IF -0xF
#define DHD_DUMMY_INFO_IF 0xDEAF /* Hack i/f to handle events from INFO Ring */
@@ -323,9 +374,13 @@
#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */
#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
-#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */
#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */
+#ifndef CUSTOM_SCAN_PASSIVE_TIME
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
+#else
+#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */
+#endif /* CUSTOM_SCAN_PASSIVE_TIME */
#ifndef POWERUP_MAX_RETRY
#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
@@ -344,7 +399,7 @@
#ifdef DHD_DEBUG
#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
-#endif // endif
+#endif /* DHD_DEBUG */
#ifndef CONFIG_BCMDHD_CLM_PATH
#ifdef OEM_ANDROID
@@ -356,108 +411,113 @@
#define WL_CCODE_NULL_COUNTRY "#n"
#define FW_VER_STR_LEN 128
+#define FWID_STR_LEN 256
#define CLM_VER_STR_LEN 128
#define BUS_API_REV_STR_LEN 128
+#define FW_VER_STR "Version"
+#define FWID_STR_1 "FWID: 01-"
+#define FWID_STR_2 "FWID=01-"
extern char bus_api_revision[];
enum dhd_bus_wake_state {
- WAKE_LOCK_OFF,
- WAKE_LOCK_PRIV,
- WAKE_LOCK_DPC,
- WAKE_LOCK_IOCTL,
- WAKE_LOCK_DOWNLOAD,
- WAKE_LOCK_TMOUT,
- WAKE_LOCK_WATCHDOG,
- WAKE_LOCK_LINK_DOWN_TMOUT,
- WAKE_LOCK_PNO_FIND_TMOUT,
- WAKE_LOCK_SOFTAP_SET,
- WAKE_LOCK_SOFTAP_STOP,
- WAKE_LOCK_SOFTAP_START,
- WAKE_LOCK_SOFTAP_THREAD
+ WAKE_LOCK_OFF = 0,
+ WAKE_LOCK_PRIV = 1,
+ WAKE_LOCK_DPC = 2,
+ WAKE_LOCK_IOCTL = 3,
+ WAKE_LOCK_DOWNLOAD = 4,
+ WAKE_LOCK_TMOUT = 5,
+ WAKE_LOCK_WATCHDOG = 6,
+ WAKE_LOCK_LINK_DOWN_TMOUT = 7,
+ WAKE_LOCK_PNO_FIND_TMOUT = 8,
+ WAKE_LOCK_SOFTAP_SET = 9,
+ WAKE_LOCK_SOFTAP_STOP = 10,
+ WAKE_LOCK_SOFTAP_START = 11,
+ WAKE_LOCK_SOFTAP_THREAD = 12
};
enum dhd_prealloc_index {
- DHD_PREALLOC_PROT = 0,
- DHD_PREALLOC_RXBUF,
- DHD_PREALLOC_DATABUF,
- DHD_PREALLOC_OSL_BUF,
-#if defined(STATIC_WL_PRIV_STRUCT)
- DHD_PREALLOC_WIPHY_ESCAN0 = 5,
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- DHD_PREALLOC_WIPHY_ESCAN1,
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
-#endif /* STATIC_WL_PRIV_STRUCT */
- DHD_PREALLOC_DHD_INFO = 7,
- DHD_PREALLOC_DHD_WLFC_INFO = 8,
- DHD_PREALLOC_IF_FLOW_LKUP = 9,
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+ DHD_PREALLOC_OSL_BUF = 3,
+ /* 4 */
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
/* 10 */
- DHD_PREALLOC_MEMDUMP_RAM = 11,
- DHD_PREALLOC_DHD_WLFC_HANGER = 12,
- DHD_PREALLOC_PKTID_MAP = 13,
- DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
- DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17
};
enum dhd_dongledump_mode {
- DUMP_DISABLED = 0,
- DUMP_MEMONLY,
- DUMP_MEMFILE,
- DUMP_MEMFILE_BUGON,
- DUMP_MEMFILE_MAX
+ DUMP_DISABLED = 0,
+ DUMP_MEMONLY = 1,
+ DUMP_MEMFILE = 2,
+ DUMP_MEMFILE_BUGON = 3,
+ DUMP_MEMFILE_MAX = 4
};
enum dhd_dongledump_type {
- DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
- DUMP_TYPE_D3_ACK_TIMEOUT,
- DUMP_TYPE_DONGLE_TRAP,
- DUMP_TYPE_MEMORY_CORRUPTION,
- DUMP_TYPE_PKTID_AUDIT_FAILURE,
- DUMP_TYPE_PKTID_INVALID,
- DUMP_TYPE_SCAN_TIMEOUT,
- DUMP_TYPE_SCAN_BUSY,
- DUMP_TYPE_BY_SYSDUMP,
- DUMP_TYPE_BY_LIVELOCK,
- DUMP_TYPE_AP_LINKUP_FAILURE,
- DUMP_TYPE_AP_ABNORMAL_ACCESS,
- DUMP_TYPE_CFG_VENDOR_TRIGGERED,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_TX,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_RX,
- DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR,
- DUMP_TYPE_TRANS_ID_MISMATCH,
- DUMP_TYPE_IFACE_OP_FAILURE,
-#ifdef DEBUG_DNGL_INIT_FAIL
- DUMP_TYPE_DONGLE_INIT_FAILURE,
-#endif /* DEBUG_DNGL_INIT_FAIL */
-#ifdef SUPPORT_LINKDOWN_RECOVERY
- DUMP_TYPE_READ_SHM_FAIL,
-#endif /* SUPPORT_LINKDOWN_RECOVERY */
- DUMP_TYPE_DONGLE_HOST_EVENT,
- DUMP_TYPE_SMMU_FAULT,
- DUMP_TYPE_RESUMED_UNKNOWN,
-#ifdef DHD_ERPOM
- DUMP_TYPE_DUE_TO_BT,
-#endif /* DHD_ERPOM */
- DUMP_TYPE_BY_USER
+ DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+ DUMP_TYPE_D3_ACK_TIMEOUT = 2,
+ DUMP_TYPE_DONGLE_TRAP = 3,
+ DUMP_TYPE_MEMORY_CORRUPTION = 4,
+ DUMP_TYPE_PKTID_AUDIT_FAILURE = 5,
+ DUMP_TYPE_PKTID_INVALID = 6,
+ DUMP_TYPE_SCAN_TIMEOUT = 7,
+ DUMP_TYPE_SCAN_BUSY = 8,
+ DUMP_TYPE_BY_SYSDUMP = 9,
+ DUMP_TYPE_BY_LIVELOCK = 10,
+ DUMP_TYPE_AP_LINKUP_FAILURE = 11,
+ DUMP_TYPE_AP_ABNORMAL_ACCESS = 12,
+ DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15,
+ DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16,
+ DUMP_TYPE_TRANS_ID_MISMATCH = 17,
+ DUMP_TYPE_IFACE_OP_FAILURE = 18,
+ DUMP_TYPE_DONGLE_INIT_FAILURE = 19,
+ DUMP_TYPE_READ_SHM_FAIL = 20,
+ DUMP_TYPE_DONGLE_HOST_EVENT = 21,
+ DUMP_TYPE_SMMU_FAULT = 22,
+ DUMP_TYPE_RESUMED_UNKNOWN = 23,
+ DUMP_TYPE_DUE_TO_BT = 24,
+ DUMP_TYPE_LOGSET_BEYOND_RANGE = 25,
+ DUMP_TYPE_BY_USER = 26,
+ DUMP_TYPE_CTO_RECOVERY = 27,
+ DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28,
+ DUMP_TYPE_PROXD_TIMEOUT = 29,
+ DUMP_TYPE_PKTID_POOL_DEPLETED = 30
};
enum dhd_hang_reason {
- HANG_REASON_MASK = 0x8000,
- HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
- HANG_REASON_DONGLE_TRAP = 0x8002,
- HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
- HANG_REASON_BUS_DOWN = 0x8004,
- HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
- HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
- HANG_REASON_HT_AVAIL_ERROR = 0x8008,
- HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
- HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
- HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
- HANG_REASON_PCIE_LINK_DOWN = 0x8805,
- HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
- HANG_REASON_UNKNOWN = 0x8807,
- HANG_REASON_MAX = 0x8808
+ HANG_REASON_MASK = 0x8000,
+ HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+ HANG_REASON_DONGLE_TRAP = 0x8002,
+ HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+ HANG_REASON_BUS_DOWN = 0x8004,
+ HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+ HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
+ HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+ HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+ HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
+ HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C,
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D,
+ HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E,
+ HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805,
+ HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
+ HANG_REASON_UNKNOWN = 0x8807,
+ HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808,
+ HANG_REASON_PCIE_CTO_DETECT = 0x8809,
+ HANG_REASON_MAX = 0x880A
};
#define WLC_E_DEAUTH_MAX_REASON 0x0FFF
@@ -474,7 +534,13 @@
};
#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1)
+
+#ifdef DHD_EWPR_VER2
+#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111
+#else
#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905
+#endif // endif
+
#define HANG_INFO_TRAP_T_NAME_MAX 6
#define HANG_INFO_TRAP_T_REASON_IDX 0
#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2
@@ -493,10 +559,19 @@
#define HANG_KEY_DEL ' '
#define HANG_RAW_DEL '_'
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BIGDATA_EXTRA_KEY 4
+#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5
+#endif // endif
+
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif // endif
+#if defined(BCMSPI)
+#define DHD_SDALIGN 4
+#else
+#define DHD_SDALIGN 32
+#endif /* BCMSPI */
+#endif /* !DHD_SDALIGN */
#define DHD_TX_CONTEXT_MASK 0xff
#define DHD_TX_START_XMIT 0x01
@@ -635,13 +710,43 @@
#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
#ifdef DHD_LOG_DUMP
+#define DUMP_SSSR_ATTR_START 2
+#define DUMP_SSSR_ATTR_COUNT 6
+
+typedef enum {
+ SSSR_C0_D11_BEFORE = 0,
+ SSSR_C0_D11_AFTER = 1,
+ SSSR_C1_D11_BEFORE = 2,
+ SSSR_C1_D11_AFTER = 3,
+ SSSR_DIG_BEFORE = 4,
+ SSSR_DIG_AFTER = 5
+} EWP_SSSR_DUMP;
+
+typedef enum {
+ DLD_BUF_TYPE_GENERAL = 0,
+ DLD_BUF_TYPE_PRESERVE = 1,
+ DLD_BUF_TYPE_SPECIAL = 2,
+ DLD_BUF_TYPE_ECNTRS = 3,
+ DLD_BUF_TYPE_FILTER = 4,
+ DLD_BUF_TYPE_ALL = 5
+} log_dump_type_t;
+
#define LOG_DUMP_MAGIC 0xDEB3DEB3
#define HEALTH_CHK_BUF_SIZE 256
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
#define ECNTR_RING_ID 0xECDB
#define ECNTR_RING_NAME "ewp_ecntr_ring"
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+#define RTT_RING_ID 0xADCD
+#define RTT_RING_NAME "ewp_rtt_ring"
+#endif /* EWP_RTT_LOGGING */
+
+#if defined(DEBUGABILITY) && defined(EWP_ECNTRS_LOGGING)
+#error "Duplicate rings will be created since both the features are enabled"
+#endif /* DEBUGABILITY && EWP_ECNTRS_LOGGING */
typedef enum {
LOG_DUMP_SECTION_GENERAL = 0,
@@ -651,7 +756,10 @@
LOG_DUMP_SECTION_EXT_TRAP,
LOG_DUMP_SECTION_HEALTH_CHK,
LOG_DUMP_SECTION_PRESERVE,
- LOG_DUMP_SECTION_COOKIE
+ LOG_DUMP_SECTION_COOKIE,
+ LOG_DUMP_SECTION_FLOWRING,
+ LOG_DUMP_SECTION_STATUS,
+ LOG_DUMP_SECTION_RTT
} log_dump_section_type_t;
/* Each section in the debug_dump log file shall begin with a header */
@@ -682,8 +790,6 @@
extern void dhd_log_dump_write(int type, char *binary_data,
int binary_len, const char *fmt, ...);
-extern char *dhd_log_dump_get_timestamp(void);
-bool dhd_log_dump_ecntr_enabled(void);
#endif /* DHD_LOG_DUMP */
/* DEBUG_DUMP SUB COMMAND */
@@ -700,17 +806,29 @@
#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted"
#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected"
+#ifdef DNGL_AXI_ERROR_LOGGING
+#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error"
+#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal"
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
extern void get_debug_dump_time(char *str);
extern void clear_debug_dump_time(char *str);
#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
extern void copy_debug_dump_time(char *dest, char *src);
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
-#define WL_MAX_PRESERVE_BUFFER 8
#define FW_LOGSET_MASK_ALL 0xFF
+#ifdef WL_MONITOR
+#define MONPKT_EXTRA_LEN 48u
+#endif /* WL_MONITOR */
+
+#define DHDIF_FWDER(dhdif) FALSE
+
#if defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/"
+#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+#define DHD_COMMON_DUMP_PATH "/data/vendor/wifi/"
#elif defined(OEM_ANDROID) /* For Brix Live Image */
#define DHD_COMMON_DUMP_PATH "/installmedia/"
#else /* Default */
@@ -745,12 +863,76 @@
/* see wlfc_proto.h for tx status details */
#define DHD_MAX_TX_STATUS_MSGS 9u
+#ifdef TX_STATUS_LATENCY_STATS
typedef struct dhd_if_tx_status_latency {
/* total number of tx_status received on this interface */
uint64 num_tx_status;
/* cumulative tx_status latency for this interface */
uint64 cum_tx_status_latency;
} dhd_if_tx_status_latency_t;
+#endif /* TX_STATUS_LATENCY_STATS */
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+/* Timestamps to trace dhd_logtrace_thread() */
+struct dhd_logtrace_thr_ts {
+ uint64 entry_time;
+ uint64 sem_down_time;
+ uint64 flush_time;
+ uint64 unexpected_break_time;
+ uint64 complete_time;
+};
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+
+/* Enable Reserve STA flowrings only for Android */
+#if defined(OEM_ANDROID)
+#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+#endif /* OEM_ANDROID */
+
+typedef enum dhd_induce_error_states
+{
+ DHD_INDUCE_ERROR_CLEAR = 0x0,
+ DHD_INDUCE_IOCTL_TIMEOUT = 0x1,
+ DHD_INDUCE_D3_ACK_TIMEOUT = 0x2,
+ DHD_INDUCE_LIVELOCK = 0x3,
+ DHD_INDUCE_DROP_OOB_IRQ = 0x4,
+ DHD_INDUCE_DROP_AXI_SIG = 0x5,
+ DHD_INDUCE_ERROR_MAX = 0x6
+} dhd_induce_error_states_t;
+
+#ifdef DHD_HP2P
+#define MAX_TX_HIST_BIN 16
+#define MAX_RX_HIST_BIN 10
+#define MAX_HP2P_FLOWS 16
+#define HP2P_PRIO 7
+#define HP2P_PKT_THRESH 48
+#define HP2P_TIME_THRESH 200
+#define HP2P_PKT_EXPIRY 40
+#define HP2P_TIME_SCALE 32
+
+typedef struct hp2p_info {
+ void *dhd_pub;
+ uint16 flowid;
+ bool hrtimer_init;
+ void *ring;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
+ struct tasklet_hrtimer timer;
+#else
+ struct hrtimer timer;
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)) */
+ uint64 num_pkt_limit;
+ uint64 num_timer_limit;
+ uint64 num_timer_start;
+ uint64 tx_t0[MAX_TX_HIST_BIN];
+ uint64 tx_t1[MAX_TX_HIST_BIN];
+ uint64 rx_t0[MAX_RX_HIST_BIN];
+} hp2p_info_t;
+#endif /* DHD_HP2P */
+
+typedef enum {
+ FW_UNLOADED = 0,
+ FW_DOWNLOAD_IN_PROGRESS = 1,
+ FW_DOWNLOAD_DONE = 2
+} fw_download_status_t;
/**
* Common structure for module and instance linkage.
@@ -763,6 +945,9 @@
struct dhd_prot *prot; /* Protocol module handle */
struct dhd_info *info; /* Info module handle */
struct dhd_dbg *dbg; /* Debugability module handle */
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ struct dhd_logtrace_thr_ts logtrace_thr_ts;
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
/* to NDIS developer, the structure dhd_common is redundant,
* please do NOT merge it back from other branches !!!
@@ -781,7 +966,11 @@
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
uint8 wme_dp; /* wme discard priority */
-
+#ifdef DNGL_AXI_ERROR_LOGGING
+ uint32 axierror_logbuf_addr;
+ bool axi_error;
+ struct dhd_axi_error_dump *axi_err_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* Dongle media info */
bool iswl; /* Dongle-resident driver is wl */
ulong drv_version; /* Version of dongle-resident driver */
@@ -808,7 +997,7 @@
ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
ulong fc_packets; /* Number of flow control pkts recvd */
-
+ ulong tx_big_packets; /* Dropped data packets that are larger than MAX_MTU_SZ */
#ifdef DMAMAP_STATS
/* DMA Mapping statistics */
dma_stats_t dma_stats;
@@ -860,10 +1049,10 @@
*/
/* #define WL_ENABLE_P2P_IF 1 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
- struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
- struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID) */
+#if defined(OEM_ANDROID)
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif /* defined(OEM_ANDROID) */
#ifdef PROP_TXSTATUS
bool wlfc_enabled;
@@ -912,15 +1101,32 @@
#endif // endif
bool dongle_isolation;
bool is_pcie_watchdog_reset;
+
+/* Begin - Variables to track Bus Errors */
bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */
+ bool is_sched_error; /* flag to indicate timeout due to scheduling issue */
#ifdef PCIE_FULL_DONGLE
bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */
+ bool livelock_occured; /* flag to indicate livelock occured */
+ bool pktid_audit_failed; /* flag to indicate pktid audit failure */
#endif /* PCIE_FULL_DONGLE */
+ bool iface_op_failed; /* flag to indicate interface operation failed */
+ bool scan_timeout_occurred; /* flag to indicate scan has timedout */
+ bool scan_busy_occurred; /* flag to indicate scan busy occurred */
#ifdef BT_OVER_SDIO
bool is_bt_recovery_required;
#endif // endif
+ bool smmu_fault_occurred; /* flag to indicate SMMU Fault */
+ /*
+ * Add any new variables to track Bus errors above
+ * this line. Also ensure that the variable is
+ * cleared from dhd_clear_bus_errors
+ */
+/* End - Variables to track Bus Errors */
+
int hang_was_sent;
+ int hang_was_pending;
int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
int txcnt_timeout; /* counter txcnt timeout to send HANG */
#ifdef BCMPCIE
@@ -975,6 +1181,8 @@
void *if_flow_lkup; /* per interface flowid lkup hash table */
void *flowid_lock; /* per os lock for flowid info protection */
void *flowring_list_lock; /* per os lock for flowring list protection */
+ uint8 max_multi_client_flow_rings;
+ uint8 multi_client_flow_rings;
uint32 num_flow_rings;
cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */
@@ -986,6 +1194,9 @@
bool dma_h2d_ring_upd_support;
bool dma_ring_upd_overwrite; /* host overwrites support setting */
+ bool hwa_enable;
+ uint hwa_inited;
+
bool idma_enable;
uint idma_inited;
@@ -1002,6 +1213,7 @@
#endif /* DHD_L2_FILTER */
#ifdef DHD_SSSR_DUMP
bool sssr_inited;
+ bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */
sssr_reg_info_v1_t sssr_reg_info;
uint8 *sssr_mempool;
uint *sssr_d11_before[MAX_NUM_D11CORES];
@@ -1009,10 +1221,16 @@
bool sssr_d11_outofreset[MAX_NUM_D11CORES];
uint *sssr_dig_buf_before;
uint *sssr_dig_buf_after;
+ uint32 sssr_dump_mode;
+ bool collect_sssr; /* Flag to indicate SSSR dump is required */
#endif /* DHD_SSSR_DUMP */
uint8 *soc_ram;
uint32 soc_ram_length;
uint32 memdump_type;
+#ifdef DHD_RND_DEBUG
+ uint8 *rnd_buf;
+ uint32 rnd_len;
+#endif /* DHD_RND_DEBUG */
#ifdef DHD_FW_COREDUMP
uint32 memdump_enabled;
#ifdef DHD_DEBUG_UART
@@ -1044,7 +1262,7 @@
bool h2d_phase_supported;
bool force_dongletrap_on_bad_h2d_phase;
uint32 dongle_trap_data;
- bool fw_download_done;
+ fw_download_status_t fw_download_status;
trap_t last_trap_info; /* trap info from the last trap */
uint8 rand_mac_oui[DOT11_OUI_LEN];
#ifdef DHD_LOSSLESS_ROAMING
@@ -1076,9 +1294,12 @@
uint64 last_file_posn;
int logdump_periodic_flush;
/* ecounter debug ring */
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
void *ecntr_dbg_ring;
#endif // endif
+#ifdef EWP_RTT_LOGGING
+ void *rtt_dbg_ring;
+#endif // endif
#ifdef DNGL_EVENT_SUPPORT
uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE];
#endif // endif
@@ -1109,12 +1330,6 @@
uint pcie_txs_metadata_enable;
uint wbtext_policy; /* wbtext policy of dongle */
bool wbtext_support; /* for product policy only */
-#ifdef SHOW_LOGTRACE
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- struct mutex dhd_trace_lock;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
-#endif /* SHOW_LOGTRACE */
-
bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */
tput_test_t tput_data;
uint64 tput_start_ts;
@@ -1141,15 +1356,76 @@
struct _dhd_dump_file_manage *dump_file_manage;
#endif /* DHD_DUMP_MNGR */
int debug_dump_subcmd;
+ uint64 debug_dump_time_sec;
+ bool hscb_enable;
wait_queue_head_t tx_completion_wait;
uint32 batch_tx_pkts_cmpl;
uint32 batch_tx_num_pkts;
#ifdef DHD_ERPOM
+ bool enable_erpom;
pom_func_handler_t pom_wlan_handler;
+ int (*pom_func_register)(pom_func_handler_t *func);
+ int (*pom_func_deregister)(pom_func_handler_t *func);
+ int (*pom_toggle_reg_on)(uchar func_id, uchar reason);
#endif /* DHD_ERPOM */
+#ifdef EWP_EDL
+ bool dongle_edl_support;
+ dhd_dma_buf_t edl_ring_mem;
+#endif /* EWP_EDL */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
struct mutex ndev_op_sync;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+
+ bool debug_buf_dest_support;
+ uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX];
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+#define DHD_H2D_LOG_TIME_STAMP_MATCH (10000) /* 10 Seconds */
+ /*
+ * Interval for updating the dongle console message time stamp with the Host (DHD)
+ * time stamp
+ */
+ uint32 dhd_rte_time_sync_ms;
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+ int wlc_ver_major;
+ int wlc_ver_minor;
+#ifdef DHD_STATUS_LOGGING
+ void *statlog;
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_HP2P
+ bool hp2p_enable;
+ bool hp2p_infra_enable;
+ bool hp2p_capable;
+ bool hp2p_ts_capable;
+ uint16 pkt_thresh;
+ uint16 time_thresh;
+ uint16 pkt_expiry;
+ hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
+ bool hp2p_ring_active;
+#endif /* DHD_HP2P */
+#ifdef DHD_DB0TS
+ bool db0ts_capable;
+#endif /* DHD_DB0TS */
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
+ uint16 dhd_induce_error;
+#ifdef CONFIG_SILENT_ROAM
+ bool sroam_turn_on; /* Silent roam monitor enable flags */
+ bool sroamed; /* Silent roam monitor check flags */
+#endif /* CONFIG_SILENT_ROAM */
+ bool extdtxs_in_txcpl;
+ bool hostrdy_after_init;
+#ifdef SUPPORT_SET_TID
+ uint8 tid_mode;
+ uint32 target_uid;
+ uint8 target_tid;
+#endif /* SUPPORT_SET_TID */
+#ifdef DHD_PKTDUMP_ROAM
+ void *pktcnts;
+#endif /* DHD_PKTDUMP_ROAM */
+ bool disable_dtim_in_suspend; /* Disable set bcn_li_dtim in suspend */
+#ifdef REVERSE_AIFSN
+ bool aifsn_reverse;
+#endif /* REVERSE_AIFSN */
} dhd_pub_t;
typedef struct {
@@ -1178,13 +1454,17 @@
/* Packet Tag for PCIE Full Dongle DHD */
typedef struct dhd_pkttag_fd {
uint16 flowid; /* Flowring Id */
- uint16 dataoff; /* start of packet */
+ uint16 ifid;
+#ifndef DHD_PCIE_PKTID
uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */
dmaaddr_t pa; /* physical address */
void *dmah; /* dma mapper handle */
void *secdma; /* secure dma sec_cma_info handle */
+#endif /* !DHD_PCIE_PKTID */
+#if defined(TX_STATUS_LATENCY_STATS)
uint64 q_time_us; /* time when tx pkt queued to flowring */
- uint16 flags;
+#endif // endif
+ uint16 flags;
} dhd_pkttag_fd_t;
/* Packet Tag for DHD PCIE Full Dongle */
@@ -1214,9 +1494,11 @@
#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
+#if defined(TX_STATUS_LATENCY_STATS)
#define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us)
#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
+#endif // endif
#define DHD_PKT_GET_FLAGS(pkt) ((DHD_PKTTAG_FD(pkt))->flags)
#define DHD_PKT_SET_FLAGS(pkt, pkt_flags) \
@@ -1241,7 +1523,7 @@
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
#endif /* (BCMWDF) */
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ #if defined(CONFIG_PM_SLEEP)
#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
#define _DHD_PM_RESUME_WAIT(a, b) do {\
@@ -1289,7 +1571,7 @@
} \
} while (0)
- #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+ #endif /* CONFIG_PM_SLEEP */
#ifndef OSL_SLEEP
#define OSL_SLEEP(ms) OSL_DELAY(ms*1000)
@@ -1331,23 +1613,23 @@
inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_init(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#endif /* OEM_ANDROID */
}
inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_lock(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#endif /* OEM_ANDROID */
}
inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_unlock(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#endif /* OEM_ANDROID */
}
#ifdef DHD_DEBUG_WAKE_LOCK
@@ -1521,7 +1803,7 @@
#define OOB_WAKE_LOCK_TIMEOUT 500
extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
-extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+
#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
#endif /* BCMPCIE_OOB_HOST_WAKE */
@@ -1548,8 +1830,7 @@
void dhd_net_if_unlock(struct net_device *dev);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID) && \
- defined(BCMSDIO)
+#if defined(OEM_ANDROID) && defined(BCMSDIO)
extern struct mutex _dhd_sdio_mutex_lock_;
#endif // endif
#endif /* MULTIPLE_SUPPLICANT */
@@ -1621,12 +1902,16 @@
* bus_hdrlen specifies required headroom for bus module header.
*/
extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock);
#if defined(WLP2P) && defined(WL_CFG80211)
/* To allow attach/detach calls corresponding to p2p0 interface */
extern int dhd_attach_p2p(dhd_pub_t *);
extern int dhd_detach_p2p(dhd_pub_t *);
#endif /* WLP2P && WL_CFG80211 */
extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+#ifdef WL_VIF_SUPPORT
+extern int dhd_register_vif(dhd_pub_t *dhdp);
+#endif /* WL_VIF_SUPPORT */
/* Indication from bus module regarding removal/absence of dongle */
extern void dhd_detach(dhd_pub_t *dhdp);
@@ -1656,6 +1941,10 @@
/* Notify tx completion */
extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx);
+extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp);
@@ -1687,6 +1976,10 @@
#define WIFI_FEATURE_CONTROL_ROAMING 0x800000 /* Enable/Disable firmware roaming */
#define WIFI_FEATURE_FILTER_IE 0x1000000 /* Probe req ie filter */
#define WIFI_FEATURE_SCAN_RAND 0x2000000 /* Support MAC & Prb SN randomization */
+#define WIFI_FEATURE_SET_TX_POWER_LIMIT 0x4000000 /* Support Tx Power Limit setting */
+#define WIFI_FEATURE_USE_BODY_HEAD_SAR 0x8000000 /* Body/Head Proximity for SAR */
+#define WIFI_FEATURE_SET_LATENCY_MODE 0x40000000 /* Support Latency mode setting */
+#define WIFI_FEATURE_P2P_RAND_MAC 0x80000000 /* Support P2P MAC randomization */
#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */
#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3
@@ -1740,11 +2033,22 @@
{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; }
#endif // endif
#ifdef SHOW_LOGTRACE
+/* Bound and delay are fine-tuned after several experiments; these are
+ * the best-case values for handling bursts of console logs.
+ */
+#define DHD_EVENT_LOGTRACE_BOUND 10
+/* Since the FW has an event log rate health check (EVENT_LOG_RATE_HC), the
+ * reschedule delay can be reduced to 10 ms.
+ */
+#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u
extern int dhd_os_read_file(void *file, char *buf, uint32 size);
extern int dhd_os_seek_file(void *file, int64 offset);
+void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
#endif /* SHOW_LOGTRACE */
int dhd_os_write_file_posn(void *fp, unsigned long *posn,
void *buf, unsigned long buflen);
+int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
+ uint message_number, bool unmask);
extern void
dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
@@ -1807,7 +2111,13 @@
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
#endif /* DHD_FW_COREDUMP */
-void dhd_schedule_sssr_dump(dhd_pub_t *dhdp);
+void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
+#ifdef DNGL_AXI_ERROR_LOGGING
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef BCMPCIE
+void dhd_schedule_cto_recovery(dhd_pub_t *dhdp);
+#endif /* BCMPCIE */
#ifdef PKT_FILTER_SUPPORT
#define DHD_UNICAST_FILTER_NUM 0
@@ -1835,6 +2145,7 @@
#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32
#define MAX_PKTFLT_FIXED_BUF_SIZE \
(WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2)
+#define MAXPKT_ARG 16
#endif /* PKT_FILTER_SUPPORT */
#if defined(OEM_ANDROID) && defined(BCMPCIE)
@@ -1871,7 +2182,7 @@
#ifdef SHOW_LOGTRACE
typedef struct {
- int num_fmts;
+ uint num_fmts;
char **fmts;
char *raw_fmts;
char *raw_sstr;
@@ -1889,30 +2200,6 @@
} dhd_event_log_t;
#endif /* SHOW_LOGTRACE */
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
-#define PCIE_DMAXFER_LPBK_LENGTH 4096
-typedef struct dhd_pcie_dmaxfer_lpbk {
- union {
- uint32 length;
- uint32 status;
- } u;
- uint32 srcdelay;
- uint32 destdelay;
- uint32 lpbkmode;
- uint32 wait;
- uint32 core;
-} dhd_pcie_dmaxfer_lpbk_t;
-#endif /* DHD_NON_DMA_M2M_CORRUPTION */
-enum d11_lpbk_type {
- M2M_DMA_LPBK = 0,
- D11_LPBK = 1,
- BMC_LPBK = 2,
- M2M_NON_DMA_LPBK = 3,
- D11_HOST_MEM_LPBK = 4,
- BMC_HOST_MEM_LPBK = 5,
- MAX_LPBK = 6
-};
-
#ifdef KEEP_ALIVE
extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec);
@@ -1997,6 +2284,9 @@
#ifdef LOG_INTO_TCPDUMP
extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
#endif /* LOG_INTO_TCPDUMP */
+#ifdef SHOW_LOGTRACE
+void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
+#endif // endif
extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
extern uint dhd_bus_status(dhd_pub_t *dhdp);
extern int dhd_bus_start(dhd_pub_t *dhdp);
@@ -2142,6 +2432,14 @@
/* Roaming mode control */
extern uint dhd_radio_up;
+/* TCM verification control */
+extern uint dhd_tcm_test_enable;
+
+/* Disable BUG_ON(1) */
+#ifdef DHD_FW_COREDUMP
+extern uint disable_bug_on;
+#endif /* DHD_FW_COREDUMP */
+
/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
extern int dhd_idletime;
#ifdef DHD_USE_IDLECOUNT
@@ -2326,14 +2624,42 @@
#define VENDOR_PATH ""
#endif /* ANDROID_PLATFORM_VERSION */
-#ifdef DHD_LEGACY_FILE_PATH
+#if defined(ANDROID_PLATFORM_VERSION)
+#if (ANDROID_PLATFORM_VERSION < 9)
+#ifdef WL_STATIC_IF
+#undef WL_STATIC_IF
+#endif /* WL_STATIC_IF */
+#ifdef WL_STATIC_IFNAME_PREFIX
+#undef WL_STATIC_IFNAME_PREFIX
+#endif /* WL_STATIC_IFNAME_PREFIX */
+#endif /* ANDROID_PLATFORM_VERSION < 9 */
+#endif /* ANDROID_PLATFORM_VERSION */
+
+#if defined(DHD_LEGACY_FILE_PATH)
#define PLATFORM_PATH "/data/"
#elif defined(PLATFORM_SLP)
#define PLATFORM_PATH "/opt/etc/"
#else
-#define PLATFORM_PATH "/data/misc/conn/"
+#if defined(ANDROID_PLATFORM_VERSION)
+#if (ANDROID_PLATFORM_VERSION >= 9)
+#define PLATFORM_PATH "/data/vendor/conn/"
+#define DHD_MAC_ADDR_EXPORT
+#define DHD_ADPS_BAM_EXPORT
+#define DHD_EXPORT_CNTL_FILE
+#define DHD_SOFTAP_DUAL_IF_INFO
+#define DHD_SEND_HANG_PRIVCMD_ERRORS
+#else
+#define PLATFORM_PATH "/data/misc/conn/"
+#endif /* ANDROID_PLATFORM_VERSION >= 9 */
+#else
+#define PLATFORM_PATH "/data/misc/conn/"
+#endif /* ANDROID_PLATFORM_VERSION */
#endif /* DHD_LEGACY_FILE_PATH */
+#ifdef DHD_MAC_ADDR_EXPORT
+extern struct ether_addr sysfs_mac_addr;
+#endif /* DHD_MAC_ADDR_EXPORT */
+
/* Flag to indicate if we should download firmware on driver load */
extern uint dhd_download_fw_on_driverload;
extern int allow_delay_fwdl;
@@ -2342,6 +2668,7 @@
extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
+extern int dhd_file_delete(char *path);
#ifdef READ_MACADDR
extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
@@ -2354,6 +2681,10 @@
static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif /* WRITE_MACADDR */
#ifdef USE_CID_CHECK
+#define MAX_VNAME_LEN 64
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char cidinfostr[MAX_VNAME_LEN];
+#endif /* DHD_EXPORT_CNTL_FILE */
extern int dhd_check_module_cid(dhd_pub_t *dhdp);
extern char *dhd_get_cid_info(unsigned char *vid, int vid_length);
#else
@@ -2379,9 +2710,9 @@
#define BCM4359_MODULE_TYPE_B90B 1
#define BCM4359_MODULE_TYPE_B90S 2
#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
-#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
-extern int dhd_check_module_bcm4361(char *module_type, int index, bool *is_murata_fem);
-#endif /* defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK) */
+#if defined(USE_CID_CHECK)
+extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem);
+#endif /* defined(USE_CID_CHECK) */
#else
static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
@@ -2464,7 +2795,7 @@
int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
-
+void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
@@ -2493,6 +2824,20 @@
PARAM_LAST_VALUE
};
extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#ifdef DHD_EXPORT_CNTL_FILE
+#define VALUENOTSET 0xFFFFFFFFu
+extern uint32 bus_txglom;
+extern uint32 roam_off;
+#ifdef USE_WL_FRAMEBURST
+extern uint32 frameburst;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+extern uint32 txbf;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+extern uint32 proptx;
+#endif /* PROP_TXSTATUS */
+#endif /* DHD_EXPORT_CNTL_FILE */
#endif /* USE_WFA_CERT_CONF */
#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
@@ -2541,6 +2886,10 @@
#define DHD_BUS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_BUS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+/* Enable DHD backplane spin lock/unlock */
+#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+
#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
@@ -2673,6 +3022,7 @@
#if defined(DHD_LB_STATS)
#include <bcmutils.h>
extern void dhd_lb_stats_init(dhd_pub_t *dhd);
+extern void dhd_lb_stats_deinit(dhd_pub_t *dhd);
extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
@@ -2710,41 +3060,46 @@
#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
#endif /* !DHD_LB_STATS */
-#ifdef DHD_LB_IRQSET
-extern void dhd_irq_set_affinity(dhd_pub_t *dhdp);
-#endif /* DHD_LB_IRQSET */
#ifdef DHD_SSSR_DUMP
-#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */
+#define DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */
+
+/* used in sssr_dump_mode */
+#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */
+#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */
+
extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
+extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
+extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path);
+
#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp)
#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp)
#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path)
#else
-#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0)
#endif /* DHD_SSSR_DUMP */
-#ifdef SHOW_LOGTRACE
-void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info);
-#endif /* SHOW_LOGTRACE */
-
#ifdef BCMPCIE
extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp);
+extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd);
+extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd);
#else
#define dhd_prot_debug_info_print(x)
static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub)
{ return 0; }
#endif /* BCMPCIE */
-bool dhd_fw_download_status(dhd_pub_t * dhd_pub);
+fw_download_status_t dhd_fw_download_status(dhd_pub_t * dhd_pub);
void dhd_show_kirqstats(dhd_pub_t *dhd);
/* Bitmask used for Join Timeout */
@@ -2753,8 +3108,8 @@
extern int dhd_start_join_timer(dhd_pub_t *pub);
extern int dhd_stop_join_timer(dhd_pub_t *pub);
-extern int dhd_start_scan_timer(dhd_pub_t *pub);
-extern int dhd_stop_scan_timer(dhd_pub_t *pub);
+extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan);
+extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id);
extern int dhd_start_cmd_timer(dhd_pub_t *pub);
extern int dhd_stop_cmd_timer(dhd_pub_t *pub);
extern int dhd_start_bus_timer(dhd_pub_t *pub);
@@ -2778,6 +3133,10 @@
void dhd_pktid_error_handler(dhd_pub_t *dhdp);
#endif /* DHD_PKTID_AUDIT_ENABLED */
+#ifdef DHD_MAP_PKTID_LOGGING
+extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
#ifdef DHD_PCIE_RUNTIMEPM
extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
@@ -2802,24 +3161,26 @@
#define DHD_ENABLE_RUNTIME_PM(dhdp)
#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef REVERSE_AIFSN
+extern int check_reverse_aifsn_condition(dhd_pub_t *dhdp, struct net_device *ndev);
+
+#define DHD_REVERSE_AIFSN(dhdp, ndev) \
+do { \
+ check_reverse_aifsn_condition(dhdp, ndev); \
+} while (0);
+#else
+#define DHD_REVERSE_AIFSN(dhdp, ndev)
+#endif /* REVERSE_AIFSN */
+
extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
extern void dhd_prot_dump_ring_ptrs(void *prot_info);
-/*
- * Enable this macro if you want to track the calls to wake lock
- * This records can be printed using the following command
- * cat /sys/bcm-dhd/wklock_trace
- * DHD_TRACE_WAKE_LOCK supports over linux 2.6.0 version
- */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
-#undef DHD_TRACE_WAKE_LOCK
-#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */
-
#if defined(DHD_TRACE_WAKE_LOCK)
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
#endif // endif
extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
+void dhd_clear_bus_errors(dhd_pub_t *dhdp);
#if defined(CONFIG_64BIT)
#define DHD_SUPPORT_64BIT
@@ -2855,14 +3216,27 @@
int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len);
#endif /* FILTER_IE */
+uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp);
+
#ifdef SET_PCIE_IRQ_CPU_CORE
-extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set);
-extern void set_irq_cpucore(unsigned int irq, int set);
+enum {
+ PCIE_IRQ_AFFINITY_OFF = 0,
+ PCIE_IRQ_AFFINITY_BIG_CORE_ANY,
+ PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS,
+ PCIE_IRQ_AFFINITY_LAST
+};
+extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd);
#endif /* SET_PCIE_IRQ_CPU_CORE */
+
#if defined(DHD_HANG_SEND_UP_TEST)
extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num);
#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_RND_DEBUG
+int dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len);
+int dhd_get_rnd_info(dhd_pub_t *dhd);
+#endif /* DHD_RND_DEBUG */
+
#ifdef DHD_WAKE_STATUS
wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
#endif /* DHD_WAKE_STATUS */
@@ -2872,6 +3246,7 @@
#endif /* DHD_BLOB_EXISTENCE_CHECK */
/* configuration of ecounters. API's tp start/stop. currently supported only for linux */
+extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
extern int dhd_start_ecounters(dhd_pub_t *dhd);
extern int dhd_stop_ecounters(dhd_pub_t *dhd);
extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
@@ -2883,15 +3258,82 @@
void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type);
void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr);
-int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos);
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr,
+ uint32 sec_type);
+int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type);
+int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp,
+ const void *user_buf, unsigned long *f_pos);
+int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf);
+uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp);
int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp);
void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type);
int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size);
int dhd_logdump_cookie_count(dhd_pub_t *dhdp);
-#endif /* DHD_LOG_DUMP */
+int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp,
+ uint32 len, int type, void *pos);
+int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos);
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_DUMP_PCIE_RINGS */
+#ifdef DHD_STATUS_LOGGING
+extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp,
+ const void *user_buf, void *fp, uint32 len, void *pos);
+extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_STATUS_LOGGING */
+int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp,
+ char *dump_path, int size);
+uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_time_str_len(void);
+uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dld_len(int log_type);
+void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
+extern char *dhd_log_dump_get_timestamp(void);
+bool dhd_log_dump_ecntr_enabled(void);
+bool dhd_log_dump_rtt_enabled(void);
+void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
+int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
+#ifdef DHD_PKT_LOGGING
+extern int dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len);
+extern uint32 dhd_os_get_pktlog_dump_size(struct net_device *dev);
+extern void dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
+extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
+extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#endif /* DHD_LOG_DUMP */
+int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos);
#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev)
#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev)
@@ -2907,11 +3349,17 @@
/* ========= RING API functions : exposed to others ============= */
#define DHD_RING_TYPE_FIXED 1
+#define DHD_RING_TYPE_SINGLE_IDX 2
uint32 dhd_ring_get_hdr_size(void);
-void *dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt);
-void dhd_ring_deinit(void *_ring);
+void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type);
+void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
void *dhd_ring_get_first(void *_ring);
void dhd_ring_free_first(void *_ring);
+void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
+void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
+uint32 dhd_ring_get_read_idx(void *_ring);
+uint32 dhd_ring_get_write_idx(void *_ring);
void *dhd_ring_get_last(void *_ring);
void *dhd_ring_get_next(void *_ring, void *cur);
void *dhd_ring_get_prev(void *_ring, void *cur);
@@ -2923,6 +3371,8 @@
void *dhd_ring_lock_get_last(void *_ring);
int dhd_ring_lock_get_count(void *_ring);
void dhd_ring_lock_free_first(void *ring);
+void dhd_ring_whole_lock(void *ring);
+void dhd_ring_whole_unlock(void *ring);
#define DHD_DUMP_TYPE_NAME_SIZE 32
#define DHD_DUMP_FILE_PATH_SIZE 256
@@ -2942,11 +3392,102 @@
extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
#endif /* DHD_DUMP_MNGR */
-extern uint32 wlreg_l;
-extern uint32 wlreg_h;
-extern uint32 wlreg_len_l;
-extern uint32 wlreg_len_h;
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif // endif
-extern int dhd_get_host_whitelist_region(void *buf, uint len);
+#ifdef DHD_DUMP_PCIE_RINGS
+extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write);
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_EDL
+#define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE)
+int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
+ void *evt_decode_data);
+int dhd_edl_mem_init(dhd_pub_t *dhd);
+void dhd_edl_mem_deinit(dhd_pub_t *dhd);
+void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd);
+#define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp)
+#define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp)
+#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \
+ dhd_prot_edl_ring_tcm_rd_update(dhdp)
+#else
+#define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0)
+#endif /* EWP_EDL */
+
+void dhd_schedule_logtrace(void *dhd_info);
+int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath);
+
+#define HD_PREFIX_SIZE 2 /* hexadecimal prefix size */
+#define HD_BYTE_SIZE 2 /* hexadecimal byte size */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp);
+void dhd_h2d_log_time_sync(dhd_pub_t *dhdp);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+extern void dhd_cleanup_if(struct net_device *net);
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern void dhd_axi_error(dhd_pub_t *dhd);
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_HP2P
+extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
+extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHD_HP2P */
+extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+#ifdef DHD_STATUS_LOGGING
+#include <dhd_statlog.h>
+#else
+#define ST(x) 0
+#define STDIR(x) 0
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+ do { /* noop */ } while (0)
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+ do { BCM_REFERENCE(cond); } while (0)
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+ do { /* noop */ } while (0)
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef CONFIG_SILENT_ROAM
+extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
+typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifdef SUPPORT_SET_TID
+enum dhd_set_tid_mode {
+	/* Disable changing TID */
+ SET_TID_OFF = 0,
+ /* Change TID for all UDP frames */
+ SET_TID_ALL_UDP,
+ /* Change TID for UDP frames based on UID */
+ SET_TID_BASED_ON_UID
+};
+extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
+#endif /* SUPPORT_SET_TID */
+
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+#define FILE_NAME_HAL_TAG ""
+#else
+#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
+extern uint8 control_he_enab;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#if defined(BCMSDIO)
+void dhd_set_role(dhd_pub_t *dhdp, int role, int bssidx);
+#endif /* BCMSDIO */
#endif /* _dhd_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.c
new file mode 100644
index 0000000..55395e0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.c
@@ -0,0 +1,625 @@
+/*
+ * Band Steering logic
+ *
+ * Feature by which dualband capable PEERs will be
+ * forced move on 5GHz interface
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $ Copyright Cypress Semiconductor $
+ *
+ * $Id: dhd_bandsteer.c 724689 2020-03-04 10:04:03Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <bcmutils.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <wl_cfg80211.h>
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <dhd_linux_wq.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bandsteer.h>
+#include <dhd_dbg.h>
+
+/* defines */
+/* BANDSTEER STATE MACHINE STATES */
+#define DHD_BANDSTEER_START 0x0001
+#define DHD_BANDSTEER_WNM_FRAME_SEND 0x0002
+#define DHD_BANDSTEER_WNM_FRAME_RETRY 0x0004
+#define DHD_BANDSTEER_WNM_FRAME_SENT 0x0008
+#define DHD_BANDSTEER_TRIAL_DONE 0x0080
+
+#define DHD_BANDSTEER_ON_PROCESS_MASK (DHD_BANDSTEER_START | DHD_BANDSTEER_WNM_FRAME_SEND \
+ | DHD_BANDSTEER_WNM_FRAME_RETRY | DHD_BANDSTEER_TRIAL_DONE)
+
+#define DHD_BANDSTEER_WNM_FRAME_MAXRETRY 3
+#define DHD_BANDSTEER_WNM_FRAME_DELAY 1000
+#define DHD_BANDSTEER_WNM_FRAME_START_DELAY 10
+#define DHD_BANDSTEER_WNM_FRAME_RESPONSE_DWELL 40
+#define MAX_NUM_OF_ASSOCLIST 64
+#define DHD_BANDSTEER_MAXIFACES 2
+
+#define CHANNEL_IS_5G(channel) ((((channel) >= 36) && ((channel) <= 165)) ? \
+	true : false)
+
+/* ********************** Function declaration *********************** */
+static s32
+dhd_bandsteer_addmac_to_monitorlist(dhd_bandsteer_context_t *dhd_bandsteer_cntx, uint8 *mac_addr);
+static s32
+dhd_bandsteer_remove_mac_from_list(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac, int all);
+static dhd_bandsteer_mac_entry_t*
+dhd_bandsteer_look_for_match(dhd_bandsteer_context_t *dhd_bandsteer_cntx, uint8 *mac_addr);
+static s32
+dhd_bandsteer_tx_wnm_actframe(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac);
+static void
+dhd_bandsteer_add_to_black_list(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac);
+
+extern int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
+
+/* ********************** Function declaration ends ****************** */
+
+static void
+dhd_bandsteer_add_timer(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac, unsigned long msec)
+{
+ timer_expires(&dhd_bandsteer_mac->dhd_bandsteer_timer) =
+ jiffies + msecs_to_jiffies(msec);
+ add_timer(&dhd_bandsteer_mac->dhd_bandsteer_timer);
+}
+
+static void
+dhd_bandsteer_delete_timer(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
+{
+ del_timer(&dhd_bandsteer_mac->dhd_bandsteer_timer);
+}
+
+/*
+ * The same callback is used every time the timer is configured;
+ * the next step is taken based on the status of the MAC entry.
+ */
+static void
+dhd_bandsteer_state_machine(ulong arg)
+{
+ dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac = (dhd_bandsteer_mac_entry_t *)arg;
+
+ if (dhd_bandsteer_mac == NULL) {
+ DHD_ERROR(("%s: dhd_bandsteer_mac is null\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_TRACE(("%s: Peer STA BandSteer status 0x%x", __FUNCTION__,
+ dhd_bandsteer_mac->dhd_bandsteer_status));
+
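+	/*
+	 * START/RETRY: schedule the WNM send through the workqueue.
+	 * SEND: transmit the BTM request, then either retry or finish.
+	 * TRIAL_DONE: drop the entry from the monitor list.
+	 */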
+ switch (dhd_bandsteer_mac->dhd_bandsteer_status) {
+ case DHD_BANDSTEER_START:
+ case DHD_BANDSTEER_WNM_FRAME_RETRY:
+ dhd_bandsteer_mac->dhd_bandsteer_status = DHD_BANDSTEER_WNM_FRAME_SEND;
+ dhd_bandsteer_schedule_work_on_timeout(dhd_bandsteer_mac);
+ break;
+
+ case DHD_BANDSTEER_WNM_FRAME_SEND:
+ dhd_bandsteer_tx_wnm_actframe(dhd_bandsteer_mac);
+ if (dhd_bandsteer_mac->wnm_frame_counter < DHD_BANDSTEER_WNM_FRAME_MAXRETRY) {
+			/* Sending out WNM action frame as soon as assoc indication received */
+ dhd_bandsteer_mac->dhd_bandsteer_status = DHD_BANDSTEER_WNM_FRAME_RETRY;
+		} else {
+ dhd_bandsteer_mac->dhd_bandsteer_status = DHD_BANDSTEER_TRIAL_DONE;
+ }
+ break;
+ case DHD_BANDSTEER_TRIAL_DONE:
+ dhd_bandsteer_remove_mac_from_list(dhd_bandsteer_mac, 0);
+ break;
+ }
+ return;
+}
+
+void
+dhd_bandsteer_workqueue_wrapper(void *handle, void *event_info, u8 event)
+{
+ dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac = (dhd_bandsteer_mac_entry_t *)event_info;
+
+ if (event != DHD_WQ_WORK_BANDSTEER_STEP_MOVE) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_bandsteer_state_machine((ulong)dhd_bandsteer_mac);
+}
+
+/*
+ * This API creates and initializes a list entry to be processed later
+ */
+static s32
+dhd_bandsteer_addmac_to_monitorlist(dhd_bandsteer_context_t *dhd_bandsteer_cntx, uint8 *mac_addr)
+{
+ dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac;
+
+ dhd_bandsteer_mac = kzalloc(sizeof(dhd_bandsteer_mac_entry_t), GFP_KERNEL);
+ if (unlikely(!dhd_bandsteer_mac)) {
+ DHD_ERROR(("%s: alloc failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ INIT_LIST_HEAD(&dhd_bandsteer_mac->list);
+	/* Keep a pointer to dhd_bandsteer_cntx for later use */
+ dhd_bandsteer_mac->dhd_bandsteer_cntx = dhd_bandsteer_cntx;
+
+ dhd_bandsteer_mac->dhd_bandsteer_status = DHD_BANDSTEER_START;
+
+ memcpy(&dhd_bandsteer_mac->mac_addr.octet, mac_addr, ETHER_ADDR_LEN);
+
+	/* Set up the band-steer state-machine timer; it is armed just below */
+ init_timer_compat(&dhd_bandsteer_mac->dhd_bandsteer_timer, dhd_bandsteer_state_machine,
+ dhd_bandsteer_mac);
+ dhd_bandsteer_mac->wnm_frame_counter = 0;
+
+ /* Add new entry into the list */
+ list_add_tail(&dhd_bandsteer_mac->list,
+ &dhd_bandsteer_cntx->dhd_bandsteer_monitor_list);
+
+ DHD_TRACE(("%s: " MACDBG " added into list \n", __FUNCTION__,
+ MAC2STRDBG(dhd_bandsteer_mac->mac_addr.octet)));
+	/* The start delay can be tuned further if needed */
+ dhd_bandsteer_add_timer(dhd_bandsteer_mac, DHD_BANDSTEER_WNM_FRAME_START_DELAY);
+
+ return BCME_OK;
+}
+
+/*
+ * This function removes one MAC entry from the list, or all entries when 'all' is set
+ */
+static s32
+dhd_bandsteer_remove_mac_from_list(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac, int all)
+{
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx;
+ dhd_bandsteer_mac_entry_t *curr, *next;
+
+ DHD_INFO(("%s: entered \n", __FUNCTION__));
+	/* TODO: this sanity check can probably be removed */
+ if (dhd_bandsteer_mac == NULL) {
+ DHD_ERROR(("%s: dhd_bandsteer_mac is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
+
+ list_for_each_entry_safe(curr, next,
+ &dhd_bandsteer_cntx->dhd_bandsteer_monitor_list, list) {
+
+ if (curr == NULL) {
+ DHD_ERROR(("%s: Invalid MAC\n", __FUNCTION__));
+ break;
+ }
+
+ if ((curr == dhd_bandsteer_mac) || all) {
+			DHD_ERROR(("%s: " MACDBG " deleted from list\n", __FUNCTION__,
+				MAC2STRDBG(curr->mac_addr.octet)));
+ list_del(&curr->list);
+ dhd_bandsteer_delete_timer(curr);
+ kfree(curr);
+ if (!all)
+ break;
+ }
+ }
+ return BCME_OK;
+}
+
+/*
+ * Find the list node matching the given MAC address.
+ * Returns NULL if no entry is found.
+ */
+static dhd_bandsteer_mac_entry_t*
+dhd_bandsteer_look_for_match(dhd_bandsteer_context_t *dhd_bandsteer_cntx, uint8 *mac_addr)
+{
+ dhd_bandsteer_mac_entry_t *curr = NULL, *next = NULL;
+
+ list_for_each_entry_safe(curr, next,
+ &dhd_bandsteer_cntx->dhd_bandsteer_monitor_list, list) {
+ if (memcmp(&curr->mac_addr.octet, mac_addr, ETHER_ADDR_LEN) == 0) {
+ return curr;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * This API sends the WNM action frame and (re)arms the timeout timer
+ */
+static s32
+dhd_bandsteer_tx_wnm_actframe(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
+{
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
+ wl_action_frame_t *action_frame = NULL;
+ wl_af_params_t *af_params = NULL;
+ char *smbuf = NULL;
+ int error = BCME_ERROR;
+ uint8 *bp;
+ dhd_bandsteer_iface_info_t *if_info_5g, *if_info_2g;
+
+ if_info_5g = &dhd_bandsteer_cntx->bsd_ifaces[dhd_bandsteer_cntx->ifidx_5g];
+ if_info_2g = &dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g];
+
+ smbuf = kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (smbuf == NULL) {
+		DHD_ERROR(("%s: failed to allocate memory %d bytes\n", __FUNCTION__,
+ WLC_IOCTL_MAXLEN));
+ goto send_action_frame_out;
+ }
+
+ af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+ if (af_params == NULL) {
+ DHD_ERROR(("%s: unable to allocate frame\n", __FUNCTION__));
+ goto send_action_frame_out;
+ }
+
+ af_params->channel = if_info_2g->channel;
+ af_params->dwell_time = DHD_BANDSTEER_WNM_FRAME_RESPONSE_DWELL;
+ memcpy(&af_params->BSSID, &dhd_bandsteer_mac->mac_addr.octet, ETHER_ADDR_LEN);
+ action_frame = &af_params->action_frame;
+
+ action_frame->packetId = 0;
+ memcpy(&action_frame->da, &dhd_bandsteer_mac->mac_addr.octet, ETHER_ADDR_LEN);
+
+ dhd_bandsteer_mac->wnm_frame_counter++;
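+	/*
+	 * Build an IEEE 802.11v BSS Transition Management Request:
+	 * WNM category / BTM Request action, with the retry count reused as
+	 * the dialog token, followed by a Neighbor Report element that
+	 * advertises the 5GHz interface's BSSID and channel as the target.
+	 */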
+ bp = (uint8 *)action_frame->data;
+ *bp++ = 0xa; /* Category */
+ *bp++ = 0x7; /* Action ID */
+ *bp++ = dhd_bandsteer_mac->wnm_frame_counter; /* Dialog Token */
+ *bp++ = 0x1; /* Request mode */
+ *bp++ = 0x0; /* disassociation timer has two bytes */
+ *bp++ = 0x0;
+ *bp++ = 0x0; /* Validity interval */
+ *bp++ = 0x34; /* Element ID */
+ *bp++ = 0xd; /* Len */
+ memcpy(bp, if_info_5g->macaddr.octet, ETHER_ADDR_LEN);
+ bp += ETHER_ADDR_LEN;
+	bp += 4; /* Skip BSSID info 4 bytes in size */
+ *bp++ = 0x7d; /* Operating class */
+ *bp++ = if_info_5g->channel; /* Channel number */
+ *bp = 0x0; /* Phy Type */
+
+ action_frame->len = (bp - (uint8 *)&action_frame->data) + 1;
+
+ error = wldev_iovar_setbuf(if_info_2g->ndev, "actframe", af_params,
+ sizeof(wl_af_params_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ if (error) {
+ DHD_ERROR(("Failed to set action frame, error=%d\n", error));
+ goto send_action_frame_out;
+ }
+ DHD_TRACE(("%s: BSS Trans Req frame sent to " MACDBG " try %d\n", __FUNCTION__,
+ MAC2STRDBG(dhd_bandsteer_mac->mac_addr.octet),
+ dhd_bandsteer_mac->wnm_frame_counter));
+
+send_action_frame_out:
+ /* Re-schedule the timer */
+ dhd_bandsteer_add_timer(dhd_bandsteer_mac, DHD_BANDSTEER_WNM_FRAME_DELAY);
+ if (af_params)
+ kfree(af_params);
+
+ if (smbuf)
+ kfree(smbuf);
+
+ if (error)
+ return BCME_ERROR;
+
+ return BCME_OK;
+}
+
+/*
+ * Add the peer MAC to the MAC deny (black) list of the corresponding
+ * interface (currently the 2.4GHz one) using existing IOVAR/IOCTL calls.
+ * Callers invoke dhd_bandsteer_remove_mac_from_list() separately.
+ */
+static void
+dhd_bandsteer_add_to_black_list(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
+{
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
+ dhd_bandsteer_iface_info_t *if_info_2g;
+ int err;
+ int macmode = MACLIST_MODE_DENY;
+ struct maclist *maclist;
+ uint8 *pmaclist_ea;
+ uint8 mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+
+ if_info_2g = &dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g];
+
+ /* Black listing */
+ DHD_INFO(("%s: Black listing " MACDBG " on 2GHz IF\n", __FUNCTION__,
+ MAC2STRDBG(dhd_bandsteer_mac->mac_addr.octet)));
+
+ /* Get current black list */
+ if ((err = wldev_ioctl_get(if_info_2g->ndev, WLC_GET_MACLIST, mac_buf,
+ sizeof(mac_buf))) != 0) {
+ DHD_ERROR(("%s: WLC_GET_MACLIST error=%d\n", __FUNCTION__, err));
+ }
+
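+	/* Append the peer MAC after the existing deny-list entries and bump the count */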
+ maclist = (struct maclist *)mac_buf;
+ pmaclist_ea = (uint8*) mac_buf +
+ (sizeof(struct ether_addr) * maclist->count) + sizeof(uint);
+ maclist->count++;
+
+ memcpy(pmaclist_ea, &dhd_bandsteer_mac->mac_addr.octet, ETHER_ADDR_LEN);
+
+ if ((err = wldev_ioctl_set(if_info_2g->ndev, WLC_SET_MACMODE, &macmode,
+ sizeof(macmode))) != 0) {
+ DHD_ERROR(("%s: WLC_SET_MACMODE error=%d\n", __FUNCTION__, err));
+ }
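+	/*
+	 * Only print the path of a "before" dump when the corresponding D11
+	 * core was out of reset and the dump was taken in full SSSR mode;
+	 * an "after" dump only requires the core to be out of reset.
+	 */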
+
+ /* set the MAC filter list */
+ if ((err = wldev_ioctl_set(if_info_2g->ndev, WLC_SET_MACLIST, maclist,
+ sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
+ DHD_ERROR(("%s: WLC_SET_MACLIST error=%d\n", __FUNCTION__, err));
+ }
+}
+
+/*
+ * Check whether the MAC associated on 2.4GHz.
+ * If on 2.4GHz:
+ *  - Ignore if a band-steer cycle has already been run for this peer
+ *  - Add the peer STA MAC to the monitor list
+ *  - Send a BSS Transition Request frame
+ * Else:
+ *  - The association arrived on 5GHz from a MAC we steered there
+ */
+s32
+dhd_bandsteer_trigger_bandsteer(struct net_device *ndev, uint8 *mac_addr)
+{
+ struct wireless_dev *__wdev = (struct wireless_dev *)(ndev)->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(__wdev->wiphy);
+ struct net_device *netdev_5g = NULL;
+ dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac = NULL;
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = NULL;
+
+ DHD_ERROR(("%s: Start band-steer procedure for " MACDBG "\n", __FUNCTION__,
+ MAC2STRDBG(mac_addr)));
+
+ if (cfg == NULL) {
+ DHD_ERROR(("%s: bcmcfg is null\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (cfg->dhd_bandsteer_cntx == NULL) {
+ DHD_ERROR(("%s: Band Steering not enabled\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd_bandsteer_cntx = cfg->dhd_bandsteer_cntx;
+
+ netdev_5g = dhd_bandsteer_cntx->bsd_ifaces[dhd_bandsteer_cntx->ifidx_5g].ndev;
+ dhd_bandsteer_mac = dhd_bandsteer_look_for_match(dhd_bandsteer_cntx, mac_addr);
+ if (dhd_bandsteer_mac == NULL) {
+		/*
+		 * This STA entry was not found in the list; check whether band steering
+		 * is already done for it and whether this association is on 2.4GHz or 5GHz
+		 */
+ if (ndev == netdev_5g) {
+			/* Ignore as the device is already connected on 5GHz */
+ DHD_ERROR(("%s: " MACDBG " is on 5GHz interface\n", __FUNCTION__,
+ MAC2STRDBG(mac_addr)));
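+			/* Allocate a temporary entry just to black-list the MAC on 2.4GHz, then free it */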
+ dhd_bandsteer_mac = kzalloc(sizeof(dhd_bandsteer_mac_entry_t), GFP_KERNEL);
+ if (unlikely(!dhd_bandsteer_mac)) {
+ DHD_ERROR(("%s: alloc failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ dhd_bandsteer_mac->dhd_bandsteer_cntx = dhd_bandsteer_cntx;
+ memcpy(&dhd_bandsteer_mac->mac_addr.octet, mac_addr, ETHER_ADDR_LEN);
+ dhd_bandsteer_add_to_black_list(dhd_bandsteer_mac);
+ kfree(dhd_bandsteer_mac);
+ dhd_bandsteer_mac = NULL;
+ return BCME_OK;
+ } else {
+ DHD_INFO(("%s: dhd_bandsteer_addmac_to_monitorlist\n", __FUNCTION__));
+ dhd_bandsteer_addmac_to_monitorlist(dhd_bandsteer_cntx, mac_addr);
+ /*
+ * TODO: Time for us to enable PROB_REQ MSG
+ */
+ }
+ } else {
+		/*
+		 * Start the post-connect process, as band steering succeeded for this entry
+		 */
+ if (ndev == netdev_5g) {
+ DHD_ERROR(("%s: Band Steer for " MACDBG " successful\n", __FUNCTION__,
+ MAC2STRDBG(mac_addr)));
+ dhd_bandsteer_add_to_black_list(dhd_bandsteer_mac);
+ dhd_bandsteer_remove_mac_from_list(dhd_bandsteer_mac, 0);
+			/* Probably add this MAC into the black list */
+ }
+ }
+ return BCME_OK;
+}
+
+s32
+dhd_bandsteer_module_init(struct net_device *ndev, bool ap, bool p2p)
+{
+ /* Initialize */
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = NULL;
+ struct channel_info ci;
+ uint8 ifidx;
+ struct wireless_dev *__wdev = (struct wireless_dev *)(ndev)->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(__wdev->wiphy);
+ int err;
+
+ DHD_INFO(("%s: entered\n", __FUNCTION__));
+
+ if (cfg == NULL) {
+ DHD_ERROR(("%s: bcmcfg is null\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (cfg->dhd_bandsteer_cntx != NULL) {
+ DHD_ERROR(("%s: Band Steering already enabled\n", __FUNCTION__));
+ goto init_done;
+ }
+
+ dhd_bandsteer_cntx = (dhd_bandsteer_context_t *)kzalloc(sizeof(dhd_bandsteer_context_t),
+ GFP_KERNEL);
+ if (unlikely(!dhd_bandsteer_cntx)) {
+ DHD_ERROR(("%s: dhd_bandsteer_cntx alloc failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ if (dhd_bandsteer_get_ifaces(cfg->pub, &dhd_bandsteer_cntx->bsd_ifaces)) {
+		DHD_ERROR(("%s: AP interfaces count != 2\n", __FUNCTION__));
+ err = BCME_ERROR;
+ goto failed;
+ }
+
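+	/*
+	 * Cache each AP interface's MAC address and operating channel;
+	 * the 5GHz interface becomes the steering target.
+	 */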
+ for (ifidx = 0; ifidx < DHD_BANDSTEER_MAXIFACES; ifidx++) {
+ err = wldev_iovar_getbuf_bsscfg(dhd_bandsteer_cntx->bsd_ifaces[ifidx].ndev,
+ "cur_etheraddr", NULL, 0, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ if (err) {
+ DHD_ERROR(("%s: Failed to get mac address\n", __FUNCTION__));
+ goto failed;
+ }
+
+ memcpy(dhd_bandsteer_cntx->bsd_ifaces[ifidx].macaddr.octet,
+ cfg->ioctl_buf, ETHER_ADDR_LEN);
+
+ memset(&ci, 0, sizeof(struct channel_info));
+ err = wldev_ioctl_get(dhd_bandsteer_cntx->bsd_ifaces[ifidx].ndev, WLC_GET_CHANNEL,
+ &ci, sizeof(ci));
+ if (err) {
+ DHD_ERROR(("%s: Failed to get channel\n", __FUNCTION__));
+ goto failed;
+ }
+ if (CHANNEL_IS_5G(ci.hw_channel))
+ dhd_bandsteer_cntx->ifidx_5g = ifidx;
+
+ dhd_bandsteer_cntx->bsd_ifaces[ifidx].channel = ci.hw_channel;
+ }
+
+ if (ap) {
+ INIT_LIST_HEAD(&dhd_bandsteer_cntx->dhd_bandsteer_monitor_list);
+ dhd_bandsteer_cntx->dhd_pub = cfg->pub;
+ cfg->dhd_bandsteer_cntx = (void *) dhd_bandsteer_cntx;
+ }
+
+	/*
+	 * Enabling the "probresp_sw" iovar suppresses probe requests, as a result
+	 * of which P2P discovery for 2G-only capable STAs fails. Hence it is left
+	 * disabled for now.
+	 */
+
+init_done:
+ /* Enable p2p bandsteer on 2GHz interface */
+ if (p2p) {
+ if (dhd_bandsteer_cntx == NULL)
+ dhd_bandsteer_cntx = cfg->dhd_bandsteer_cntx;
+
+ if ((err = wldev_iovar_setint(
+ dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g].ndev,
+ "bandsteer", 1)) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to enable bandsteer in FW err = %d\n",
+ __FUNCTION__, err));
+ }
+ }
+
+ DHD_INFO(("%s: exited\n", __FUNCTION__));
+ return BCME_OK;
+
+failed:
+ kfree(dhd_bandsteer_cntx);
+ return err;
+}
+
+s32
+dhd_bandsteer_module_deinit(struct net_device *ndev, bool ap, bool p2p)
+{
+ dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac = NULL;
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = NULL;
+ struct wireless_dev *__wdev = (struct wireless_dev *)(ndev)->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(__wdev->wiphy);
+ int macmode = MACLIST_MODE_DISABLED;
+ int err;
+ struct maclist maclist;
+
+ DHD_INFO(("%s: entered\n", __FUNCTION__));
+
+ if (cfg == NULL) {
+ DHD_ERROR(("%s: bcmcfg is null\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (cfg->dhd_bandsteer_cntx == NULL) {
+ DHD_ERROR(("%s: Band Steering not enabled\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd_bandsteer_cntx = cfg->dhd_bandsteer_cntx;
+
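+	/*
+	 * Tear down in order: clear the 2.4GHz MAC filter, disable FW band
+	 * steering for P2P, then free the monitor list and the context.
+	 */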
+ if (ap) {
+ /* Disable mac filter */
+ if ((err = wldev_ioctl_set(
+ dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g].ndev,
+ WLC_SET_MACMODE, &macmode, sizeof(macmode))) != 0) {
+ DHD_ERROR(("%s: WLC_SET_MACMODE error=%d\n", __FUNCTION__, err));
+ }
+
+ /* Set the MAC filter list */
+ memset(&maclist, 0, sizeof(struct maclist));
+ if ((err = wldev_ioctl_set(
+ dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g].ndev,
+ WLC_SET_MACLIST, &maclist, sizeof(struct maclist))) != 0) {
+ DHD_ERROR(("%s: WLC_SET_MACLIST error=%d\n", __FUNCTION__, err));
+ }
+ }
+
+ /* Disable p2p bandsteer on 2GHz interface */
+ if (p2p) {
+ if ((err = wldev_iovar_setint(
+ dhd_bandsteer_cntx->bsd_ifaces[!dhd_bandsteer_cntx->ifidx_5g].ndev,
+ "bandsteer", 0)) != BCME_OK) {
+			DHD_ERROR(("%s: Failed to disable bandsteer in FW err = %d\n",
+ __FUNCTION__, err));
+ }
+ }
+
+ if (ap) {
+ /* Get the first element of the list & pass it to remove */
+ if (dhd_bandsteer_cntx->dhd_bandsteer_monitor_list.next !=
+ &dhd_bandsteer_cntx->dhd_bandsteer_monitor_list) {
+ dhd_bandsteer_mac = (dhd_bandsteer_mac_entry_t *)list_entry(
+ dhd_bandsteer_cntx->dhd_bandsteer_monitor_list.next,
+ dhd_bandsteer_mac_entry_t, list);
+ }
+
+ if (dhd_bandsteer_mac) {
+ dhd_bandsteer_remove_mac_from_list(dhd_bandsteer_mac, 1);
+ }
+ kfree(dhd_bandsteer_cntx);
+ cfg->dhd_bandsteer_cntx = NULL;
+ }
+
+ DHD_INFO(("%s: exited\n", __FUNCTION__));
+ return BCME_OK;
+}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.h
new file mode 100644
index 0000000..94ef3b6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bandsteer.h
@@ -0,0 +1,77 @@
+/*
+ * Band Steering logic
+ *
+ * Feature by which dualband capable PEERs will be
+ * forced move on 5GHz interface
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $ Copyright Cypress Semiconductor $
+ *
+ * $Id: dhd_bandsteer.h 710124 2019-02-20 13:15:23Z $
+ */
+
+#ifndef _dhd_bandsteer_h_
+#define _dhd_bandsteer_h_
+
+/* Local Types */
+typedef struct dhd_bandsteer_context dhd_bandsteer_context_t;
+
+typedef struct dhd_bandsteer_iface_info {
+ s32 bssidx;
+ s32 channel;
+ struct ether_addr macaddr;
+ struct net_device *ndev;
+} dhd_bandsteer_iface_info_t;
+
+typedef struct dhd_bandsteer_mac_entry {
+	struct list_head list;	// Node in the band-steer monitor list
+	uint32 dhd_bandsteer_status;	// Used in the timer callback
+	dhd_bandsteer_context_t *dhd_bandsteer_cntx;
+	timer_list_compat_t dhd_bandsteer_timer;	// Callback to advance the BS state machine
+ uint8 wnm_frame_counter;
+ struct ether_addr mac_addr;
+} dhd_bandsteer_mac_entry_t;
+
+struct dhd_bandsteer_context {
+ struct list_head dhd_bandsteer_monitor_list;
+ uint8 ifidx_5g;
+ dhd_bandsteer_iface_info_t bsd_ifaces[2]; /* idx 0 5G, 1 2G */
+ void *dhd_pub;
+};
+
+/* Local Types end */
+
+/* ********************** Function declaration *********************** */
+void dhd_bandsteer_process_disassoc(dhd_bandsteer_context_t *dhd_bandsteer_cntx,
+ const wl_event_msg_t *e);
+s32 dhd_bandsteer_module_init(struct net_device *ndev, bool ap, bool p2p);
+s32 dhd_bandsteer_module_deinit(struct net_device *ndev, bool ap, bool p2p);
+void dhd_bandsteer_schedule_work_on_timeout(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac);
+void dhd_bandsteer_workqueue_wrapper(void *handle, void *event_info, u8 event);
+s32 dhd_bandsteer_get_ifaces(void *pub, void *ifaces);
+s32 dhd_bandsteer_trigger_bandsteer(struct net_device *, uint8 *);
+/* ********************** Function declaration ends ****************** */
+#endif /* _dhd_bandsteer_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bus.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bus.h
index 9dcd53d..bcdb19d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bus.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -4,7 +4,9 @@
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -37,6 +39,9 @@
* Exported from dhd bus module (dhd_usb, dhd_sdio)
*/
+/* global variable for the bus */
+extern struct dhd_bus *g_dhd_bus;
+
/* Indicate (dis)interest in finding dongles. */
extern int dhd_bus_register(void);
extern void dhd_bus_unregister(void);
@@ -66,6 +71,10 @@
extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
#endif // endif
+#ifdef BCMPCIE
+extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
*/
@@ -133,8 +142,10 @@
#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp);
#else
#define dhd_bus_mem_dump(x)
+#define dhd_bus_get_mem_dump(x)
#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
#ifdef BCMPCIE
@@ -175,10 +186,8 @@
TOTAL_LFRAG_PACKET_CNT,
MAX_HOST_RXBUFS,
HOST_API_VERSION,
-#ifdef D2H_MINIDUMP
- DNGL_TO_HOST_TRAP_ADDR_LEN,
-#endif /* D2H_MINIDUMP */
- DNGL_TO_HOST_TRAP_ADDR
+ DNGL_TO_HOST_TRAP_ADDR,
+ HOST_SCB_ADDR, /* update host scb base address to dongle */
};
typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
@@ -207,7 +216,6 @@
extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
-extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
#ifdef IDLE_TX_FLOW_MGMT
extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
@@ -223,14 +231,21 @@
extern int dhd_bus_release_dongle(struct dhd_bus *bus);
extern int dhd_bus_request_irq(struct dhd_bus *bus);
extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
+extern void dhd_bus_aer_config(struct dhd_bus *bus);
extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
-extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable);
+
+extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus);
#ifdef DHD_FW_COREDUMP
-extern struct dhd_bus *g_dhd_bus;
+#ifdef BCMDHDX
+extern int dhdx_dongle_mem_dump(void);
+#else
extern int dhd_dongle_mem_dump(void);
+#endif /* BCMDHDX */
#endif /* DHD_FW_COREDUMP */
#ifdef IDLE_TX_FLOW_MGMT
@@ -262,12 +277,30 @@
#ifdef BCMPCIE
extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp);
+extern void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp);
+extern bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp);
+extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type);
+#ifndef BCMDHDX
+extern bool dhd_bus_check_driver_up(void);
+#else
+extern bool dhdx_bus_check_driver_up(void);
+#endif /* BCMDHDX */
+extern int dhd_bus_get_cto(dhd_pub_t *dhdp);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
+extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp);
#else
#define dhd_bus_dump_console_buffer(x)
static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); }
+static INLINE void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) { }
+static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; }
+static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; }
+static INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { }
+static INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; }
#endif /* BCMPCIE */
-#if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data,
void *event_decode_data);
#endif // endif
@@ -294,34 +327,31 @@
int dhd_bus_perform_flr(struct dhd_bus *bus, bool force_fail);
extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus);
+extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable);
+extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable);
+
+bool dhd_bus_is_multibp_capable(struct dhd_bus *bus);
+
#ifdef BCMPCIE
extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp);
extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd);
#endif /* BCMPCIE */
-#ifdef D2H_MINIDUMP
-#ifndef DHD_FW_COREDUMP
-/* Minidump depends on DHD_FW_COREDUMP to dump minidup
- * This dependency is intentional to avoid multiple work queue
- * to dump the SOCRAM, minidum ..etc.
- */
-#error "Minidump doesnot work as DHD_FW_COREDUMP is not defined"
-#endif /* DHD_FW_COREDUMP */
-#ifdef BCM_BUZZZ
-/*
- * In pciedev_shared_t buzz_dbg_ptr and device_trap_debug_buffer_len
- * are overloaded. So when BCM_BUZZZ is defined MINIDUMP should not be defined or
- * vice versa.
- */
-#error "Minidump doesnot work as BCM_BUZZZ is defined"
-#endif /* BCM_BUZZZ */
-extern bool dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp);
-dhd_dma_buf_t* dhd_prot_get_minidump_buf(dhd_pub_t *dhd);
-#endif /* D2H_MINIDUMP */
-
extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus);
#ifdef DHD_SSSR_DUMP
-extern int dhd_bus_sssr_dump(dhd_pub_t *dhd);
+extern int dhd_bus_fis_trigger(dhd_pub_t *dhd);
+extern int dhd_bus_fis_dump(dhd_pub_t *dhd);
#endif /* DHD_SSSR_DUMP */
+
+#ifdef PCIE_FULL_DONGLE
+extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_USE_BP_RESET
+extern int dhd_bus_perform_bp_reset(struct dhd_bus *bus);
+#endif /* DHD_USE_BP_RESET */
+
+extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus);
+extern void dhd_bwm_bt_resume(struct dhd_bus *bus);
#endif /* _dhd_bus_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_buzzz.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_buzzz.h
index 3059b0f..3db6a7f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_buzzz.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_buzzz.h
@@ -3,7 +3,9 @@
/*
* Broadcom logging system - Empty implementaiton
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cdc.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cdc.c
index 47086df..5c20413 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cdc.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -1,7 +1,9 @@
/*
* DHD Protocol Module for CDC and BDC.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -71,6 +73,13 @@
unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
} dhd_prot_t;
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+ /* SDIO does not have ioctl_trans_id yet, so return -1 */
+ return -1;
+}
+
static int
dhdcdc_msg(dhd_pub_t *dhd)
{
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
index 6bf00fb..08ccaf5 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -60,9 +62,8 @@
#ifndef OEM_ANDROID
static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode);
#ifdef BCMSDIO /* glomming is a sdio specific feature */
-static s32 wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align);
+static s32 wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align);
#endif // endif
-static s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout);
static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, s32 scan_unassoc_time);
static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol);
static s32 wl_pattern_atoh(s8 *src, s8 *dst);
@@ -161,9 +162,23 @@
int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
{
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd);
}
+void wl_cfg80211_cleanup_if(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0));
+#else
+ BCM_REFERENCE(cfg);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ dhd_cleanup_if(net);
+}
+
struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
{
struct bcm_cfg80211 *cfg;
@@ -231,7 +246,7 @@
#ifdef BCMSDIO
static s32
-wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align)
+wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align)
{
s32 err = 0;
@@ -242,18 +257,21 @@
goto dongle_glom_out;
}
/* disable glom option per default */
- err = wldev_iovar_setint(ndev, "bus:txglom", glom);
- if (unlikely(err)) {
- WL_ERR(("txglom error (%d)\n", err));
- goto dongle_glom_out;
+ if (glom != DEFAULT_GLOM_VALUE) {
+ err = wldev_iovar_setint(ndev, "bus:txglom", glom);
+ if (unlikely(err)) {
+ WL_ERR(("txglom error (%d)\n", err));
+ goto dongle_glom_out;
+ }
}
dongle_glom_out:
return err;
}
#endif /* BCMSDIO */
+#endif /* OEM_ANDROID */
-static s32
+s32
wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
{
s32 err = 0;
@@ -276,6 +294,7 @@
return err;
}
+#ifndef OEM_ANDROID
static s32
wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
s32 scan_unassoc_time)
@@ -463,6 +482,9 @@
#endif // endif
struct net_device *ndev;
s32 err = 0;
+#if !defined(OEM_ANDROID) && defined(BCMSDIO)
+ s32 glom = CUSTOM_GLOM_SETTING;
+#endif // endif
WL_TRACE(("In\n"));
if (dhd_dongle_up) {
@@ -484,7 +506,11 @@
goto default_conf_out;
}
#ifdef BCMSDIO
- err = wl_dongle_glom(ndev, 0, DHD_SDALIGN);
+ if (glom != DEFAULT_GLOM_VALUE) {
+ err = wl_dongle_glom(ndev, glom, DHD_SDALIGN);
+ } else {
+ err = wl_dongle_glom(ndev, DEFAULT_GLOM_VALUE, DHD_SDALIGN);
+ }
if (unlikely(err)) {
WL_ERR(("wl_dongle_glom failed\n"));
goto default_conf_out;
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
index 6722a1b..2c5d7b9 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -50,4 +52,5 @@
int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data);
+s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout);
#endif /* __DHD_CFG80211__ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_common.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_common.c
index 0e0eaac..7ae36f7 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_common.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -1,7 +1,9 @@
/*
* Broadcom Dongle Host Driver (DHD), common DHD core.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -31,6 +33,7 @@
#include <epivers.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <dngl_stats.h>
@@ -63,9 +66,9 @@
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif // endif
-#ifdef PNO_SUPPORT
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
#include <dhd_pno.h>
-#endif // endif
+#endif /* OEM_ANDROID && PNO_SUPPORT */
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif
@@ -86,6 +89,10 @@
#include <dhd_wlfc.h>
#endif // endif
+#if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
+#include <dhd_linux.h>
+#endif // endif
+
#ifdef DHD_L2_FILTER
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */
@@ -105,9 +112,14 @@
#endif /* DHD_PKT_LOGGING */
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+int log_print_threshold = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL
/* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
+#if !defined(BOARD_HIKEY)
| DHD_IOVAR_MEM_VAL
+#endif // endif
#ifndef OEM_ANDROID
| DHD_MSGTRACE_VAL
#endif /* OEM_ANDROID */
@@ -157,6 +169,14 @@
#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
+#ifdef EWP_EDL
+typedef struct msg_hdr_edl {
+ uint32 infobuf_ver;
+ info_buf_payload_hdr_t pyld_hdr;
+ msgtrace_hdr_t trace_hdr;
+} msg_hdr_edl_t;
+#endif /* EWP_EDL */
+
/* Last connection success/failure status */
uint32 dhd_conn_event;
uint32 dhd_conn_status;
@@ -291,6 +311,21 @@
IOV_LOG_DUMP,
#endif /* DHD_LOG_DUMP */
IOV_TPUT_TEST,
+ IOV_FIS_TRIGGER,
+ IOV_DEBUG_BUF_DEST_STAT,
+#ifdef DHD_DEBUG
+ IOV_INDUCE_ERROR,
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ IOV_CONC_DISC,
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ IOV_IFACE_POLICY,
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
IOV_LAST
};
@@ -378,6 +413,20 @@
#ifndef OEM_ANDROID
{"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
#endif // endif
+ {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_DEBUG
+ {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
{NULL, 0, 0, 0, 0, 0 }
};
@@ -389,47 +438,111 @@
bool ret = FALSE;
if (dhdp->dongle_reset) {
- DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
if (dhdp->dongle_trap_occured) {
- DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
#ifdef OEM_ANDROID
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
dhd_os_send_hang_message(dhdp);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
#endif /* OEM_ANDROID */
}
if (dhdp->iovar_timeout_occured) {
- DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#ifdef PCIE_FULL_DONGLE
if (dhdp->d3ack_timeout_occured) {
- DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+ if (dhdp->livelock_occured) {
+ DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->pktid_audit_failed) {
+ DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_PCIE_RUNTIMEPM
- if (ret) {
- DHD_DISABLE_RUNTIME_PM(dhdp);
+ if (dhdp->iface_op_failed) {
+ DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
}
-#endif /* DHD_PCIE_RUNTIMEPM */
+
+ if (dhdp->scan_timeout_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->scan_busy_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_error) {
+ DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+ if (dhd_bus_get_linkdown(dhdp)) {
+ DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhd_bus_get_cto(dhdp)) {
+ DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
return ret;
}
+void
+dhd_clear_bus_errors(dhd_pub_t *dhdp)
+{
+ if (!dhdp)
+ return;
+
+ dhdp->dongle_reset = FALSE;
+ dhdp->dongle_trap_occured = FALSE;
+ dhdp->iovar_timeout_occured = FALSE;
+#ifdef PCIE_FULL_DONGLE
+ dhdp->d3ack_timeout_occured = FALSE;
+ dhdp->livelock_occured = FALSE;
+ dhdp->pktid_audit_failed = FALSE;
+#endif // endif
+ dhdp->iface_op_failed = FALSE;
+ dhdp->scan_timeout_occurred = FALSE;
+ dhdp->scan_busy_occurred = FALSE;
+}
+
#ifdef DHD_SSSR_DUMP
+
+/* This can be overwritten by module parameter defined in dhd_linux.c */
+uint support_sssr_dump = TRUE;
+
int
dhd_sssr_mempool_init(dhd_pub_t *dhd)
{
@@ -451,6 +564,68 @@
}
}
+void
+dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
+{
+#ifdef DHD_PCIE_REG_ACCESS
+ int i, j;
+ DHD_ERROR(("************** SSSR REG INFO start ****************\n"));
+ DHD_ERROR(("pmu_regs\n"));
+ DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
+ "macresreqtimer=0x%x macresreqtimer1=0x%x\n",
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
+ sssr_reg_info->pmu_regs.base_regs.resreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
+ DHD_ERROR(("chipcommon_regs\n"));
+ DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
+ sssr_reg_info->chipcommon_regs.base_regs.intmask,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
+ sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
+ DHD_ERROR(("arm_regs\n"));
+ DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
+ " resetctrl=0x%x itopoobb=0x%x\n",
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
+ sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
+ DHD_ERROR(("pcie_regs\n"));
+ DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
+ sssr_reg_info->pcie_regs.base_regs.ltrstate,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
+ DHD_ERROR(("vasip_regs\n"));
+ DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
+ sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
+ sssr_reg_info->vasip_regs.vasip_sr_addr,
+ sssr_reg_info->vasip_regs.vasip_sr_size));
+
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ DHD_ERROR(("mac_regs core[%d]\n", i));
+ DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x\n",
+ sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
+ sssr_reg_info->mac_regs[i].base_regs.xmtdata,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
+ DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
+ sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
+ sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
+ for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
+ DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
+ }
+ DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
+ }
+ DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
+#endif /* DHD_PCIE_REG_ACCESS */
+}
+
int
dhd_get_sssr_reg_info(dhd_pub_t *dhd)
{
@@ -465,6 +640,7 @@
return BCME_ERROR;
}
+ dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
return BCME_OK;
}
@@ -494,6 +670,11 @@
dhd->sssr_inited = FALSE;
+ if (!support_sssr_dump) {
+ DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
/* check if sssr mempool is allocated */
if (dhd->sssr_mempool == NULL) {
DHD_ERROR(("%s: sssr_mempool is not allocated\n",
@@ -587,6 +768,52 @@
return;
}
+void
+dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
+{
+ bool print_info = FALSE;
+ int dump_mode;
+
+ if (!dhd || !path) {
+ DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (!dhd->sssr_dump_collected) {
+ /* SSSR dump is not collected */
+ return;
+ }
+
+ dump_mode = dhd->sssr_dump_mode;
+
+ if (bcmstrstr(path, "core_0_before")) {
+ if (dhd->sssr_d11_outofreset[0] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_0_after")) {
+ if (dhd->sssr_d11_outofreset[0]) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_before")) {
+ if (dhd->sssr_d11_outofreset[1] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_after")) {
+ if (dhd->sssr_d11_outofreset[1]) {
+ print_info = TRUE;
+ }
+ } else {
+ print_info = TRUE;
+ }
+
+ if (print_info) {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ path, FILE_NAME_HAL_TAG));
+ }
+}
#endif /* DHD_SSSR_DUMP */
#ifdef DHD_FW_COREDUMP
@@ -628,8 +855,6 @@
int
dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
{
- char eabuf[ETHER_ADDR_STR_LEN];
-
struct bcmstrbuf b;
struct bcmstrbuf *strbuf = &b;
@@ -647,7 +872,7 @@
bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
- dhdp->iswl, dhdp->drv_version, MAC2STRDBG(bcm_ether_ntoa(&dhdp->mac, eabuf)));
+ dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
bcm_bprintf(strbuf, "dongle stats:\n");
@@ -672,6 +897,8 @@
dhdp->rx_readahead_cnt, dhdp->tx_realloc);
bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
+ bcm_bprintf(strbuf, "tx_big_packets %lu\n",
+ dhdp->tx_big_packets);
bcm_bprintf(strbuf, "\n");
#ifdef DMAMAP_STATS
/* Add DMA MAP info */
@@ -693,7 +920,7 @@
dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
dhdp->dma_stats.tsbuf_rx_sz));
#endif /* DMAMAP_STATS */
-
+ bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
/* Add any prot info */
dhd_prot_dump(dhdp, strbuf);
bcm_bprintf(strbuf, "\n");
@@ -888,6 +1115,14 @@
tmp[min_len] = '\0';
}
#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_DISCONNECT_TRACE
+ if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
+ (WLC_DISASSOC_MYAP == ioc->cmd)) {
+ DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
+ }
+#endif /* DHD_DISCONNECT_TRACE */
+
/* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
if (ioc->set == TRUE) {
char *pars = (char *)buf; // points at user buffer
@@ -949,7 +1184,9 @@
}
}
#endif /* DUMP_IOCTL_IOV_LIST */
+
ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+
#ifdef DUMP_IOCTL_IOV_LIST
if (ret == -ETIMEDOUT) {
DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
@@ -984,19 +1221,27 @@
}
DHD_IOVAR_MEM((
"%s: cmd: %d, msg: %s val: 0x%x,"
- " len: %d, set: %d\n",
+ " len: %d, set: %d, txn-id: %d\n",
ioc->cmd == WLC_GET_VAR ?
"WLC_GET_VAR" : "WLC_SET_VAR",
- ioc->cmd, msg, lval, ioc->len, ioc->set));
+ ioc->cmd, msg, lval, ioc->len, ioc->set,
+ dhd_prot_get_ioctl_trans_id(dhd_pub)));
} else {
- DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d\n",
+ DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
- ioc->cmd, ioc->len, ioc->set));
+ ioc->cmd, ioc->len, ioc->set,
+ dhd_prot_get_ioctl_trans_id(dhd_pub)));
}
} else {
slen = ioc->len;
- if (buf != NULL) {
- val = *(int*)buf;
+ if (buf != NULL && slen != 0) {
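+			/* Read only as many bytes as the caller provided to avoid over-reading the buffer */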
+ if (slen >= 4) {
+ val = *(int*)buf;
+ } else if (slen >= 2) {
+ val = *(short*)buf;
+ } else {
+ val = *(char*)buf;
+ }
/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
@@ -1016,7 +1261,6 @@
DHD_ERROR(("%s: 'resumed on timeout' error is "
"occurred before the interface does not"
" bring up\n", __FUNCTION__));
- dhd_pub->busstate = DHD_BUS_DOWN;
}
#endif /* defined(OEM_ANDROID) */
@@ -1428,6 +1672,7 @@
dhd_pub->rx_readahead_cnt = 0;
dhd_pub->tx_realloc = 0;
dhd_pub->wd_dpc_sched = 0;
+ dhd_pub->tx_big_packets = 0;
memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
dhd_bus_clearcounts(dhd_pub);
#ifdef PROP_TXSTATUS
@@ -1882,16 +2127,21 @@
#ifdef SHOW_LOGTRACE
case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
- trace_buf_info_t *trace_buf_info;
+ trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
+ dhd_dbg_ring_t *dbg_verbose_ring = NULL;
- trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh,
- sizeof(trace_buf_info_t));
+ dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
+ if (dbg_verbose_ring == NULL) {
+ DHD_ERROR(("dbg_verbose_ring is NULL\n"));
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
if (trace_buf_info != NULL) {
- dhd_get_read_buf_ptr(dhd_pub, trace_buf_info);
- memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t));
- MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+ bzero(trace_buf_info, sizeof(trace_buf_info_t));
+ dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
} else {
- DHD_ERROR(("Memory allocation Failed\n"));
+ DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
bcmerror = BCME_NOMEM;
}
break;
@@ -1989,6 +2239,61 @@
break;
}
#endif /* OEM_ANDROID */
+ case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
+ {
+ if (dhd_pub->debug_buf_dest_support) {
+ debug_buf_dest_stat_t *debug_buf_dest_stat =
+ (debug_buf_dest_stat_t *)arg;
+ memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
+ sizeof(dhd_pub->debug_buf_dest_stat));
+ } else {
+ bcmerror = BCME_DISABLED;
+ }
+ break;
+ }
+
+#ifdef DHD_DEBUG
+ case IOV_SVAL(IOV_INDUCE_ERROR): {
+ if (int_val >= DHD_INDUCE_ERROR_MAX) {
+ DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
+ } else {
+ dhd_pub->dhd_induce_error = (uint16)int_val;
+ }
+ break;
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ case IOV_GVAL(IOV_CONC_DISC): {
+ int_val = wl_cfg80211_get_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CONC_DISC): {
+ bcmerror = wl_cfg80211_set_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
+ break;
+ }
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ case IOV_GVAL(IOV_IFACE_POLICY): {
+ int_val = wl_cfg80211_get_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_IFACE_POLICY): {
+ bcmerror = wl_cfg80211_set_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub),
+ arg, len);
+ break;
+ }
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
default:
bcmerror = BCME_UNSUPPORTED;
break;
@@ -2251,7 +2556,7 @@
* and should not return error for IOCTLs fired before FW
* Download is done
*/
- if (dhd_fw_download_status(dhd_pub)) {
+ if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
DHD_ERROR(("%s: returning as busstate=%d\n",
__FUNCTION__, dhd_pub->busstate));
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
@@ -2295,7 +2600,13 @@
* to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
* not used in Production platforms but only used in FC19 setups.
*/
- if (!bcmstricmp((char *)buf, "devreset")) {
+ if (!bcmstricmp((char *)buf, "devreset") ||
+#ifdef BCMPCIE
+ (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
+ !bcmstricmp((char *)buf, "dwnldstate")) ||
+#endif /* BCMPCIE */
+ FALSE)
+ {
DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
}
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
@@ -2422,8 +2733,8 @@
} else if (status == WLC_E_STATUS_TIMEOUT) {
DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
- event_name, eabuf, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, (int)status, (int)reason));
} else {
DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
event_name, eabuf, (int)status));
@@ -2442,6 +2753,8 @@
auth_str = "Open System";
else if (auth_type == DOT11_SHARED_KEY)
auth_str = "Shared Key";
+ else if (auth_type == DOT11_SAE)
+ auth_str = "SAE";
else {
snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
auth_str = err_msg;
@@ -2456,8 +2769,14 @@
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
- event_name, eabuf, auth_str, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
+ event_name, eabuf, auth_str));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
}
BCM_REFERENCE(auth_str);
@@ -2470,7 +2789,7 @@
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
} else {
if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+ DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
} else if (status == WLC_E_STATUS_NO_NETWORKS) {
DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
} else {
@@ -2523,6 +2842,11 @@
case WLC_E_SCAN_COMPLETE:
DHD_EVENT(("MACEVENT: %s\n", event_name));
break;
+
+ case WLC_E_IND_DOS_STATUS:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ break;
+
case WLC_E_RSSI_LQM:
case WLC_E_PFN_NET_FOUND:
case WLC_E_PFN_NET_LOST:
@@ -2579,7 +2903,36 @@
case WLC_E_CCA_CHAN_QUAL:
if (datalen) {
cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
- if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
+ if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
+ cca_only_chan_qual_event_t *cca_only_event =
+ (cca_only_chan_qual_event_t *)cca_event;
+ BCM_REFERENCE(cca_only_event);
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec));
+ DHD_EVENT((
+ "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
+ " ts 0x%08x)\n",
+ cca_only_event->cca_busy_ext.duration,
+ cca_only_event->cca_busy_ext.congest_ibss,
+ cca_only_event->cca_busy_ext.congest_obss,
+ cca_only_event->cca_busy_ext.interference,
+ cca_only_event->cca_busy_ext.timestamp));
+ DHD_EVENT((
+ "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_nopm.duration,
+ cca_only_event->cca_busy_nopm.congest_ibss,
+ cca_only_event->cca_busy_nopm.congest_obss,
+ cca_only_event->cca_busy_nopm.interference));
+ DHD_EVENT((
+ "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_pm.duration,
+ cca_only_event->cca_busy_pm.congest_ibss,
+ cca_only_event->cca_busy_pm.congest_obss,
+ cca_only_event->cca_busy_pm.interference));
+ } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
DHD_EVENT((
"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
@@ -2636,6 +2989,7 @@
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
event_name, event_type, eabuf, (int)status, dtoh16(escan_result->sync_id)));
#endif // endif
+
break;
}
case WLC_E_IF:
@@ -2690,7 +3044,7 @@
break;
case WLC_E_NAN_CRITICAL:
{
- DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
+ DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
break;
}
case WLC_E_NAN_NON_CRITICAL:
@@ -2698,6 +3052,13 @@
DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
break;
}
+ case WLC_E_PROXD:
+ {
+ wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
+ DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
+ event_name, proxd->type, reason));
+ break;
+ }
case WLC_E_RPSNOA:
{
rpsnoa_stats_t *stat = event_data;
@@ -2708,6 +3069,39 @@
}
break;
}
+ case WLC_E_PHY_CAL:
+ {
+ DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
+ break;
+ }
+ case WLC_E_WA_LQM:
+ {
+ wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
+ bcm_xtlv_t *subevent;
+ wl_event_wa_lqm_basic_t *elqm_basic;
+
+ if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
+ (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
+ DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
+ event_name, event_wa_lqm->ver, event_wa_lqm->len));
+ break;
+ }
+
+ subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
+ if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
+ (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
+ DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
+ event_name, subevent->id, subevent->len));
+ break;
+ }
+
+ elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
+ BCM_REFERENCE(elqm_basic);
+ DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
+ event_name, elqm_basic->rssi, elqm_basic->snr,
+ elqm_basic->tx_rate, elqm_basic->rx_rate));
+ break;
+ }
default:
DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
@@ -2730,7 +3124,7 @@
#ifdef DNGL_EVENT_SUPPORT
/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
- int
+int
dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
{
bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
@@ -2739,6 +3133,49 @@
return BCME_OK;
}
+#ifdef PARSE_DONGLE_HOST_EVENT
+typedef struct hck_id_to_str_s {
+ uint32 id;
+ char *name;
+} hck_id_to_str_t;
+
+hck_id_to_str_t hck_sw_id_to_str[] = {
+ {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
+ {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
+ {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
+ {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
+ {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
+ {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
+ {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
+ {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
+ {0, NULL}
+};
+
+hck_id_to_str_t hck_pcie_module_to_str[] = {
+ {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
+ {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
+ {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
+ {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
+ {0, NULL}
+};
+
+static void
+dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
+{
+ while (hck->name != NULL) {
+ if (hck->id == id) {
+ DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
+ return;
+ }
+ hck++;
+ }
+}
+#endif /* PARSE_DONGLE_HOST_EVENT */
+
void
dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
@@ -2785,15 +3222,21 @@
DHD_EVENT(("Line:%d ", *(uint32 *)p));
p += sizeof(uint32);
DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
case SOCRAM_IND_TAG_HEALTH_CHECK:
{
bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
- DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d\n",
- ltoh32(dngl_hc->top_module_tag), ltoh32(dngl_hc->top_module_len)));
+ DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
+ ltoh32(dngl_hc->top_module_tag),
+ ltoh32(dngl_hc->top_module_len),
+ datalen));
if (DHD_EVENT_ON()) {
- prhex("HEALTHCHECK", p, ltoh32(dngl_hc->top_module_len));
+ prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
+ + BCM_XTLV_HDR_SIZE, datalen));
}
#ifdef DHD_LOG_DUMP
memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
@@ -2821,6 +3264,11 @@
ltoh32(pcie_hc->pcie_err_ind_type),
ltoh32(pcie_hc->pcie_flag),
ltoh32(pcie_hc->pcie_control_reg)));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_print_dongle_hck_id(
+ ltoh32(pcie_hc->pcie_err_ind_type),
+ hck_pcie_module_to_str);
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
#ifdef HCHK_COMMON_SW_EVENT
@@ -2836,9 +3284,14 @@
return;
}
BCM_REFERENCE(wl_hc);
- DHD_EVENT(("WL SW HC type %d len %d",
- ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
+ DHD_EVENT(("WL SW HC type %d len %d\n",
+ ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
+ hck_sw_id_to_str);
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
+
}
#endif /* HCHK_COMMON_SW_EVENT */
default:
@@ -2852,7 +3305,7 @@
break;
}
default:
- DHD_ERROR(("%s:Unknown TAG", __FUNCTION__));
+ DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, taglen);
}
@@ -2861,7 +3314,7 @@
break;
}
default:
- DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type));
+ DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, datalen);
}
@@ -2950,6 +3403,9 @@
uint evlen;
int ret;
uint16 usr_subtype;
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ dhd_if_t *ifp = NULL;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
ret = wl_host_event_get_data(pktdata, pktlen, &evu);
if (ret != BCME_OK) {
@@ -3048,7 +3504,7 @@
#endif // endif
#ifdef PROP_TXSTATUS
{
- uint8* ea = pvt_data->eth.ether_dhost;
+ uint8* ea = &event->addr.octet[0];
WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
ifevent->ifidx,
((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
@@ -3122,7 +3578,9 @@
#endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
#if defined(RTT_SUPPORT)
case WLC_E_PROXD:
+#ifndef WL_CFG80211
dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
+#endif /* WL_CFG80211 */
break;
#endif /* RTT_SUPPORT */
/* These are what external supplicant/authenticator wants */
@@ -3147,6 +3605,46 @@
dhd_natoe_ct_event(dhd_pub, event_data);
#endif /* WL_NATOE */
break;
+#ifdef WL_NAN
+ case WLC_E_SLOTTED_BSS_PEER_OP:
+ DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
+ "" MACDBG ", status = %d\n",
+ __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
+ if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
+ dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+ } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
+ uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+ BCM_REFERENCE(ifindex);
+ dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+ (char *)&event->addr.octet[0]);
+#endif // endif
+ } else {
+ DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
+ __FUNCTION__, status));
+ }
+ break;
+#endif /* WL_NAN */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ case WLC_E_REASSOC:
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+
+ if (!ifp)
+ break;
+
+ /* Consider STA role only since roam is disabled on P2P GC.
+ * Drop EAPOL M1 frame only if roam is done to same BSS.
+ */
+ if ((status == WLC_E_STATUS_SUCCESS) &&
+ IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
+ ifp->recv_reassoc_evt = TRUE;
+ }
+ break;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
case WLC_E_LINK:
#ifdef PCIE_FULL_DONGLE
if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
@@ -3201,8 +3699,15 @@
}
}
#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
/* fall through */
-
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+ if (ifp) {
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+ /* fall through */
default:
*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
/* push up to external supp/auth */
@@ -3223,6 +3728,13 @@
memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
#endif // endif
+#ifdef DHD_STATUS_LOGGING
+ if (dhd_pub->statlog) {
+ dhd_statlog_process_event(dhd_pub, type, *ifidx,
+ status, reason, flags);
+ }
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef SHOW_EVENTS
if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
wl_show_host_event(dhd_pub, event,
@@ -3294,6 +3806,35 @@
}
return i;
}
+
+int
+pattern_atoh_len(char *src, char *dst, int len)
+{
+ int i;
+ if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
+ strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + HD_PREFIX_SIZE; /* Skip past 0x */
+ if (strlen(src) % HD_BYTE_SIZE != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[HD_BYTE_SIZE + 1];
+
+ if (i > len - 1) {
+ DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
+ return -1;
+ }
+ bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
+ num[HD_BYTE_SIZE] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += HD_BYTE_SIZE;
+ }
+ return i;
+}
#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
#ifdef PKT_FILTER_SUPPORT
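
pattern_atoh_len() added above differs from the older helper mainly in the explicit bound check, so a long hex pattern can no longer overrun the destination buffer. A standalone sketch of the same parsing idea (plain C, not the driver's bcm_* helpers; the input and buffer size are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int hex_pattern_to_bytes(const char *src, unsigned char *dst, int len)
{
	int i;

	if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0)
		return -1;			/* must start with 0x, like the driver check */
	src += 2;
	if (strlen(src) % 2 != 0)
		return -1;			/* needs an even number of hex digits */
	for (i = 0; *src != '\0'; i++, src += 2) {
		char num[3] = { src[0], src[1], '\0' };

		if (i > len - 1)
			return -1;		/* pattern longer than the caller's buffer */
		dst[i] = (unsigned char)strtoul(num, NULL, 16);
	}
	return i;				/* number of bytes written */
}

int main(void)
{
	unsigned char buf[4];
	int n = hex_pattern_to_bytes("0xdeadbeef", buf, (int)sizeof(buf));

	if (n > 0)
		printf("%d bytes: %02x %02x %02x %02x\n", n, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
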
@@ -3418,16 +3959,16 @@
void
dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
{
- const char *str;
- wl_pkt_filter_t pkt_filter;
- wl_pkt_filter_t *pkt_filterp;
- int buf_len;
- int str_len;
- int rc;
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc = -1;
uint32 mask_size;
uint32 pattern_size;
- char *argv[16], * buf = 0;
- int i = 0;
+ char *argv[MAXPKT_ARG] = {0}, * buf = 0;
+ int i = 0;
char *arg_save = 0, *arg_org = 0;
if (!arg)
@@ -3454,8 +3995,13 @@
}
argv[i] = bcmstrtok(&arg_save, " ", 0);
- while (argv[i++])
+ while (argv[i++]) {
+ if (i >= MAXPKT_ARG) {
+ DHD_ERROR(("Invalid args provided\n"));
+ goto fail;
+ }
argv[i] = bcmstrtok(&arg_save, " ", 0);
+ }
i = 0;
if (argv[i] == NULL) {
@@ -3545,7 +4091,7 @@
WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
} else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
int list_cnt = 0;
- char *endptr = '\0';
+ char *endptr = NULL;
wl_pkt_filter_pattern_listel_t *pf_el =
(wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
@@ -3566,16 +4112,18 @@
*endptr = '\0';
rc = wl_pkt_filter_base_parse(argv[i]);
if (rc == -1) {
- printf("Invalid base %s\n", argv[i]);
+ printf("Invalid base %s\n", argv[i]);
goto fail;
}
*endptr = ':';
- } else {
- printf("Invalid [base:]offset format: %s\n", argv[i]);
- goto fail;
}
}
+ if (endptr == NULL) {
+ printf("Invalid [base:]offset format: %s\n", argv[i]);
+ goto fail;
+ }
+
if (*endptr == ':') {
pf_el->base_offs = htod16(rc);
rc = strtoul(endptr + 1, &endptr, 0);
@@ -3725,7 +4273,15 @@
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
int retcode;
-
+#ifdef WL_CFG80211
+ /* Do not enable arp offload in case of non-STA interfaces active */
+ if (arp_enable &&
+ (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
+ DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
+ __FUNCTION__));
+ return;
+ }
+#endif /* WL_CFG80211 */
retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
arp_enable, WLC_SET_VAR, TRUE, 0);
@@ -3904,6 +4460,50 @@
return retcode;
}
+
+#ifdef REVERSE_AIFSN
+int
+check_reverse_aifsn_condition(dhd_pub_t *dhd, struct net_device *ndev)
+{
+ int iov_len = 0;
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ edcf_acparam_t *ac_params = NULL;
+ int retcode;
+ u8 aci, aifsn;
+ int ifidx;
+
+ if (dhd == NULL)
+ return -1;
+
+ ifidx = dhd_net2idx(dhd->info, ndev);
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ dhd->aifsn_reverse = FALSE;
+
+ strcpy(iovbuf, "wme_ac_sta");
+ iov_len = sizeof(iovbuf);
+
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, iov_len, FALSE, ifidx);
+ if (retcode) {
+ DHD_ERROR(("%s: could not get wme_ac_sta params(%d)\n\n",
+ __FUNCTION__, retcode));
+ return -1;
+ }
+
+ ac_params = (edcf_acparam_t *)iovbuf;
+ for (aci = 0; aci < AC_COUNT; aci++) {
+ aifsn = ac_params[aci].ACI & EDCF_AIFSN_MASK;
+ if (aci == AC_VI && aifsn == 10) {
+ DHD_ERROR(("[%s] Reverse AIFSN for AC_VI:10 \n", __FUNCTION__));
+ dhd->aifsn_reverse = TRUE;
+ break;
+ }
+ }
+ return 0;
+}
+#endif /* REVERSE_AIFSN */
+
/*
* Neighbor Discover Offload: enable NDO feature
* Called by ipv6 event handler when interface goes down
@@ -4386,6 +4986,12 @@
int ret = -1;
int allowed_skip_dtim_cnt = 0;
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ return bcn_li_dtim;
+ }
+
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
@@ -4462,6 +5068,12 @@
int ap_beacon = 0;
int allowed_skip_dtim_cnt = 0;
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ goto exit;
+ }
+
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
@@ -4528,6 +5140,66 @@
}
#endif /* OEM_ANDROID && BCMPCIE */
+#ifdef CONFIG_SILENT_ROAM
+int
+dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
+{
+ int ret = BCME_OK;
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (set && (dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+ DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
+ return ret;
+ }
+
+ if (!dhd->sroam_turn_on) {
+ DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
+ return ret;
+ }
+ psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
+ if (!psroam) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ goto done;
+ }
+
+ sroam = (wlc_sroam_info_t *)psroam->data;
+ sroam->sroam_on = set;
+ DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
+
+ ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
+ }
+
+done:
+ if (psroam) {
+ MFREE(dhd->osh, psroam, sroamlen);
+ }
+ return ret;
+
+}
+#endif /* CONFIG_SILENT_ROAM */
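
dhd_sroam_set_mon() above follows a read-modify-write pattern against the firmware: fetch the current wlc_sroam_t, refuse to touch it if the version is unknown, flip only the sroam_on field, write it back, and release the scratch buffer at the end. A minimal self-contained sketch of that flow, where fw_get()/fw_set() are stand-ins for the two dhd_iovar() calls and not real driver APIs:

#include <stdint.h>
#include <stdio.h>

#define CUR_VER 1

struct sroam { uint16_t ver; uint8_t on; };

/* Placeholder "firmware" state and accessors (the driver uses dhd_iovar()). */
static struct sroam fw_copy = { CUR_VER, 0 };
static int fw_get(struct sroam *out) { *out = fw_copy; return 0; }
static int fw_set(const struct sroam *in) { fw_copy = *in; return 0; }

static int sroam_set_mon(int enable)
{
	struct sroam s;

	if (fw_get(&s) != 0)
		return -1;
	if (s.ver != CUR_VER)		/* refuse to touch an unknown layout */
		return -2;
	s.on = (uint8_t)enable;		/* modify only the field we own */
	return fw_set(&s);
}

int main(void)
{
	int rc = sroam_set_mon(1);

	printf("set -> %d, fw copy now %u\n", rc, fw_copy.on);
	return 0;
}
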
+
/* Check if the mode supports STA MODE */
bool dhd_support_sta_mode(dhd_pub_t *dhd)
{
@@ -5127,84 +5799,6 @@
MFREE(dhd->osh, buffer, length);
}
-#if defined(DHD_8021X_DUMP)
-#define EAP_PRINT(str) \
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
- ifname, direction ? "TX" : "RX"));
-/* Parse EAPOL 4 way handshake messages */
-void
-dhd_dump_eapol_4way_message(char *ifname, unsigned char *dump_data, bool direction)
-{
- unsigned char type;
- int pair, ack, mic, kerr, req, sec, install;
- unsigned short us_tmp;
-
- type = dump_data[15];
- if (type == 0) {
- if ((dump_data[22] == 1) && (dump_data[18] == 1)) {
- EAP_PRINT("EAP Packet, Request, Identity");
- } else if ((dump_data[22] == 1) && (dump_data[18] == 2)) {
- EAP_PRINT("EAP Packet, Response, Identity");
- } else if (dump_data[22] == 254) {
- if (dump_data[30] == 1) {
- EAP_PRINT("EAP Packet, WSC Start");
- } else if (dump_data[30] == 4) {
- if (dump_data[41] == 4) {
- EAP_PRINT("EAP Packet, WPS M1");
- } else if (dump_data[41] == 5) {
- EAP_PRINT("EAP Packet, WPS M2");
- } else if (dump_data[41] == 7) {
- EAP_PRINT("EAP Packet, WPS M3");
- } else if (dump_data[41] == 8) {
- EAP_PRINT("EAP Packet, WPS M4");
- } else if (dump_data[41] == 9) {
- EAP_PRINT("EAP Packet, WPS M5");
- } else if (dump_data[41] == 10) {
- EAP_PRINT("EAP Packet, WPS M6");
- } else if (dump_data[41] == 11) {
- EAP_PRINT("EAP Packet, WPS M7");
- } else if (dump_data[41] == 12) {
- EAP_PRINT("EAP Packet, WPS M8");
- }
- } else if (dump_data[30] == 5) {
- EAP_PRINT("EAP Packet, WSC Done");
- }
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
- } else if (type == 3 && dump_data[18] == 2) {
- us_tmp = (dump_data[19] << 8) | dump_data[20];
- pair = 0 != (us_tmp & 0x08);
- ack = 0 != (us_tmp & 0x80);
- mic = 0 != (us_tmp & 0x100);
- kerr = 0 != (us_tmp & 0x400);
- req = 0 != (us_tmp & 0x800);
- sec = 0 != (us_tmp & 0x200);
- install = 0 != (us_tmp & 0x40);
-
- if (!sec && !mic && ack && !install && pair && !kerr && !req) {
- EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
- } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
- EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
- } else if (pair && ack && mic && sec && !kerr && !req) {
- EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
- } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
- EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
-}
-#endif /* DHD_8021X_DUMP */
-
#ifdef SHOW_LOGTRACE
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
@@ -5222,7 +5816,7 @@
uint32 fwid = 0;
void *file = NULL;
int file_len = 0;
- char fwid_str[64];
+ char fwid_str[FWID_STR_LEN];
uint32 hdr_logstrs_size = 0;
/* Read last three words in the logstrs.bin file */
@@ -5266,20 +5860,26 @@
goto error;
}
/* fwid is at the end of fw bin in string format */
- if (dhd_os_seek_file(file, file_len - 32) < 0) {
+ if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
goto error;
}
- memset(fwid_str, 0, 64);
- if (dhd_os_get_image_block(fwid_str, 32, file) <= 0) {
+ memset(fwid_str, 0, sizeof(fwid_str));
+ if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
goto error;
}
- pfw_id = (uint8 *)bcmstrnstr(fwid_str, 64, "FWID: 01-", 9);
+ pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
+ FWID_STR_1, strlen(FWID_STR_1));
if (!pfw_id) {
- DHD_ERROR(("%s: could not find id in FW bin!\n", __FUNCTION__));
- goto error;
+ pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
+ FWID_STR_2, strlen(FWID_STR_2));
+ if (!pfw_id) {
+ DHD_ERROR(("%s: could not find id in FW bin!\n",
+ __FUNCTION__));
+ goto error;
+ }
}
/* search for the '-' in the fw id str, after which the
* actual 4 byte fw id is present
@@ -5292,12 +5892,14 @@
/* check if fw id in logstrs.bin matches the fw one */
if (hdr->trailer.fw_id != fwid) {
- DHD_ERROR(("%s: logstr id does not match FW!\n", __FUNCTION__));
+ DHD_ERROR(("%s: logstr id does not match FW!"
+ "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
+ __FUNCTION__, hdr->trailer.fw_id, fwid));
goto error;
}
match_fail = FALSE;
- num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
+ num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
ram_index = (hdr->ram_lognums_offset -
hdr->rom_lognums_offset) / sizeof(uint32);
lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
@@ -5635,7 +6237,7 @@
uint32 *crc = 0;
uint32 pktid = 0;
uint32 total_num_tx_pkts = 0;
- int err = 0;
+ int err = 0, err_exit = 0;
uint32 i = 0;
uint64 time_taken = 0;
int max_txbufs = 0;
@@ -5648,12 +6250,14 @@
if (tput_data->version != TPUT_TEST_T_VER ||
tput_data->length != TPUT_TEST_T_LEN) {
DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
- return BCME_BADARG;
+ err_exit = BCME_BADARG;
+ goto exit_error;
}
if (dhd->tput_data.tput_test_running) {
DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
- return BCME_BUSY;
+ err_exit = BCME_BUSY;
+ goto exit_error;
}
#ifdef PCIE_FULL_DONGLE
/*
@@ -5665,7 +6269,8 @@
DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
__FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
(DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
- return BCME_BUFTOOLONG;
+ err_exit = BCME_BUFTOOLONG;
+ goto exit_error;
}
#endif // endif
max_txbufs = dhd_get_max_txbufs(dhd);
@@ -5674,7 +6279,8 @@
if (!(tput_data->num_pkts > 0)) {
DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
__FUNCTION__, tput_data->num_pkts));
- return BCME_ERROR;
+ err_exit = BCME_ERROR;
+ goto exit_error;
}
memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
@@ -5743,7 +6349,8 @@
DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
__FUNCTION__, dhd->tput_data.pkts_good,
dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
- return BCME_NOMEM;
+ err_exit = BCME_NOMEM;
+ goto exit_error;
}
pktdata = PKTDATA(dhd->osh, pkt);
PKTSETLEN(dhd->osh, pkt, pktsize);
@@ -5786,7 +6393,8 @@
DHD_ERROR(("%s: TX completion timeout !"
" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
- return BCME_ERROR;
+ err_exit = BCME_ERROR;
+ goto exit_error;
}
if (dhd->tput_start_ts && dhd->tput_stop_ts &&
(dhd->tput_stop_ts > dhd->tput_start_ts)) {
@@ -5795,7 +6403,8 @@
dhd->tput_data.tput_test_running = FALSE;
DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
__FUNCTION__));
- return BCME_ERROR;
+ err_exit = BCME_ERROR;
+ goto exit_error;
}
if (n_batches || n_remain) {
batch_cnt = TRUE;
@@ -5818,35 +6427,31 @@
if (!time_taken) {
time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
}
-#if !defined(CONFIG_64BIT)
- time_taken = do_div(time_taken, 1000); /* convert to ms */
-#else
- time_taken = time_taken/1000; /* convert to ms */
-#endif // endif
+ time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
dhd->tput_data.time_ms = time_taken;
if (time_taken) {
total_size = (pktsize * dhd->tput_data.pkts_cmpl * 8);
-#if !defined(CONFIG_64BIT)
- dhd->tput_data.tput_bps = do_div(total_size, time_taken);
-#else
- dhd->tput_data.tput_bps = total_size/time_taken;
-#endif // endif
+ dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
/* convert from ms to seconds */
- dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
+ dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * MSEC_PER_SEC;
}
} else {
DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
}
DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
dhd->tput_data.tput_bps, dhd->tput_data.time_ms));
- DHD_INFO(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
- __FUNCTION__, dhd->tput_data.pkts_good,
- dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));
dhd->tput_data.tput_test_running = FALSE;
- return BCME_OK;
+ err_exit = BCME_OK;
+
+exit_error:
+ DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
+ __FUNCTION__, dhd->tput_data.pkts_good,
+ dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
+
+ return err_exit;
}
void
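
The exit-path rework above also replaces the open-coded do_div()/plain-division pair with the DIV_U64_BY_U32/DIV_U64_BY_U64 wrappers; the arithmetic itself is unchanged. Assuming the timestamps are in microseconds (which the original "convert to ms" divide by 1000 implies), the throughput calculation reduces to the following host-side sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical figures: 100000 completed 1500-byte packets in 2 s. */
	uint64_t start_us = 0, stop_us = 2000000ULL;
	uint64_t pktsize = 1500, pkts_cmpl = 100000;

	uint64_t time_ms = (stop_us - start_us) / 1000;		/* us -> ms */
	uint64_t total_bits = pktsize * pkts_cmpl * 8;
	uint64_t tput_bps = (total_bits / time_ms) * 1000;	/* per-ms -> per-second */

	printf("time %llu ms, tput %llu bps\n",
		(unsigned long long)time_ms, (unsigned long long)tput_bps);
	return 0;
}
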
@@ -5981,7 +6586,7 @@
static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
/* Interface specific event ecounters */
- {WLC_E_LINK, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
+ {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
};
/* Accepts an argument to -s, -g or -f and creates an XTLV */
@@ -6071,7 +6676,7 @@
memset(&logset_op, 0, sizeof(logset_op));
logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
logset_type.len = htod16(sizeof(wl_el_set_type_t));
- for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++) {
+ for (i = 0; i < dhd->event_log_max_sets; i++) {
logset_type.set = i;
err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
@@ -6082,12 +6687,55 @@
logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
*logset_mask |= 0x01u << i;
ret = BCME_OK;
+ DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
}
}
return ret;
}
+static int
+dhd_ecounter_autoconfig(dhd_pub_t *dhd)
+{
+ int rc = BCME_OK;
+ uint32 buf;
+ rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+ if (rc != BCME_OK) {
+
+ if (rc != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
+ rc = BCME_OK;
+ } else {
+ DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
+ }
+ }
+
+ return rc;
+}
+
+int
+dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
+{
+ int rc = BCME_OK;
+ if (enable) {
+ if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
+ if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
+ } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
+ }
+ }
+ } else {
+ if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
+ } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
+ }
+ }
+ return rc;
+}
+
int
dhd_start_ecounters(dhd_pub_t *dhd)
{
@@ -6389,8 +7037,77 @@
#ifdef DHD_LOG_DUMP
int
+dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, int buflen, uint32 sec_type)
+{
+ uint32 rlen = 0;
+ uint32 data_len = 0;
+ void *data = NULL;
+ unsigned long flags = 0;
+ int ret = 0;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
+ int pos = 0;
+ int fpos_sechdr = 0;
+
+ if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
+ return BCME_BADARG;
+ }
+ /* do not allow further writes to the ring
+ * till we flush it
+ */
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_SUSPEND;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ if (dhdp->concise_dbg_buf) {
+ /* re-use concise debug buffer temporarily
+ * to pull ring data, to write
+ * record by record to file
+ */
+ data_len = CONCISE_DUMP_BUFLEN;
+ data = dhdp->concise_dbg_buf;
+ ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
+ /* write the section header now with zero length,
+ * once the correct length is found out, update
+ * it later
+ */
+ fpos_sechdr = pos;
+ sec_hdr->type = sec_type;
+ sec_hdr->length = 0;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &pos);
+ do {
+ rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
+ if (rlen > 0) {
+ /* write the log */
+ ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
+ }
+ DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
+ } while ((rlen > 0));
+ /* now update the section header length in the file */
+ /* Complete ring size is dumped by HAL, hence updating length to ring size */
+ sec_hdr->length = ring->ring_size;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &fpos_sechdr);
+ } else {
+ DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return ret;
+}
+
+int
dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr)
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, uint32 sec_type)
{
uint32 rlen = 0;
uint32 data_len = 0, total_len = 0;
@@ -6400,7 +7117,8 @@
int ret = 0;
dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
- if (!dhdp || !ring || !file || !sec_hdr || !file_posn)
+ if (!dhdp || !ring || !file || !sec_hdr ||
+ !file_posn || !text_hdr)
return BCME_BADARG;
/* do not allow further writes to the ring
@@ -6417,14 +7135,15 @@
*/
data_len = CONCISE_DUMP_BUFLEN;
data = dhdp->concise_dbg_buf;
- dhd_os_write_file_posn(file, file_posn, ECNTRS_LOG_HDR,
- strlen(ECNTRS_LOG_HDR));
+ dhd_os_write_file_posn(file, file_posn, text_hdr,
+ strlen(text_hdr));
/* write the section header now with zero length,
* once the correct length is found out, update
* it later
*/
+ dhd_init_sec_hdr(sec_hdr);
fpos_sechdr = *file_posn;
- sec_hdr->type = LOG_DUMP_SECTION_ECNTRS;
+ sec_hdr->type = sec_type;
sec_hdr->length = 0;
dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
sizeof(*sec_hdr));
@@ -6452,8 +7171,11 @@
DHD_DBG_RING_LOCK(ring->lock, flags);
ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
return BCME_OK;
}
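
Both dhd_dump_debug_ring() and the reworked dhd_log_dump_ring_to_file() above only learn the section length after the ring has been drained, so they emit the section header with length 0, remember its position, stream the records, and then rewrite the header in place. A minimal stdio sketch of that back-patching pattern (plain file I/O stands in for dhd_export_debug_data()/dhd_os_write_file_posn(); the section type is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sec_hdr { uint32_t type; uint32_t length; };

int main(void)
{
	const char *rec[] = { "record-a\n", "record-b\n" };
	struct sec_hdr hdr = { 1 /* hypothetical section type */, 0 };
	uint32_t total = 0;
	long hdr_pos;
	size_t i;
	FILE *fp = fopen("dump.bin", "wb+");

	if (!fp)
		return 1;
	fputs("=== RING ===\n", fp);		/* text header */
	hdr_pos = ftell(fp);			/* remember where the section header goes */
	fwrite(&hdr, sizeof(hdr), 1, fp);	/* placeholder with length 0 */
	for (i = 0; i < 2; i++) {		/* stream the ring records */
		fputs(rec[i], fp);
		total += (uint32_t)strlen(rec[i]);
	}
	hdr.length = total;			/* the real length is known only now */
	fseek(fp, hdr_pos, SEEK_SET);
	fwrite(&hdr, sizeof(hdr), 1, fp);	/* back-patch the header in place */
	fclose(fp);
	return 0;
}
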
@@ -6477,8 +7199,9 @@
return BCME_ERROR;
}
- dhdp->logdump_cookie = dhd_ring_init(buf, buf_size,
- LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT);
+ dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
+ LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
+ DHD_RING_TYPE_FIXED);
if (!dhdp->logdump_cookie) {
DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
return BCME_ERROR;
@@ -6494,7 +7217,7 @@
return;
}
if (dhdp->logdump_cookie) {
- dhd_ring_deinit(dhdp->logdump_cookie);
+ dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
}
return;
@@ -6554,13 +7277,19 @@
static inline int
__dhd_log_dump_cookie_to_file(
- dhd_pub_t *dhdp, void *fp, unsigned long *f_pos, char *buf, uint32 buf_size)
+ dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
+ char *buf, uint32 buf_size)
{
uint32 remain = buf_size;
int ret = BCME_ERROR;
char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
log_dump_section_hdr_t sec_hdr;
+ uint32 read_idx;
+ uint32 write_idx;
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
while (dhd_logdump_cookie_count(dhdp) > 0) {
memset(tmp_buf, 0, sizeof(tmp_buf));
ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
@@ -6569,7 +7298,10 @@
}
remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
}
- ret = dhd_os_write_file_posn(fp, f_pos, COOKIE_LOG_HDR, strlen(COOKIE_LOG_HDR));
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
return ret;
@@ -6578,13 +7310,14 @@
sec_hdr.timestamp = local_clock();
sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
sec_hdr.length = buf_size - remain;
- ret = dhd_os_write_file_posn(fp, f_pos, (char *)&sec_hdr, sizeof(sec_hdr));
+
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
return ret;
}
- ret = dhd_os_write_file_posn(fp, f_pos, buf, sec_hdr.length);
+ ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
}
@@ -6592,14 +7325,116 @@
return ret;
}
+uint32
+dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
+{
+ int len = 0;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ int ret = BCME_ERROR;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= (uint32)strlen(tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ len += strlen(COOKIE_LOG_HDR);
+ len += sizeof(sec_hdr);
+ len += (buf_size - remain);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return len;
+}
+
int
-dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos)
+dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
+{
+ int ret = BCME_ERROR;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ int pos = 0;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
+ sec_hdr.magic = LOG_DUMP_MAGIC;
+ sec_hdr.timestamp = local_clock();
+ sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
+ sec_hdr.length = buf_size - remain;
+ ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
+ ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return ret;
+}
+
+int
+dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
{
char *buf;
int ret = BCME_ERROR;
uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
- if (!dhdp || !dhdp->logdump_cookie ||!fp || !f_pos) {
+ if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
DHD_ERROR(("%s At least one ptr is NULL "
"dhdp = %p cookie %p fp = %p f_pos = %p\n",
__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
@@ -6611,7 +7446,7 @@
DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
return ret;
}
- ret = __dhd_log_dump_cookie_to_file(dhdp, fp, f_pos, buf, buf_size);
+ ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
MFREE(dhdp->osh, buf, buf_size);
return ret;
@@ -6620,45 +7455,253 @@
#endif /* DHD_LOG_DUMP */
#ifdef DHD_LOG_DUMP
+#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
void
dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
log_dump_type_t *flush_type;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+ uint64 current_time_sec;
if (!dhdp) {
DHD_ERROR(("dhdp is NULL !\n"));
return;
}
- flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
+ if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
+ DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
+ return;
+ }
+
+ current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+ DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
+ __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
+ DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+
+ if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
+ DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
+ __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+ return;
+ }
+
clear_debug_dump_time(dhdp->debug_dump_time_str);
#ifdef DHD_PCIE_RUNTIMEPM
/* wake up RPM if SYSDUMP is triggered */
dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */
/* */
- if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
- DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
- return;
- } else {
- dhdp->debug_dump_subcmd = subcmd;
- }
+ dhdp->debug_dump_subcmd = subcmd;
+ dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ /* flush_type is freed at do_dhd_log_dump function */
+ flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
if (flush_type) {
*flush_type = DLD_BUF_TYPE_ALL;
dhd_schedule_log_dump(dhdp, flush_type);
+ } else {
+ DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
+ return;
}
-#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ /* Inside dhd_mem_dump, an event notification is sent to the HAL; from
+ * another context DHD then pushes the memdump, debug_dump and pktlog
+ * dumps to the HAL, which writes them to file.
+ */
+#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
-#ifdef DHD_PKT_LOGGING
+
+#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
dhd_schedule_pktlog_dump(dhdp);
-#endif /* DHD_PKT_LOGGING */
+#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
}
#endif /* DHD_LOG_DUMP */
+#ifdef EWP_EDL
+/* For now the EDL ring memory is allocated with DMA_ALLOC_CONSISTENT,
+* because on hikey an attempt to DMA_MAP preallocated memory fails with
+* an 'out of space in SWIOTLB' error.
+*/
+int
+dhd_edl_mem_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+
+ memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
+ ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+/* NOTE: dhd_edl_mem_deinit() need not be called explicitly, because the dma_buf
+* for EDL is freed in 'dhd_prot_detach_edl_rings', which runs during de-init.
+*/
+void
+dhd_edl_mem_deinit(dhd_pub_t *dhd)
+{
+ if (dhd->edl_ring_mem.va != NULL)
+ dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
+}
+
+int
+dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
+ void *evt_decode_data)
+{
+ msg_hdr_edl_t *msg = NULL;
+ cmn_msg_hdr_t *cmn_msg_hdr = NULL;
+ uint8 *buf = NULL;
+
+ if (!data || !dhdp || !evt_decode_data) {
+ DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* format of data in each work item in the EDL ring:
+ * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
+ * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
+ */
+ cmn_msg_hdr = (cmn_msg_hdr_t *)data;
+ msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
+ buf = (uint8 *)msg;
+ /* validate the fields */
+ if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
+ DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
+ " expected (0x%x)\n", __FUNCTION__,
+ msg->infobuf_ver, PCIE_INFOBUF_V1));
+ return BCME_VERSION;
+ }
+
+ /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
+ if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
+ DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
+ __FUNCTION__));
+ return BCME_BUFTOOLONG;
+ }
+
+ if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
+ DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
+ __FUNCTION__, ltoh16(msg->pyld_hdr.type)));
+ return BCME_BADOPTION;
+ }
+
+ if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
+ DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
+ " than available buffer size %u\n", __FUNCTION__,
+ ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
+ return BCME_BADLEN;
+ }
+
+ /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
+ buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
+ dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
+ ltoh16(msg->pyld_hdr.length));
+
+ /* check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
+ * copy the event data to the skb and send it up the stack
+ */
+#ifdef BCMPCIE
+ if (dhdp->logtrace_pkt_sendup) {
+ DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
+ (uint32)(ltoh16(msg->pyld_hdr.length) +
+ sizeof(info_buf_payload_hdr_t) + 4)));
+ dhd_sendup_info_buf(dhdp, (uint8 *)msg);
+ }
+#endif /* BCMPCIE */
+
+ return BCME_OK;
+}
+#endif /* EWP_EDL */
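
The EDL handler above validates each work item in a fixed order: infobuf version first, then whether the fixed payload header even fits in the producer-reported space (the overloaded request_id), then the payload type, then the claimed payload length. The struct below is a hypothetical stand-in, not the real cmn_msg_hdr_t/info_buf_payload_hdr_t layout; only the ordering of the checks is the point:

#include <stdint.h>

struct pyld_hdr { uint16_t type; uint16_t length; };	/* hypothetical layout */

static int validate_work_item(uint32_t infobuf_ver, const struct pyld_hdr *ph,
		uint32_t reported_len,			/* space reported by the producer */
		uint32_t expect_ver, uint16_t expect_type)
{
	if (infobuf_ver != expect_ver)
		return -1;				/* unknown layout, skip the item */
	if (sizeof(*ph) > reported_len)
		return -2;				/* fixed header does not even fit */
	if (ph->type != expect_type)
		return -3;				/* not a logtrace payload */
	if (ph->length > reported_len)
		return -4;				/* claimed length overruns the buffer */
	return 0;
}

int main(void)
{
	struct pyld_hdr ph = { 1 /* hypothetical logtrace type */, 64 };

	return validate_work_item(1, &ph, 128, 1, 1) == 0 ? 0 : 1;
}
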
+
+#if defined(SHOW_LOGTRACE)
+int
+dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
+{
+ void *file = NULL;
+ int size = 0;
+ char buf[FW_VER_STR_LEN];
+ char *str = NULL;
+ int ret = BCME_OK;
+
+ if (!fwpath)
+ return BCME_BADARG;
+
+ file = dhd_os_open_image1(dhdp, fwpath);
+ if (!file) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ size = dhd_os_get_image_size(file);
+ if (!size) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* seek to the last 'X' bytes in the file */
+ if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* read the last 'X' bytes of the file to a buffer */
+ memset(buf, 0, FW_VER_STR_LEN);
+ if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* search for 'Version' in the buffer */
+ str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
+ if (!str) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* go back in the buffer to the last ascii character */
+ while (str != buf &&
+ (*str >= ' ' && *str <= '~')) {
+ --str;
+ }
+ /* reverse the final decrement, so that str is pointing
+ * to the first ascii character in the buffer
+ */
+ ++str;
+
+ if (strlen(str) > (FW_VER_STR_LEN - 1)) {
+ ret = BCME_BADLEN;
+ goto exit;
+ }
+
+ DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
+ /* copy to global variable, so that in case FW load fails, the
+ * core capture logs will contain FW version read from the file
+ */
+ memset(fw_version, 0, FW_VER_STR_LEN);
+ strlcpy(fw_version, str, FW_VER_STR_LEN);
+
+exit:
+ if (file)
+ dhd_os_close_image1(dhdp, file);
+
+ return ret;
+}
+#endif // endif
+
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+
static void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
@@ -6667,7 +7710,7 @@
int *cnt;
char *dest;
int bytes_written;
- const uint32 *ioc_buf;
+ uint32 ioc_dwlen = 0;
if (!dhd || !dhd->hang_info) {
DHD_ERROR(("%s dhd=%p hang_info=%p\n",
@@ -6695,24 +7738,88 @@
clear_debug_dump_time(dhd->debug_dump_time_hang_str);
- ioc_buf = (const uint32 *)ioc->buf;
+ /* Access ioc->buf only if the ioc->len is more than 4 bytes */
+ ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
+ if (ioc_dwlen > 0) {
+ const uint32 *ioc_buf = (const uint32 *)ioc->buf;
- remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
- bytes_written += scnprintf(&dest[bytes_written], remain_len,
- "%08x", *(uint32 *)(ioc_buf++));
- (*cnt)++;
- if ((*cnt) >= HANG_FIELD_CNT_MAX) {
- return;
- }
-
- for (i = 1; i < (uint32)(ioc->len / sizeof(uint32)) && *cnt <= HANG_FIELD_CNT_MAX;
- i++, (*cnt)++) {
remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
- bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
+ bytes_written += scnprintf(&dest[bytes_written], remain_len,
+ "%08x", *(uint32 *)(ioc_buf++));
+ (*cnt)++;
+ if ((*cnt) >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+
+ for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
+ i++, (*cnt)++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
+ }
}
DHD_INFO(("%s hang info len: %d data: %s\n",
__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+/*
+ * Helper function:
+ * Used for Dongle console message time syncing with Host printk
+ */
+void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
+{
+ uint64 ts;
+
+ /*
+ * local_clock() returns time in nanoseconds.
+ * The dongle understands only millisecond time.
+ */
+ ts = local_clock();
+ /* Nano seconds to milli seconds */
+ do_div(ts, 1000000);
+ if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
+ DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
+ /* Stopping HOST Dongle console time syncing */
+ dhd->dhd_rte_time_sync_ms = 0;
+ }
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
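
dhd_h2d_log_time_sync() above relies on the kernel's do_div() semantics: the 64-bit dividend is replaced in place by the quotient and the macro's value is the 32-bit remainder, and only the quotient (milliseconds) is passed to the rte_timesync iovar. The equivalent arithmetic in plain C, with a made-up timestamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ts = 1234567890123ULL;			/* nanoseconds, hypothetical */
	uint32_t rem = (uint32_t)(ts % 1000000);	/* what do_div() would return */

	ts /= 1000000;					/* what do_div() leaves in ts: ms */
	printf("ms = %llu, remainder ns = %u\n", (unsigned long long)ts, rem);
	return 0;
}
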
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+int
+dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
+{
+ int ret = BCME_OK;
+ bcm_xtlv_t *pxtlv = NULL;
+ uint8 mybuf[DHD_IOVAR_BUF_SIZE];
+ uint16 mybuf_len = sizeof(mybuf);
+ pxtlv = (bcm_xtlv_t *)mybuf;
+
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
+ &he_enab, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
+ __FUNCTION__, he_enab, bcmerrorstr(ret)));
+ } else {
+ DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
+ }
+
+ return ret;
+}
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
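
dhd_control_he_enab() above packs the one-byte enable flag as an XTLV before handing it to the "he" iovar. The sketch below shows generic type-length-value packing in that spirit; the field widths, endianness and command id are illustrative only and do not reproduce the exact bcm_xtlv wire format or WL_HE_CMD_ENAB value:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Pack one TLV: 2-byte id, 2-byte length, then the value bytes. */
static int tlv_pack(uint8_t *buf, uint16_t *avail, uint16_t id,
		const void *val, uint16_t vlen)
{
	uint16_t need = 4 + vlen;

	if (*avail < need)
		return -1;			/* would overflow the iovar buffer */
	memcpy(buf, &id, 2);
	memcpy(buf + 2, &vlen, 2);
	memcpy(buf + 4, val, vlen);
	*avail -= need;
	return 0;
}

int main(void)
{
	uint8_t iobuf[32] = {0};
	uint16_t avail = sizeof(iobuf);
	uint8_t he_enab = 0;			/* 0 = disable, 1 = enable */

	/* 0x0001 is a made-up sub-command id, not the real WL_HE_CMD_ENAB */
	if (tlv_pack(iobuf, &avail, 0x0001, &he_enab, sizeof(he_enab)) == 0)
		printf("packed %u bytes, %u left\n",
			(unsigned)(sizeof(iobuf) - avail), (unsigned)avail);
	return 0;
}
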
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
index 531f2ce..ebf234c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -1,7 +1,9 @@
/*
* Customer code to add GPIO control during WLAN start/stop
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -135,10 +137,6 @@
/* Customized Locale table : OPTIONAL feature */
const struct cntry_locales_custom translate_custom_table[] = {
-//tianyan@2021.7.27 modify for add wifi6 module start
- {},
-//tianyan@2021.7.27 modify for add wifi6 module end
-
/* Table should be filled out based on custom platform regulatory requirement */
#ifdef EXAMPLE_TABLE
{"", "XY", 4}, /* Universal if Country code is unknown or empty */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c
index 6126f20..810e83c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c
@@ -1,7 +1,9 @@
/*
* Platform Dependent file for usage of Preallocted Memory
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -290,12 +292,14 @@
int i;
int j;
+#if !defined(CONFIG_BCMDHD_PCIE)
for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL);
if (!wlan_static_skb[i]) {
goto err_skb_alloc;
}
}
+#endif /* !CONFIG_BCMDHD_PCIE */
for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_msm.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_msm.c
index 4caa8ec..46df5a8 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_msm.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_custom_msm.c
@@ -1,7 +1,9 @@
/*
* Platform Dependent file for Qualcomm MSM/APQ
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -41,9 +43,12 @@
#include <linux/fs.h>
#include <linux/of_gpio.h>
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
- defined(CONFIG_ARCH_SDM845)
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \
+ defined(USE_CUSTOM_MSM_PCIE)
#include <linux/msm_pcie.h>
-#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_ARCH_SDM845 */
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 ||
+ CONFIG_ARCH_SDM845 || CONFIG_ARCH_SM8150
+ */
#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
extern int dhd_init_wlan_mem(void);
@@ -52,7 +57,12 @@
#define WIFI_TURNON_DELAY 200
static int wlan_reg_on = -1;
+#ifdef CUSTOM_DT_COMPAT_ENTRY
+#define DHD_DT_COMPAT_ENTRY CUSTOM_DT_COMPAT_ENTRY
+#else /* CUSTOM_DT_COMPAT_ENTRY */
#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
+#endif /* CUSTOM_DT_COMPAT_ENTRY */
+
#ifdef CUSTOMER_HW2
#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on"
#else
@@ -60,11 +70,15 @@
#endif /* CUSTOMER_HW2 */
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
- defined(CONFIG_ARCH_SDM845)
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
#define MSM_PCIE_CH_NUM 0
+#elif defined(USE_CUSTOM_MSM_PCIE)
+#define MSM_PCIE_CH_NUM MSM_PCIE_CUSTOM_CH_NUM
#else
#define MSM_PCIE_CH_NUM 1
-#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_ARCH_SDM845 */
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998
+ || CONFIG_ARCH_SDM845 || CONFIG_ARCH_SM8150
+ */
#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
static int wlan_host_wake_up = -1;
@@ -76,7 +90,11 @@
#endif /* CUSTOMER_HW2 */
#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+#ifndef USE_CUSTOM_MSM_PCIE
int __init
+#else
+int
+#endif /* USE_CUSTOM_MSM_PCIE */
dhd_wifi_init_gpio(void)
{
char *wlan_node = DHD_DT_COMPAT_ENTRY;
@@ -135,10 +153,11 @@
wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
-#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361)
+#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361) || defined(CONFIG_BCM4375) || \
+ defined(USE_CUSTOM_MSM_PCIE)
printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
msm_pcie_enumerate(MSM_PCIE_CH_NUM);
-#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 */
+#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 || CONFIG_BCM4375 || USE_CUSTOM_MSM_PCIE */
return 0;
}
@@ -179,6 +198,18 @@
}
EXPORT_SYMBOL(dhd_wlan_power);
+void
+post_power_operation(int on)
+{
+#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361) || defined(CONFIG_BCM4375) || \
+ defined(USE_CUSTOM_MSM_PCIE)
+ printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
+ if (on) {
+ msm_pcie_enumerate(MSM_PCIE_CH_NUM);
+ }
+#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 || CONFIG_BCM4375 || USE_CUSTOM_MSM_PCIE */
+}
+
static int
dhd_wlan_reset(int onoff)
{
@@ -191,6 +222,16 @@
return 0;
}
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+int
+dhd_get_wlan_oob_gpio(void)
+{
+ return gpio_is_valid(wlan_host_wake_up) ?
+ gpio_get_value(wlan_host_wake_up) : -1;
+}
+EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
+
struct resource dhd_wlan_resources = {
.name = "bcmdhd_wlan_irq",
.start = 0, /* Dummy */
@@ -214,7 +255,11 @@
};
EXPORT_SYMBOL(dhd_wlan_control);
+#ifndef USE_CUSTOM_MSM_PCIE
int __init
+#else
+int
+#endif /* USE_CUSTOM_MSM_PCIE */
dhd_wlan_init(void)
{
int ret;
@@ -245,12 +290,16 @@
return ret;
}
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
- defined(CONFIG_ARCH_SDM845)
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
#if defined(CONFIG_DEFERRED_INITCALLS)
deferred_module_init(dhd_wlan_init);
#else
late_initcall(dhd_wlan_init);
#endif /* CONFIG_DEFERRED_INITCALLS */
#else
+#ifndef USE_CUSTOM_MSM_PCIE
device_initcall(dhd_wlan_init);
-#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_ARCH_SDM845 */
+#endif /* !USE_CUSTOM_MSM_PCIE */
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 ||
+ * CONFIG_ARCH_SDM845 || CONFIG_ARCH_SM8150
+ */
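Note on the USE_CUSTOM_MSM_PCIE additions above: with that define set, dhd_wlan_init() is no longer registered through device_initcall(), so the platform side has to call it explicitly and then re-enumerate the PCIe channel via post_power_operation(). A minimal sketch, illustrative only (the wrapper name and where it is registered are assumptions, not part of this patch):

    /* Illustrative wrapper, assumes it is built where both symbols are visible */
    extern int dhd_wlan_init(void);
    extern void post_power_operation(int on);

    int custom_wifi_platform_setup(void)
    {
        int ret;

        ret = dhd_wlan_init();      /* GPIOs, OOB host-wake IRQ, preallocated memory */
        if (ret)
            return ret;

        post_power_operation(1);    /* msm_pcie_enumerate() after powering the chip on */
        return 0;
    }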
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg.h
index aafb9a0..f6dba15 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -1,7 +1,9 @@
/*
* Debug/trace/assert driver definitions for Dongle Host Driver.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -30,17 +32,7 @@
#ifndef _dhd_dbg_
#define _dhd_dbg_
-#define PRINT_RATE_LIMIT_PERIOD 5000000000u /* 5s in units of ns */
-
#ifdef DHD_LOG_DUMP
-typedef enum {
- DLD_BUF_TYPE_GENERAL = 0,
- DLD_BUF_TYPE_PRESERVE,
- DLD_BUF_TYPE_SPECIAL,
- DLD_BUF_TYPE_ECNTRS,
- DLD_BUF_TYPE_FILTER,
- DLD_BUF_TYPE_ALL
-} log_dump_type_t;
extern char *dhd_log_dump_get_timestamp(void);
extern void dhd_log_dump_write(int type, char *binary_data,
int binary_len, const char *fmt, ...);
@@ -52,6 +44,9 @@
#define DHD_DUMP_LOG_HDR "\n-------------------- 'dhd dump' log -----------------------\n"
#define EXT_TRAP_LOG_HDR "\n-------------------- Extended trap data -------------------\n"
#define HEALTH_CHK_LOG_HDR "\n-------------------- Health check data --------------------\n"
+#ifdef DHD_DUMP_PCIE_RINGS
+#define FLOWRING_DUMP_HDR "\n-------------------- Flowring dump --------------------\n"
+#endif /* DHD_DUMP_PCIE_RINGS */
#define DHD_LOG_DUMP_WRITE(fmt, ...) \
dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
@@ -61,6 +56,10 @@
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
#define CONCISE_DUMP_BUFLEN 16 * 1024
#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n"
+#ifdef DHD_STATUS_LOGGING
+#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n"
+#endif /* DHD_STATUS_LOGGING */
+#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n"
#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n"
#endif /* DHD_LOG_DUMP */
@@ -73,7 +72,7 @@
do { \
if (dhd_msg_level & DHD_ERROR_VAL) { \
printf args; \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
@@ -110,6 +109,13 @@
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
+#define DHD_LOG_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
/* NON-EFI builds with LOG DUMP enabled */
#define DHD_EVENT(args) \
do { \
@@ -119,10 +125,10 @@
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-#define DHD_EVENT_MEM(args) \
+#define DHD_PRSRV_MEM(args) \
do { \
if (dhd_msg_level & DHD_EVENT_VAL) { \
- if (dhd_msg_level & DHD_EVENT_MEM_VAL) \
+ if (dhd_msg_level & DHD_PRSRV_MEM_VAL) \
printf args; \
DHD_LOG_DUMP_WRITE_PRSRV("[%s]: ", dhd_log_dump_get_timestamp()); \
DHD_LOG_DUMP_WRITE_PRSRV args; \
@@ -167,9 +173,10 @@
#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
#define DHD_ERROR_MEM(args) DHD_ERROR(args)
#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
+#define DHD_LOG_MEM(args) DHD_ERROR(args)
#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
-#define DHD_EVENT_MEM(args) DHD_EVENT(args)
+#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
#define DHD_ERROR_EX(args) DHD_ERROR(args)
#endif /* DHD_LOG_DUMP */
@@ -189,6 +196,16 @@
#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
#if defined(DHD_LOG_DUMP)
+#if defined(DHD_LOG_PRINT_RATE_LIMIT)
+#define DHD_FWLOG(args) \
+ do { \
+ if (dhd_msg_level & DHD_FWLOG_VAL) { \
+ if (!log_print_threshold) \
+ printf args; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
+#else
#define DHD_FWLOG(args) \
do { \
if (dhd_msg_level & DHD_FWLOG_VAL) { \
@@ -196,6 +213,7 @@
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
+#endif // endif
#else /* DHD_LOG_DUMP */
#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
#endif /* DHD_LOG_DUMP */
@@ -261,7 +279,7 @@
#define DHD_EVENT(args)
#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
-#define DHD_EVENT_MEM(args) DHD_EVENT(args)
+#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
#define DHD_BTA(args)
#define DHD_ISCAN(args)
@@ -278,6 +296,7 @@
#define DHD_ERROR_MEM(args) DHD_ERROR(args)
#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
+#define DHD_LOG_MEM(args) DHD_ERROR(args)
#define DHD_ERROR_EX(args) DHD_ERROR(args)
#ifdef CUSTOMER_HW4_DEBUG
@@ -318,13 +337,14 @@
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#endif // endif
+#define PRINT_RATE_LIMIT_PERIOD 5000000u /* 5s in units of us */
#define DHD_ERROR_RLMT(args) \
do { \
if (dhd_msg_level & DHD_ERROR_VAL) { \
static uint64 __err_ts = 0; \
static uint32 __err_cnt = 0; \
uint64 __cur_ts = 0; \
- __cur_ts = local_clock(); \
+ __cur_ts = OSL_SYSUPTIME_US(); \
if (__err_ts == 0 || (__cur_ts > __err_ts && \
(__cur_ts - __err_ts > PRINT_RATE_LIMIT_PERIOD))) { \
__err_ts = __cur_ts; \
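The DHD_ERROR_RLMT change above swaps local_clock() (nanoseconds) for OSL_SYSUPTIME_US() (microseconds) and resizes PRINT_RATE_LIMIT_PERIOD to match. A standalone sketch of the same 5-second suppression window, assuming only a microsecond uptime source (names here are illustrative, not driver code):

    #include <stdio.h>

    #define RLMT_PERIOD_US 5000000u    /* 5 s, mirrors PRINT_RATE_LIMIT_PERIOD */

    /* now_us stands in for OSL_SYSUPTIME_US() */
    static void error_log_rate_limited(const char *msg, unsigned long long now_us)
    {
        static unsigned long long err_ts;
        static unsigned int suppressed;

        if (err_ts == 0 || (now_us > err_ts && (now_us - err_ts) > RLMT_PERIOD_US)) {
            err_ts = now_us;
            if (suppressed)
                printf("... %u duplicate messages suppressed\n", suppressed);
            suppressed = 0;
            printf("%s\n", msg);
        } else {
            suppressed++;    /* inside the window: drop, but keep count */
        }
    }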
@@ -347,6 +367,12 @@
#define DHD_NONE(args)
extern int dhd_msg_level;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+extern int log_print_threshold;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+#define DHD_RTT_MEM(args) DHD_LOG_MEM(args)
+#define DHD_RTT_ERR(args) DHD_ERROR(args)
/* Defines msg bits */
#include <dhdioctl.h>
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.c
index 4420793..12f9ad8 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.c
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -36,7 +38,7 @@
int
dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf)
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive)
{
void *buf;
unsigned long flags = 0;
@@ -62,6 +64,7 @@
ring->state = RING_SUSPEND;
ring->rem_len = 0;
ring->sched_pull = TRUE;
+ ring->pull_inactive = pull_inactive;
DHD_DBG_RING_UNLOCK(ring->lock, flags);
return BCME_OK;
@@ -94,10 +97,18 @@
*/
if (ring->threshold > 0 &&
(pending_len >= ring->threshold) && ring->sched_pull) {
- pull_fn(os_pvt, id);
+ /*
+ * Update the state and release the lock before calling
+ * the pull_fn. Do not transfer control to other layers
+ * with locks held. If the callback again calls into
+ * the same layer from this context, it can lead to deadlock.
+ */
ring->sched_pull = FALSE;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ pull_fn(os_pvt, id);
+ } else {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
}
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
}
uint32
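The hunk above drops the ring lock before invoking pull_fn() so a callback that re-enters the ring layer cannot deadlock. A minimal sketch of the pattern, using a pthread mutex purely for illustration (the driver itself uses DHD_DBG_RING_LOCK/UNLOCK around the same sequence):

    #include <pthread.h>

    struct ring {
        pthread_mutex_t lock;
        int sched_pull;
        void (*pull_fn)(void *priv, int id);
        void *priv;
        int id;
    };

    static void ring_maybe_pull(struct ring *r, int over_threshold)
    {
        pthread_mutex_lock(&r->lock);
        if (over_threshold && r->sched_pull) {
            r->sched_pull = 0;               /* update state under the lock        */
            pthread_mutex_unlock(&r->lock);  /* drop it before leaving this layer  */
            r->pull_fn(r->priv, r->id);      /* callback may re-enter the ring API */
        } else {
            pthread_mutex_unlock(&r->lock);
        }
    }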
@@ -190,7 +201,7 @@
ring->rp);
/* check bounds before incrementing read ptr */
if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) {
- DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
+ DHD_ERROR(("%s: RING%d[%s] rp points out of boundary, "
"ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n",
__FUNCTION__, ring->id, ring->name, ring->wp,
ring->rp, ring->ring_size));
@@ -251,32 +262,36 @@
return BCME_OK;
}
+/*
+ * This function takes ring->lock internally, so callers of this function
+ * should not hold ring->lock.
+ */
int
-dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
- bool strip_header)
+dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header)
{
dhd_dbg_ring_entry_t *r_entry = NULL;
uint32 rlen = 0;
char *buf = NULL;
+ unsigned long flags;
if (!ring || !data || buf_len <= 0) {
return 0;
}
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+
/* pull from ring is allowed for inactive (suspended) ring
* in case of ecounters only, this is because, for ecounters
* when a trap occurs the ring is suspended and data is then
* pulled to dump it to a file. For other rings if ring is
* not in active state return without processing (as before)
*/
-#ifndef DEBUGABILITY_ECNTRS_LOGGING
- if (ring->state != RING_ACTIVE) {
- return 0;
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ goto exit;
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
if (ring->rp == ring->wp) {
- return 0;
+ goto exit;
}
DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n",
@@ -291,7 +306,8 @@
DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
" current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
ring->ring_size, ring->id, ring->name, ring->rp));
- return 0;
+ rlen = 0;
+ goto exit;
}
if (strip_header) {
@@ -308,7 +324,8 @@
__FUNCTION__, ring->id, ring->name, ring->ring_size,
ring->wp, ring->rp));
ASSERT(0);
- return 0;
+ rlen = 0;
+ goto exit;
}
memcpy(data, buf, rlen);
@@ -316,7 +333,7 @@
ring->rp += ENTRY_LENGTH(r_entry);
/* don't pass wp but skip padding if there is one */
if (ring->rp != ring->wp &&
- ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) {
+ ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) {
DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
__FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
ring->rp = 0;
@@ -328,12 +345,16 @@
" rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
ring->name, ring->rp, ring->ring_size));
ASSERT(0);
- return 0;
+ rlen = 0;
+ goto exit;
}
ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
+exit:
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
return rlen;
}
@@ -341,12 +362,17 @@
dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr)
{
int32 r_len, total_r_len = 0;
+ unsigned long flags;
if (!ring || !data)
return 0;
- if (ring->state != RING_ACTIVE)
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
return 0;
+ }
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
while (buf_len > 0) {
r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.h
index 2b5d8b1..26b9f65 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_dbg_ring.h
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -81,6 +83,7 @@
uint32 ring_size; /* numbers of item in ring */
uint32 wp; /* write pointer */
uint32 rp; /* read pointer */
+ uint32 rp_tmp; /* tmp read pointer */
uint32 log_level; /* log_level */
uint32 threshold; /* threshold bytes */
void * ring_buf; /* pointer of actually ring buffer */
@@ -90,6 +93,7 @@
bool tail_padded; /* writer does not have enough space */
uint32 rem_len; /* number of bytes from wp_pad to end */
bool sched_pull; /* schedule reader immediately */
+ bool pull_inactive; /* pull contents from ring even if it is inactive */
} dhd_dbg_ring_t;
#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
@@ -108,7 +112,8 @@
#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
#define PAYLOAD_MAX_LEN 65535
-#define PAYLOAD_ECNTR_MAX_LEN 1404
+#define PAYLOAD_ECNTR_MAX_LEN 1648u
+#define PAYLOAD_RTT_MAX_LEN 1648u
#define PENDING_LEN_MAX 0xFFFFFFFF
#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
@@ -122,7 +127,7 @@
typedef void (*os_pullreq_t)(void *os_priv, const int ring_id);
int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf);
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive);
void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring);
int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data);
int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
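Usage sketch for the new pull_inactive parameter (illustrative, not part of the patch; the ECNTR_* names and the ecntr_buf allocation are placeholders): rings that must be drained after a firmware trap pass TRUE, ordinary rings pass FALSE exactly as dhd_dbg_attach() now does.

    buf = MALLOCZ(dhdp->osh, FW_VERBOSE_RING_SIZE);
    if (!buf)
        goto error;
    /* regular ring: readable only while RING_ACTIVE */
    ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID],
        FW_VERBOSE_RING_ID, (uint8 *)FW_VERBOSE_RING_NAME,
        FW_VERBOSE_RING_SIZE, buf, FALSE);

    /* trap-dump ring (e.g. dhdp->ecntr_dbg_ring): contents can still be
     * pulled while the ring is suspended
     */
    ret = dhd_dbg_ring_init(dhdp, dhdp->ecntr_dbg_ring, ECNTR_RING_ID,
        (uint8 *)ECNTR_RING_NAME, ECNTR_RING_SIZE, ecntr_buf, TRUE);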
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.c
index af5f8b9..9c36642 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.c
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -128,42 +130,12 @@
{2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"}
};
-struct log_level_table fw_event_level_map[] = {
- {1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL_INFO"},
- {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, "BTCOEX_INFO"},
-#ifdef CUSTOMER_HW4_DEBUG
- {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON LOG"},
-#else
- {2, EVENT_LOG_TAG_BEACON_LOG, "BEACON LOG"},
-#endif /* CUSTOMER_HW4_DEBUG */
-};
-
-struct map_table nan_event_map[] = {
- {TRACE_NAN_CLUSTER_STARTED, NAN_EVENT_CLUSTER_STARTED, "NAN_CLUSTER_STARTED"},
- {TRACE_NAN_CLUSTER_JOINED, NAN_EVENT_CLUSTER_JOINED, "NAN_CLUSTER_JOINED"},
- {TRACE_NAN_CLUSTER_MERGED, NAN_EVENT_CLUSTER_MERGED, "NAN_CLUSTER_MERGED"},
- {TRACE_NAN_ROLE_CHANGED, NAN_EVENT_ROLE_CHANGED, "NAN_ROLE_CHANGED"},
- {TRACE_NAN_SCAN_COMPLETE, NAN_EVENT_SCAN_COMPLETE, "NAN_SCAN_COMPLETE"},
- {TRACE_NAN_STATUS_CHNG, NAN_EVENT_STATUS_CHNG, "NAN_STATUS_CHNG"},
-};
-
-struct log_level_table nan_event_level_map[] = {
- {1, EVENT_LOG_TAG_NAN_ERROR, "NAN_ERROR"},
- {2, EVENT_LOG_TAG_NAN_INFO, "NAN_INFO"},
- {3, EVENT_LOG_TAG_NAN_DBG, "NAN_DEBUG"},
-};
-
-struct map_table nan_evt_tag_map[] = {
- {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
- {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
-};
-
/* reference tab table */
uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0};
typedef struct dhddbg_loglist_item {
dll_t list;
- event_log_hdr_t *hdr;
+ prcd_event_log_hdr_t prcd_log_hdr;
} loglist_item_t;
typedef struct dhbdbg_pending_item {
@@ -209,6 +181,21 @@
return ret;
}
+dhd_dbg_ring_t *
+dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id)
+{
+ if (!dhdp || !dhdp->dbg) {
+ return NULL;
+ }
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return NULL;
+ }
+
+ return &dhdp->dbg->dbg_rings[ring_id];
+}
+
int
dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
bool strip_header)
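Illustrative caller (not part of the patch) combining the helper above with __dhd_dbg_get_ring_status(), which is added later in this change:

    dhd_dbg_ring_t *ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID);
    dhd_dbg_ring_status_t status;

    if (ring && (__dhd_dbg_get_ring_status(ring, &status) == BCME_OK)) {
        DHD_ERROR(("%s: written=%u read=%u\n",
            ring->name, status.written_bytes, status.read_bytes));
    }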
@@ -307,295 +294,8 @@
}
#endif /* MACOSX_DHD */
#ifdef SHOW_LOGTRACE
-static const uint8 *
-event_get_tlv(uint16 id, const char* tlvs, uint tlvs_len)
-{
- const uint8 *pos = (const uint8 *)tlvs;
- const uint8 *end = pos + tlvs_len;
- const tlv_log *tlv;
- int rest;
-
- while (pos + 1 < end) {
- if (pos + 4 + pos[1] > end)
- break;
- tlv = (const tlv_log *) pos;
- if (tlv->tag == id)
- return pos;
- rest = tlv->len % 4; /* padding values */
- pos += 4 + tlv->len + rest;
- }
- return NULL;
-}
-
#define DATA_UNIT_FOR_LOG_CNT 4
-/* #pragma used as a WAR to fix build failure,
- * ignore dropping of 'const' qualifier in tlv_data assignment
- * this pragma disables the warning only for the following function
- */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-static int
-dhd_dbg_nan_event_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *log_ptr, uint32 *data)
-{
- int ret = BCME_OK;
- wl_event_log_id_ver_t nan_hdr;
- log_nan_event_t *evt_payload;
- uint16 evt_payload_len = 0, tot_payload_len = 0;
- dhd_dbg_ring_entry_t msg_hdr;
- bool evt_match = FALSE;
- event_log_hdr_t *ts_hdr;
- uint32 *ts_data;
- char *tlvs, *dest_tlvs;
- tlv_log *tlv_data;
- int tlv_len = 0;
- int i = 0, evt_idx = 0;
- char eaddr_buf[ETHER_ADDR_STR_LEN];
- BCM_REFERENCE(eaddr_buf);
-
- if (log_ptr < data) {
- DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data));
- return BCME_BADADDR;
- }
-
- nan_hdr.t = *log_ptr;
- DHD_DBGIF(("%s: version %u event %x\n", __FUNCTION__, nan_hdr.version,
- nan_hdr.event));
-
- if (nan_hdr.version != DIAG_VERSION) {
- DHD_ERROR(("Event payload version %u mismatch with current version %u\n",
- nan_hdr.version, DIAG_VERSION));
- return BCME_VERSION;
- }
-
- /* nan event log should at least contain a wl_event_log_id_ver_t
- * header and a arm cycle count
- */
- if (hdr->count < 2) {
- return BCME_BADLEN;
- }
-
- memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
- if (log_ptr > data) {
- ts_hdr = (event_log_hdr_t *)((uint8 *)log_ptr - sizeof(event_log_hdr_t));
- if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
- ts_data = (uint32 *)ts_hdr - ts_hdr->count;
- if (ts_data >= data) {
- msg_hdr.timestamp = (uint64)ts_data[0];
- msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
- }
- }
- }
- msg_hdr.type = DBG_RING_ENTRY_NAN_EVENT_TYPE;
- for (i = 0; i < ARRAYSIZE(nan_event_map); i++) {
- if (nan_event_map[i].fw_id == nan_hdr.event) {
- evt_match = TRUE;
- evt_idx = i;
- break;
- }
- }
- if (evt_match) {
- DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, nan_event_map[evt_idx].desc));
- /* payload length for nan event data */
- evt_payload_len = sizeof(log_nan_event_t) +
- (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
- if ((evt_payload = MALLOC(dhdp->osh, evt_payload_len)) == NULL) {
- DHD_ERROR(("Memory allocation failed for nan evt log (%u)\n",
- evt_payload_len));
- return BCME_NOMEM;
- }
- evt_payload->version = NAN_EVENT_VERSION;
- evt_payload->event = nan_event_map[evt_idx].host_id;
- dest_tlvs = (char *)evt_payload->tlvs;
- tot_payload_len = sizeof(log_nan_event_t);
- tlvs = (char *)(&log_ptr[1]);
- tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
- for (i = 0; i < ARRAYSIZE(nan_evt_tag_map); i++) {
- tlv_data = (tlv_log *)event_get_tlv(nan_evt_tag_map[i].fw_id,
- tlvs, tlv_len);
- if (tlv_data) {
- DHD_DBGIF(("NAN evt tlv.tag(%s), tlv.len : %d, tlv.data : ",
- nan_evt_tag_map[i].desc, tlv_data->len));
- memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
- tot_payload_len += tlv_data->len + sizeof(tlv_log);
- switch (tlv_data->tag) {
- case TRACE_TAG_BSSID:
- case TRACE_TAG_ADDR:
- DHD_DBGIF(("%s\n",
- bcm_ether_ntoa(
- (const struct ether_addr *)tlv_data->value,
- eaddr_buf)));
- break;
- default:
- if (DHD_DBGIF_ON()) {
- prhex(NULL, &tlv_data->value[0],
- tlv_data->len);
- }
- break;
- }
- dest_tlvs += tlv_data->len + sizeof(tlv_log);
- }
- }
- msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
- msg_hdr.len = tot_payload_len;
- dhd_dbg_push_to_ring(dhdp, NAN_EVENT_RING_ID, &msg_hdr, evt_payload);
- MFREE(dhdp->osh, evt_payload, evt_payload_len);
- }
- return ret;
-}
-
-static int
-dhd_dbg_custom_evnt_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *log_ptr, uint32 *data)
-{
- int i = 0, match_idx = 0;
- int payload_len, tlv_len;
- uint16 tot_payload_len = 0;
- int ret = BCME_OK;
- int log_level;
- wl_event_log_id_ver_t wl_log_id;
- dhd_dbg_ring_entry_t msg_hdr;
- log_conn_event_t *event_data;
- bool evt_match = FALSE;
- event_log_hdr_t *ts_hdr;
- uint32 *ts_data;
- char *tlvs, *dest_tlvs;
- tlv_log *tlv_data;
- static uint64 ts_saved = 0;
- char eabuf[ETHER_ADDR_STR_LEN];
- char chanbuf[CHANSPEC_STR_LEN];
-
- BCM_REFERENCE(eabuf);
- BCM_REFERENCE(chanbuf);
-
- if (log_ptr < data) {
- DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data));
- return BCME_BADADDR;
- }
-
- /* get a event type and version */
- wl_log_id.t = *log_ptr;
- if (wl_log_id.version != DIAG_VERSION)
- return BCME_VERSION;
-
- /* custom event log should at least contain a wl_event_log_id_ver_t
- * header and a arm cycle count
- */
- if (hdr->count < 2) {
- return BCME_BADLEN;
- }
-
- if (log_ptr > data) {
- ts_hdr = (event_log_hdr_t *)((uint8 *)log_ptr - sizeof(event_log_hdr_t));
- if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
- ts_data = (uint32 *)ts_hdr - ts_hdr->count;
- if (ts_data >= data) {
- ts_saved = (uint64)ts_data[0];
- }
- }
- }
- memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
- msg_hdr.timestamp = ts_saved;
-
- DHD_DBGIF(("Android Event ver %d, payload %d words, ts %llu\n",
- (*log_ptr >> 16), hdr->count - 1, ts_saved));
-
- /* Perform endian convertion */
- for (i = 0; i < hdr->count; i++) {
- /* *(data + i) = ntoh32(*(data + i)); */
- DHD_DATA(("%08x ", *(log_ptr + i)));
- }
- DHD_DATA(("\n"));
- msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
- msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
- msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
-
- /* convert the data to log_conn_event_t format */
- for (i = 0; i < ARRAYSIZE(event_map); i++) {
- if (event_map[i].fw_id == wl_log_id.event) {
- evt_match = TRUE;
- match_idx = i;
- break;
- }
- }
- if (evt_match) {
- log_level = dhdp->dbg->dbg_rings[FW_EVENT_RING_ID].log_level;
- /* filter the data based on log_level */
- for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
- if ((fw_event_level_map[i].tag == hdr->tag) &&
- (fw_event_level_map[i].log_level > log_level)) {
- return BCME_OK;
- }
- }
- DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, event_map[match_idx].desc));
- /* get the payload length for event data (skip : log header + timestamp) */
- payload_len = sizeof(log_conn_event_t) + DATA_UNIT_FOR_LOG_CNT * (hdr->count - 2);
- event_data = (log_conn_event_t *)MALLOC(dhdp->osh, payload_len);
- if (!event_data) {
- DHD_ERROR(("failed to allocate the log_conn_event_t with length(%d)\n",
- payload_len));
- return BCME_NOMEM;
- }
- memset(event_data, 0, payload_len);
- event_data->event = event_map[match_idx].host_id;
- event_data->tlvs =
- (tlv_log *)((char *)event_data + offsetof(log_conn_event_t, tlvs));
- dest_tlvs = (char *)(event_data->tlvs);
- tot_payload_len = sizeof(log_conn_event_t);
- tlvs = (char *)(&log_ptr[1]);
- tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
- for (i = 0; i < ARRAYSIZE(event_tag_map); i++) {
- tlv_data = (tlv_log *)event_get_tlv(event_tag_map[i].fw_id,
- tlvs, tlv_len);
- if (tlv_data) {
- DHD_DBGIF(("tlv.tag(%s), tlv.len : %d, tlv.data : ",
- event_tag_map[i].desc, tlv_data->len));
- memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
- tot_payload_len += tlv_data->len + sizeof(tlv_log);
- switch (tlv_data->tag) {
- case TRACE_TAG_BSSID:
- case TRACE_TAG_ADDR:
- case TRACE_TAG_ADDR1:
- case TRACE_TAG_ADDR2:
- case TRACE_TAG_ADDR3:
- case TRACE_TAG_ADDR4:
- DHD_DBGIF(("%s\n",
- bcm_ether_ntoa((const struct ether_addr *)tlv_data->value,
- eabuf)));
- break;
- case TRACE_TAG_SSID:
- DHD_DBGIF(("%s\n", tlv_data->value));
- break;
- case TRACE_TAG_STATUS:
- DHD_DBGIF(("%d\n", ltoh32_ua(&tlv_data->value[0])));
- break;
- case TRACE_TAG_REASON_CODE:
- DHD_DBGIF(("%d\n", ltoh16_ua(&tlv_data->value[0])));
- break;
- case TRACE_TAG_RATE_MBPS:
- DHD_DBGIF(("%d Kbps\n",
- ltoh16_ua(&tlv_data->value[0]) * 500));
- break;
- case TRACE_TAG_CHANNEL_SPEC:
- DHD_DBGIF(("%s\n",
- wf_chspec_ntoa(
- ltoh16_ua(&tlv_data->value[0]), chanbuf)));
- break;
- default:
- if (DHD_DBGIF_ON()) {
- prhex(NULL, &tlv_data->value[0], tlv_data->len);
- }
- }
- dest_tlvs += tlv_data->len + sizeof(tlv_log);
- }
- }
- msg_hdr.len = tot_payload_len;
- dhd_dbg_push_to_ring(dhdp, FW_EVENT_RING_ID, &msg_hdr, event_data);
- MFREE(dhdp->osh, event_data, payload_len);
- }
- return ret;
-}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
@@ -681,19 +381,136 @@
}
}
+/* Identify whether the format specifier at curr_ptr is a valid non-string conversion type */
+bool
+check_valid_non_string_format(char *curr_ptr)
+{
+ char *next_ptr;
+ char *next_fmt_stptr;
+ char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u',
+ 'f', 'F', 'e', 'E', 'g', 'G', 'o',
+ 'a', 'A', 'n'};
+ int i;
+ bool valid = FALSE;
+
+ /* Check for next % in the fmt str */
+ next_fmt_stptr = bcmstrstr(curr_ptr, "%");
+
+ for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) {
+ for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) {
+ if (*next_ptr == valid_fmt_types[i]) {
+ /* Check whether format type found corresponds to current %
+ * and not the next one, if exists.
+ */
+ if ((next_fmt_stptr == NULL) ||
+ (next_fmt_stptr && (next_ptr < next_fmt_stptr))) {
+ /* Not validating for length/width fields in
+ * format specifier.
+ */
+ valid = TRUE;
+ }
+ goto done;
+ }
+ }
+ }
+
+done:
+ return valid;
+}
+
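For reference, how the helper above behaves for a few format fragments (curr_ptr always points just past a '%' when dhd_dbg_verboselog_printf() calls it); these calls are illustrative, not part of the patch:

    check_valid_non_string_format("d rssi=%d");  /* TRUE  - 'd' found before the next '%'          */
    check_valid_non_string_format("08x flags");  /* TRUE  - width prefix, then the 'x' conversion  */
    check_valid_non_string_format("%d");         /* FALSE - the 'd' belongs to the next '%', so    */
                                                 /*         no conversion matches the current one  */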
#define MAX_NO_OF_ARG 16
-#define FMTSTR_SIZE 132
-#define ROMSTR_SIZE 200
+#define FMTSTR_SIZE 200
+#define ROMSTR_SIZE 268
#define SIZE_LOC_STR 50
#define LOG_PRINT_CNT_MAX 16u
#define EL_PARSE_VER "V02"
#define EL_MSEC_PER_SEC 1000
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+#define MAX_LOG_PRINT_COUNT 100u
+#define LOG_PRINT_THRESH (1u * USEC_PER_SEC)
+#endif // endif
+
+bool
+dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr)
+{
+ event_log_extended_hdr_t *ext_log_hdr;
+ uint16 event_log_fmt_num;
+ uint8 event_log_hdr_type;
+
+ /* Identify the type of event tag, payload type etc.. */
+ event_log_hdr_type = log_hdr->fmt_num & DHD_EVENT_LOG_HDR_MASK;
+ event_log_fmt_num = (log_hdr->fmt_num >> DHD_EVENT_LOG_FMT_NUM_OFFSET) &
+ DHD_EVENT_LOG_FMT_NUM_MASK;
+
+ switch (event_log_hdr_type) {
+ case DHD_OW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_TW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_BI_EVENT_LOG_HDR:
+ if (event_log_fmt_num == DHD_OW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else if (event_log_fmt_num == DHD_TW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else {
+ DHD_ERROR(("%s: invalid format number 0x%X\n",
+ __FUNCTION__, event_log_fmt_num));
+ return FALSE;
+ }
+ break;
+ case DHD_INVALID_EVENT_LOG_HDR:
+ default:
+ DHD_ERROR(("%s: invalid event log header type 0x%X\n",
+ __FUNCTION__, event_log_hdr_type));
+ return FALSE;
+ }
+
+ /* Parse extended and legacy event log headers and populate prcd_event_log_hdr_t */
+ if (prcd_log_hdr->ext_event_log_hdr) {
+ ext_log_hdr = (event_log_extended_hdr_t *)
+ ((uint8 *)log_hdr - sizeof(event_log_hdr_t));
+ prcd_log_hdr->tag = ((ext_log_hdr->extended_tag &
+ DHD_TW_VALID_TAG_BITS_MASK) << DHD_TW_EVENT_LOG_TAG_OFFSET) | log_hdr->tag;
+ } else {
+ prcd_log_hdr->tag = log_hdr->tag;
+ }
+ prcd_log_hdr->count = log_hdr->count;
+ prcd_log_hdr->fmt_num_raw = log_hdr->fmt_num;
+ prcd_log_hdr->fmt_num = event_log_fmt_num;
+
+ /* update arm cycle */
+ /*
+ * For legacy event tag :-
+ * |payload........|Timestamp| Tag
+ *
+ * For extended event tag:-
+ * |payload........|Timestamp|extended Tag| Tag.
+ *
+ */
+ prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ?
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) :
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET);
+
+ /* update event log data pointer address */
+ prcd_log_hdr->log_ptr =
+ (uint32 *)log_hdr - log_hdr->count - prcd_log_hdr->ext_event_log_hdr;
+
+ /* handle error cases above this */
+ return TRUE;
+}
+
static void
-dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
void *raw_event_ptr, uint32 logset, uint16 block, uint32* data)
{
event_log_hdr_t *ts_hdr;
- uint32 *log_ptr = (uint32 *)hdr - hdr->count;
+ uint32 *log_ptr = plog_hdr->log_ptr;
char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
uint32 rom_str_len = 0;
uint32 *ts_data;
@@ -709,6 +526,7 @@
BCM_REFERENCE(ts_hdr);
BCM_REFERENCE(ts_data);
+
if (log_ptr > data) {
/* Get time stamp if it's updated */
ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
@@ -721,8 +539,8 @@
}
}
- if (hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
- rom_str_len = (hdr->count - 1) * sizeof(uint32);
+ if (plog_hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
+ rom_str_len = (plog_hdr->count - 1) * sizeof(uint32);
if (rom_str_len >= (ROMSTR_SIZE -1))
rom_str_len = ROMSTR_SIZE - 1;
@@ -732,7 +550,7 @@
fmtstr_loc_buf[rom_str_len] = '\0';
DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
- log_ptr[hdr->count - 1], fmtstr_loc_buf));
+ log_ptr[plog_hdr->count - 1], fmtstr_loc_buf));
/* Add newline if missing */
if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n')
@@ -741,17 +559,18 @@
return;
}
- if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
- wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, hdr, log_ptr);
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE ||
+ plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
+ wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, plog_hdr, log_ptr);
return;
}
/* print the message out in a logprint */
- dhd_dbg_verboselog_printf(dhdp, hdr, raw_event_ptr, log_ptr, logset, block);
+ dhd_dbg_verboselog_printf(dhdp, plog_hdr, raw_event_ptr, log_ptr, logset, block);
}
void
-dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block)
{
dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr;
@@ -768,28 +587,53 @@
u_arg arg[MAX_NO_OF_ARG] = {{0}};
char *c_ptr = NULL;
struct bcmstrbuf b;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ static int log_print_count = 0;
+ static uint64 ts0 = 0;
+ uint64 ts1 = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
BCM_REFERENCE(arg);
- /* print the message out in a logprint */
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ if (!ts0)
+ ts0 = OSL_SYSUPTIME_US();
+
+ ts1 = OSL_SYSUPTIME_US();
+
+ if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) {
+ log_print_threshold = 1;
+ ts0 = 0;
+ log_print_count = 0;
+ DHD_ERROR(("%s: Log print water mark is reached,"
+ " console logs are dumped only to debug_dump file\n", __FUNCTION__));
+ } else if ((ts1 - ts0) > LOG_PRINT_THRESH) {
+ log_print_threshold = 0;
+ ts0 = 0;
+ log_print_count = 0;
+ }
+
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
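The watermark check above works together with the log_print_count++ increments further down in this function. A standalone restatement, assuming only a microsecond timestamp and a 'printed' flag (illustrative, not driver code):

    /* Returns 1 while console mirroring of firmware logs should be throttled. */
    static int fwlog_console_throttled(unsigned long long now_us, int printed)
    {
        static unsigned long long ts0;
        static unsigned int count;
        static int throttled;

        if (!ts0)
            ts0 = now_us;

        if ((now_us - ts0) <= 1000000u && count >= 100u) {  /* LOG_PRINT_THRESH, MAX_LOG_PRINT_COUNT */
            throttled = 1;       /* watermark hit: stop mirroring to console      */
            ts0 = 0;
            count = 0;
        } else if ((now_us - ts0) > 1000000u) {
            throttled = 0;       /* a quieter second elapsed: console back on     */
            ts0 = 0;
            count = 0;
        } else if (printed) {
            count++;             /* mirrors log_print_count++ at the print sites  */
        }
        return throttled;
    }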
+ /* print the message out in a logprint. Logprint expects raw format number */
if (!(raw_event->fmts)) {
if (dhdp->dbg) {
log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) {
- if ((fw_verbose_level_map[id].tag == hdr->tag) &&
+ if ((fw_verbose_level_map[id].tag == plog_hdr->tag) &&
(fw_verbose_level_map[id].log_level > log_level))
return;
}
}
- if (hdr->fmt_num == 0xffff) {
- DHD_ECNTR_LOG(("%06d.%03d EL:tag=%d len=%d fmt=0x%x",
- (uint32)(log_ptr[hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[hdr->count - 1] % EL_MSEC_PER_SEC),
- hdr->tag,
- hdr->count,
- hdr->fmt_num));
- for (count = 0; count < (hdr->count - 1); count++) {
+ if (plog_hdr->binary_payload) {
+ DHD_ECNTR_LOG(("%06d.%03d EL:tag=%d len=%d fmt=0x%x",
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ plog_hdr->tag,
+ plog_hdr->count,
+ plog_hdr->fmt_num_raw));
+
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
if (count && (count % LOG_PRINT_CNT_MAX == 0)) {
DHD_ECNTR_LOG(("\n\t%08x", log_ptr[count]));
} else {
@@ -802,29 +646,32 @@
bcm_binit(&b, fmtstr_loc_buf, FMTSTR_SIZE);
#ifndef OEM_ANDROID
bcm_bprintf(&b, "%06d.%03d EL: %d 0x%x",
- (uint32)(log_ptr[hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[hdr->count - 1] % EL_MSEC_PER_SEC),
- hdr->tag,
- hdr->fmt_num);
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ plog_hdr->tag,
+ plog_hdr->fmt_num_raw);
#else
bcm_bprintf(&b, "%06d.%03d EL:%s:%u:%u %d %d 0x%x",
- (uint32)(log_ptr[hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[hdr->count - 1] % EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
EL_PARSE_VER, logset, block,
- hdr->tag,
- hdr->count,
- hdr->fmt_num);
-#endif // endif
- for (count = 0; count < (hdr->count - 1); count++) {
+ plog_hdr->tag,
+ plog_hdr->count,
+ plog_hdr->fmt_num_raw);
+#endif /* !OEM_ANDROID */
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
bcm_bprintf(&b, " %x", log_ptr[count]);
}
/* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < WL_MAX_PRESERVE_BUFFER &&
+ if (logset < dhdp->event_log_max_sets &&
((0x01u << logset) & dhdp->logset_prsrv_mask)) {
- DHD_EVENT_MEM(("%s\n", b.origbuf));
+ DHD_PRSRV_MEM(("%s\n", b.origbuf));
} else {
- DHD_EVENT(("%s\n", b.origbuf));
+ DHD_FWLOG(("%s\n", b.origbuf));
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
}
}
return;
@@ -836,36 +683,46 @@
return;
}
- if ((hdr->fmt_num >> 2) < raw_event->num_fmts) {
- if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ if ((plog_hdr->fmt_num) < raw_event->num_fmts) {
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s",
- raw_event->fmts[hdr->fmt_num >> 2]);
- hdr->count++;
+ raw_event->fmts[plog_hdr->fmt_num]);
+ plog_hdr->count++;
} else {
- snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %6d.%3d %s",
+ snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %06d.%03d %s",
logset, block,
- log_ptr[hdr->count-1]/1000, (log_ptr[hdr->count - 1] % 1000),
- raw_event->fmts[hdr->fmt_num >> 2]);
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ raw_event->fmts[plog_hdr->fmt_num]);
}
c_ptr = fmtstr_loc_buf;
} else {
/* for ecounters, don't print the error as it will flood */
- if (hdr->fmt_num != 0xffff)
- DHD_ERROR(("%s: fmt number out of range\n", __FUNCTION__));
- else
- DHD_INFO(("%s: fmt number out of range\n", __FUNCTION__));
+ if ((plog_hdr->fmt_num != DHD_OW_BI_EVENT_FMT_NUM) &&
+ (plog_hdr->fmt_num != DHD_TW_BI_EVENT_FMT_NUM)) {
+ DHD_ERROR(("%s: fmt number: 0x%x out of range\n",
+ __FUNCTION__, plog_hdr->fmt_num));
+ } else {
+ DHD_INFO(("%s: fmt number: 0x%x out of range\n",
+ __FUNCTION__, plog_hdr->fmt_num));
+ }
+
goto exit;
}
- if (hdr->count > MAX_NO_OF_ARG) {
- DHD_ERROR(("%s: hdr->count(%d) out of range \n", __FUNCTION__, hdr->count));
+ if (plog_hdr->count > MAX_NO_OF_ARG) {
+ DHD_ERROR(("%s: plog_hdr->count(%d) out of range\n",
+ __FUNCTION__, plog_hdr->count));
goto exit;
}
+ /* print the format string which will be needed for debugging incorrect formats */
+ DHD_INFO(("%s: fmtstr_loc_buf = %s\n", __FUNCTION__, fmtstr_loc_buf));
+
/* Replace all %p to %x to handle 32 bit %p */
replace_percent_p_to_x(fmtstr_loc_buf);
- for (count = 0; count < (hdr->count - 1); count++) {
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
if (c_ptr != NULL)
if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
c_ptr++;
@@ -904,23 +761,29 @@
"(s)0x%x", log_ptr[count]);
arg[count].addr = str_buf[count];
}
- } else {
- /* Other than string */
+ } else if (check_valid_non_string_format(c_ptr)) {
+ /* Other than string format */
arg[count].val = log_ptr[count];
+ } else {
+ *(c_ptr - 1) = '\0';
+ break;
}
}
}
/* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < WL_MAX_PRESERVE_BUFFER &&
+ if (logset < dhdp->event_log_max_sets &&
((0x01u << logset) & dhdp->logset_prsrv_mask)) {
- DHD_EVENT_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ DHD_PRSRV_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
arg[11], arg[12], arg[13], arg[14], arg[15]));
} else {
- DHD_EVENT((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
arg[11], arg[12], arg[13], arg[14], arg[15]));
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
}
exit:
@@ -934,26 +797,42 @@
{
msgtrace_hdr_t *hdr;
char *data, *tmpdata;
- int id;
const uint32 log_hdr_len = sizeof(event_log_hdr_t);
uint32 log_pyld_len;
static uint32 seqnum_prev = 0;
event_log_hdr_t *log_hdr;
bool msg_processed = FALSE;
- uint32 *log_ptr = NULL;
+ prcd_event_log_hdr_t prcd_log_hdr;
+ prcd_event_log_hdr_t *plog_hdr;
dll_t list_head, *cur;
loglist_item_t *log_item;
- int32 nan_evt_ring_log_level = 0;
dhd_dbg_ring_entry_t msg_hdr;
char *logbuf;
struct tracelog_header *logentry_header;
uint ring_data_len = 0;
bool ecntr_pushed = FALSE;
+ bool rtt_pushed = FALSE;
+ bool dll_inited = FALSE;
uint32 logset = 0;
uint16 block = 0;
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
uint min_expected_len = 0;
+ uint16 len_chk = 0;
BCM_REFERENCE(ecntr_pushed);
+ BCM_REFERENCE(rtt_pushed);
+ BCM_REFERENCE(len_chk);
+
+ /* store event_log_max_sets_queried and event_log_max_sets in local variables
+ * to avoid race conditions, as they are set from a different context (preinit)
+ */
+ event_log_max_sets_queried = dhdp->event_log_max_sets_queried;
+ /* Make sure queried is read first with wmb and then max_sets,
+ * as it is done in reverse order during preinit ioctls.
+ */
+ OSL_SMP_WMB();
+ event_log_max_sets = dhdp->event_log_max_sets;
if (msgtrace_hdr_present)
min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN);
@@ -987,12 +866,12 @@
hdr = (msgtrace_hdr_t *)event_data;
data = (char *)event_data + MSGTRACE_HDRLEN;
datalen -= MSGTRACE_HDRLEN;
- msgtrace_seqnum = hdr->seqnum;
+ msgtrace_seqnum = ntoh32(hdr->seqnum);
} else {
data = (char *)event_data;
}
- if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(msgtrace_seqnum)))
+ if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum))
return;
/* Save the whole message to event log ring */
@@ -1028,6 +907,20 @@
ltoh16(*((uint16 *)(data)))));
logset = ltoh32(*((uint32 *)(data + 4)));
+
+ if (logset >= event_log_max_sets) {
+ DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n",
+ __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried));
+#ifdef DHD_FW_COREDUMP
+ if (event_log_max_sets_queried) {
+ DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n",
+ __FUNCTION__));
+ dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+
block = ltoh16(*((uint16 *)(data+2)));
data += EVENT_LOG_BLOCK_HDRLEN;
@@ -1045,10 +938,18 @@
* data log_hdr
*/
dll_init(&list_head);
+ dll_inited = TRUE;
+
while (datalen > log_hdr_len) {
log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
+ memset(&prcd_log_hdr, 0, sizeof(prcd_log_hdr));
+ if (!dhd_dbg_process_event_log_hdr(log_hdr, &prcd_log_hdr)) {
+ DHD_ERROR(("%s: Error while parsing event log header\n",
+ __FUNCTION__));
+ }
+
/* skip zero padding at end of frame */
- if (log_hdr->tag == EVENT_LOG_TAG_NULL) {
+ if (prcd_log_hdr.tag == EVENT_LOG_TAG_NULL) {
datalen -= log_hdr_len;
continue;
}
@@ -1058,21 +959,27 @@
* arguments except EVENT_LOG_TAG_STATS which could use the
* whole payload of 256 words
*/
- if (log_hdr->count == 0) {
+ if (prcd_log_hdr.count == 0) {
break;
}
- if ((log_hdr->tag != EVENT_LOG_TAG_STATS) && (log_hdr->count > MAX_NO_OF_ARG)) {
+ /* Both tag_stats and proxd are binary payloads so skip
+ * argument count check for these.
+ */
+ if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ (prcd_log_hdr.count > MAX_NO_OF_ARG)) {
break;
}
- log_pyld_len = log_hdr->count * DATA_UNIT_FOR_LOG_CNT;
+ log_pyld_len = (prcd_log_hdr.count + prcd_log_hdr.ext_event_log_hdr) *
+ DATA_UNIT_FOR_LOG_CNT;
/* log data should not cross the event data boundary */
if ((uint32)((char *)log_hdr - data) < log_pyld_len) {
break;
}
/* skip 4 bytes time stamp packet */
- if (log_hdr->tag == EVENT_LOG_TAG_TS) {
- datalen -= log_pyld_len + log_hdr_len;
+ if (prcd_log_hdr.tag == EVENT_LOG_TAG_TS) {
+ datalen -= (log_pyld_len + log_hdr_len);
continue;
}
if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) {
@@ -1080,7 +987,17 @@
__FUNCTION__));
break;
}
- log_item->hdr = log_hdr;
+
+ log_item->prcd_log_hdr.tag = prcd_log_hdr.tag;
+ log_item->prcd_log_hdr.count = prcd_log_hdr.count;
+ log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num;
+ log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw;
+ log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle;
+ log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr;
+ log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len;
+ log_item->prcd_log_hdr.ext_event_log_hdr = prcd_log_hdr.ext_event_log_hdr;
+ log_item->prcd_log_hdr.binary_payload = prcd_log_hdr.binary_payload;
+
dll_insert(&log_item->list, &list_head);
datalen -= (log_pyld_len + log_hdr_len);
}
@@ -1096,98 +1013,100 @@
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
- log_hdr = log_item->hdr;
- log_ptr = (uint32 *)log_hdr - log_hdr->count;
- dll_delete(cur);
- MFREE(dhdp->osh, log_item, sizeof(*log_item));
-#if defined(DEBUGABILITY_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
- if ((log_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) ||
- ((log_hdr->tag == EVENT_LOG_TAG_STATS) &&
- (log_hdr->fmt_num == 0xffff))) {
+ plog_hdr = &log_item->prcd_log_hdr;
+
+#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
+ /* Ecounter tag can be time_data or log_stats+binary payload */
+ if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) ||
+ ((plog_hdr->tag == EVENT_LOG_TAG_STATS) &&
+ (plog_hdr->binary_payload))) {
if (!ecntr_pushed && dhd_log_dump_ecntr_enabled()) {
- /* check msg hdr len before pushing */
- if (msg_hdr.len > (sizeof(*logentry_header) +
- PAYLOAD_ECNTR_MAX_LEN)) {
+ /*
+ * check msg hdr len before pushing.
+ * FW msg_hdr.len includes length of event log hdr,
+ * logentry header and payload.
+ */
+ len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
+ PAYLOAD_ECNTR_MAX_LEN);
+ /* account extended event log header(extended_event_log_hdr) */
+ if (plog_hdr->ext_event_log_hdr) {
+ len_chk += sizeof(*log_hdr);
+ }
+ if (msg_hdr.len > len_chk) {
DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
- "msg_hdr.len=%u, max allowed for ecntrs=%u",
- __FUNCTION__, msg_hdr.len,
- (uint32)(sizeof(*logentry_header) +
- PAYLOAD_ECNTR_MAX_LEN)));
+ "msg_hdr.len=%u, max allowed for ecntrs=%u\n",
+ __FUNCTION__, msg_hdr.len, len_chk));
goto exit;
}
dhd_dbg_ring_push(dhdp->ecntr_dbg_ring, &msg_hdr, logbuf);
ecntr_pushed = TRUE;
}
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING && DHD_LOG_DUMP */
+#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */
+
+#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP)
+ if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ plog_hdr->binary_payload) {
+ if (!rtt_pushed && dhd_log_dump_rtt_enabled()) {
+ /*
+ * check msg hdr len before pushing.
+ * FW msg_hdr.len includes length of event log hdr,
+ * logentry header and payload.
+ */
+ len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
+ PAYLOAD_RTT_MAX_LEN);
+ /* account extended event log header(extended_event_log_hdr) */
+ if (plog_hdr->ext_event_log_hdr) {
+ len_chk += sizeof(*log_hdr);
+ }
+ if (msg_hdr.len > len_chk) {
+ DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
+ "msg_hdr.len=%u, max allowed for rtt=%u\n",
+ __FUNCTION__, msg_hdr.len, len_chk));
+ goto exit;
+ }
+ dhd_dbg_ring_push(dhdp->rtt_dbg_ring, &msg_hdr, logbuf);
+ rtt_pushed = TRUE;
+ }
+ }
+#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */
+
#if defined(DHD_EVENT_LOG_FILTER)
- if (log_hdr->tag == EVENT_LOG_TAG_STATS) {
- dhd_event_log_filter_event_handler(dhdp, log_hdr, log_ptr);
+ if (plog_hdr->tag == EVENT_LOG_TAG_STATS) {
+ dhd_event_log_filter_event_handler(dhdp, plog_hdr, plog_hdr->log_ptr);
}
#endif /* DHD_EVENT_LOG_FILTER */
- /* Before DHD debugability is implemented WLC_E_TRACE had been
- * used to carry verbose logging from firmware. We need to
- * be able to handle those messages even without a initialized
- * debug layer.
- */
- if (dhdp->dbg) {
- /* check the data for NAN event ring; keeping first as small table */
- /* process only user configured to log */
- nan_evt_ring_log_level = dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level;
- if (dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level) {
- for (id = 0; id < ARRAYSIZE(nan_event_level_map); id++) {
- if (nan_event_level_map[id].tag == log_hdr->tag) {
- /* dont process if tag log level is greater
- * than ring log level
- */
- if (nan_event_level_map[id].log_level >
- nan_evt_ring_log_level) {
- msg_processed = TRUE;
- break;
- }
- /* In case of BCME_VERSION error,
- * this is not NAN event type data
- */
- if (dhd_dbg_nan_event_handler(dhdp,
- log_hdr, log_ptr, (uint32 *)data)
- != BCME_VERSION) {
- msg_processed = TRUE;
- }
- break;
- }
- }
- }
- if (!msg_processed) {
- /* check the data for event ring */
- for (id = 0; id < ARRAYSIZE(fw_event_level_map); id++) {
- if (fw_event_level_map[id].tag == log_hdr->tag) {
- /* In case of BCME_VERSION error,
- * this is not event type data
- */
- if (dhd_dbg_custom_evnt_handler(dhdp,
- log_hdr, log_ptr, (uint32 *)data)
- != BCME_VERSION) {
- msg_processed = TRUE;
- }
- break;
- }
- }
- }
- }
- if (!msg_processed)
- dhd_dbg_verboselog_handler(dhdp, log_hdr, raw_event_ptr,
+ if (!msg_processed) {
+ dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr,
logset, block, (uint32 *)data);
+ }
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
}
+ BCM_REFERENCE(log_hdr);
exit:
+ while (dll_inited && (!dll_empty(&list_head))) {
+ cur = dll_head_p(&list_head);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
+ }
VMFREE(dhdp->osh, logbuf, ring_data_len);
}
#else /* !SHOW_LOGTRACE */
static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
- event_log_hdr_t *hdr, void *raw_event_ptr, uint32 logset, uint16 block,
+ prcd_event_log_hdr_t *plog_hdr, void *raw_event_ptr, uint32 logset, uint16 block,
uint32 *data) {};
INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
void *event_data, void *raw_event_ptr, uint datalen,
@@ -1266,15 +1185,9 @@
if (log_level > 0)
set = TRUE;
- if (ring->id == FW_EVENT_RING_ID) {
- log_level_tbl = fw_event_level_map;
- array_len = ARRAYSIZE(fw_event_level_map);
- } else if (ring->id == FW_VERBOSE_RING_ID) {
+ if (ring->id == FW_VERBOSE_RING_ID) {
log_level_tbl = fw_verbose_level_map;
array_len = ARRAYSIZE(fw_verbose_level_map);
- } else if (ring->id == NAN_EVENT_RING_ID) {
- log_level_tbl = nan_event_level_map;
- array_len = ARRAYSIZE(nan_event_level_map);
}
for (i = 0; i < array_len; i++) {
@@ -1293,6 +1206,23 @@
return BCME_OK;
}
+int
+__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status)
+{
+ dhd_dbg_ring_status_t ring_status;
+ int ret = BCME_OK;
+
+ if (ring == NULL) {
+ return BCME_BADADDR;
+ }
+
+ bzero(&ring_status, sizeof(dhd_dbg_ring_status_t));
+ RING_STAT_TO_STATUS(ring, ring_status);
+ *get_ring_status = ring_status;
+
+ return ret;
+}
+
/*
* dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer
* Return: An error code or 0 on success.
@@ -1305,17 +1235,14 @@
int id = 0;
dhd_dbg_t *dbg;
dhd_dbg_ring_t *dbg_ring;
- dhd_dbg_ring_status_t ring_status;
if (!dhdp || !dhdp->dbg)
return BCME_BADADDR;
dbg = dhdp->dbg;
- memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
dbg_ring = &dbg->dbg_rings[id];
if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
- RING_STAT_TO_STATUS(dbg_ring, ring_status);
- *dbg_ring_status = ring_status;
+ __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status);
break;
}
}
@@ -1326,6 +1253,30 @@
return ret;
}
+#ifdef SHOW_LOGTRACE
+void
+dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info)
+{
+ dhd_dbg_ring_status_t ring_status;
+ uint32 rlen = 0;
+
+ rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE);
+
+ trace_buf_info->size = rlen;
+ trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+ if (rlen == 0) {
+ trace_buf_info->availability = BUF_NOT_AVAILABLE;
+ return;
+ }
+
+ __dhd_dbg_get_ring_status(ring, &ring_status);
+
+ if (ring_status.written_bytes != ring_status.read_bytes) {
+ trace_buf_info->availability = NEXT_BUF_AVAIL;
+ }
+}
+#endif /* SHOW_LOGTRACE */
+
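Illustrative drain loop (not part of the patch) built on the helper above: keep reading until it reports that no further buffer is available.

    trace_buf_info_t info;

    do {
        memset(&info, 0, sizeof(info));
        dhd_dbg_read_ring_into_trace_buf(ring, &info);
        if (info.availability == BUF_NOT_AVAILABLE)
            break;
        /* hand info.buf / info.size to the consumer here */
    } while (info.availability == NEXT_BUF_AVAIL);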
/*
* dhd_dbg_find_ring_id : return ring_id based on ring_name
* Return: An invalid ring id for failure or valid ring id on success.
@@ -1429,15 +1380,14 @@
uint32
__dhd_dbg_driver_ts_usec(void)
{
-//tianyan@2021.7.27 modify for add wifi6 module start
- /*
- struct timespec64 ts64;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+ struct timespec64 ts;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct timespec ts;
+#endif /* LINUX_VER >= 2.6.39 */
- get_monotonic_boottime(&ts64);
- return ((uint32)(__TIMESPEC_TO_US(ts64)));*/
-//tianyan@2021.7.27 modify for add wifi6 module end
-
- return 0;
+ get_monotonic_boottime(&ts);
+ return ((uint32)(__TIMESPEC_TO_US(ts)));
}
wifi_tx_packet_fate
@@ -2168,6 +2118,9 @@
DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
return BCME_OK;
}
+#endif /* DBG_PKT_MON */
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
bool
dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
uint16 status)
@@ -2179,17 +2132,14 @@
}
return pkt_fate;
}
-
-#else /* DBG_PKT_MON */
-
+#else /* DBG_PKT_MON || DHD_PKT_LOGGING */
bool
dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
uint32 pktid, uint16 status)
{
return TRUE;
}
-
-#endif /* DBG_PKT_MON */
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
/*
* dhd_dbg_attach: initialziation of dhd dbugability module
@@ -2213,15 +2163,7 @@
if (!buf)
goto error;
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
- (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf);
- if (ret)
- goto error;
-
- buf = MALLOCZ(dhdp->osh, FW_EVENT_RING_SIZE);
- if (!buf)
- goto error;
- ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_EVENT_RING_ID], FW_EVENT_RING_ID,
- (uint8 *)FW_EVENT_RING_NAME, FW_EVENT_RING_SIZE, buf);
+ (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE);
if (ret)
goto error;
@@ -2229,15 +2171,7 @@
if (!buf)
goto error;
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
- (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf);
- if (ret)
- goto error;
-
- buf = MALLOCZ(dhdp->osh, NAN_EVENT_RING_SIZE);
- if (!buf)
- goto error;
- ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[NAN_EVENT_RING_ID], NAN_EVENT_RING_ID,
- (uint8 *)NAN_EVENT_RING_NAME, NAN_EVENT_RING_SIZE, buf);
+ (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE);
if (ret)
goto error;
@@ -2272,13 +2206,21 @@
dhd_dbg_detach(dhd_pub_t *dhdp)
{
int ring_id;
+ dhd_dbg_ring_t *ring = NULL;
dhd_dbg_t *dbg;
+
if (!dhdp->dbg)
return;
dbg = dhdp->dbg;
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
- dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
+ ring = &dbg->dbg_rings[ring_id];
+ dhd_dbg_ring_deinit(dhdp, ring);
+ if (ring->ring_buf) {
+ MFREE(dhdp->osh, ring->ring_buf, ring->ring_size);
+ ring->ring_buf = NULL;
+ }
+ ring->ring_size = 0;
}
}
MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.h
index 48570be..2839305 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug.h
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -35,9 +37,7 @@
enum {
DEBUG_RING_ID_INVALID = 0,
FW_VERBOSE_RING_ID,
- FW_EVENT_RING_ID,
DHD_EVENT_RING_ID,
- NAN_EVENT_RING_ID,
/* add new id here */
DEBUG_RING_ID_MAX
};
@@ -341,18 +341,24 @@
};
#ifdef OEM_ANDROID
+/*
+ * Assuming that the ring lock is a mutex, bail out if the
+ * caller is in atomic context. In the long run, a job should be
+ * scheduled to execute in sleepable context so that the
+ * contents are pushed to the ring.
+ */
#define DBG_EVENT_LOG(dhdp, connect_state) \
{ \
do { \
uint16 state = connect_state; \
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \
+ if (CAN_SLEEP() && DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \
dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \
&state, sizeof(state)); \
} while (0); \
}
#else
#define DBG_EVENT_LOG(dhd, connect_state)
-#endif // endif
+#endif /* !OEM_ANDROID */
#define MD5_PREFIX_LEN 4
#define MAX_FATE_LOG_LEN 32
@@ -710,6 +716,48 @@
} dhd_dbg_mwli_t;
#endif /* DHD_DEBUG */
+#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF
+
+/* LSB 2 bits of format number to identify the type of event log */
+#define DHD_EVENT_LOG_HDR_MASK 0x3
+
+#define DHD_EVENT_LOG_FMT_NUM_OFFSET 2
+#define DHD_EVENT_LOG_FMT_NUM_MASK 0x3FFF
+/**
+ * OW:- one word
+ * TW:- two word
+ * NB:- non binary
+ * BI:- binary
+ */
+#define DHD_OW_NB_EVENT_LOG_HDR 0
+#define DHD_TW_NB_EVENT_LOG_HDR 1
+#define DHD_BI_EVENT_LOG_HDR 3
+#define DHD_INVALID_EVENT_LOG_HDR 2
+
+#define DHD_TW_VALID_TAG_BITS_MASK 0xF
+#define DHD_OW_BI_EVENT_FMT_NUM 0x3FFF
+#define DHD_TW_BI_EVENT_FMT_NUM 0x3FFE
+
+#define DHD_TW_EVENT_LOG_TAG_OFFSET 8
+
+#define EVENT_TAG_TIMESTAMP_OFFSET 1
+#define EVENT_TAG_TIMESTAMP_EXT_OFFSET 2
+
+typedef struct prcd_event_log_hdr {
+ uint32 tag; /* Event_log entry tag */
+ uint32 count; /* Count of 4-byte entries */
+ uint32 fmt_num_raw; /* Format number */
+ uint32 fmt_num; /* Format number >> 2 */
+ uint32 armcycle; /* global ARM CYCLE for TAG */
+ uint32 *log_ptr; /* start of payload */
+ uint32 payload_len;
+ /* Extended event log header info
+ * 0 - legacy, 1 - extended event log header present
+ */
+ bool ext_event_log_hdr;
+ bool binary_payload; /* 0 - non binary payload, 1 - binary payload */
+} prcd_event_log_hdr_t; /* Processed event log header */
+
/* dhd_dbg functions */
extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
void *raw_event_ptr, uint datalen);
@@ -724,17 +772,22 @@
extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id,
int log_level, int flags, uint32 threshold);
extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name);
+extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id);
extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp);
extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len);
-extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block);
int dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len);
int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
bool strip_header);
int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr,
void *data);
+int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status);
int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
dhd_dbg_ring_status_t *dbg_ring_status);
+#ifdef SHOW_LOGTRACE
+void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info);
+#endif /* SHOW_LOGTRACE */
#ifdef DBG_PKT_MON
extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug_linux.c
index b4fc91c..70ef894 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_debug_linux.c
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -121,7 +123,7 @@
unsigned long flags;
ring = &dhdp->dbg->dbg_rings[ringid];
- flags = dhd_os_spin_lock(ring->lock);
+ DHD_DBG_RING_LOCK(ring->lock, flags);
dhd_dbg_get_ring_status(dhdp, ringid, &ring_status);
if (ring->wp > ring->rp) {
@@ -170,7 +172,7 @@
}
}
- dhd_os_spin_unlock(ring->lock, flags);
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
return;
}
@@ -202,7 +204,6 @@
int ret = BCME_OK;
int ring_id;
linux_dbgring_info_t *os_priv, *ring_info;
- uint32 ms;
ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
if (!VALID_RING(ring_id))
@@ -223,14 +224,7 @@
return BCME_ERROR;
ring_info = &os_priv[ring_id];
ring_info->log_level = log_level;
- if (ring_id == FW_VERBOSE_RING_ID || ring_id == FW_EVENT_RING_ID) {
- ring_info->tsoffset = local_clock();
- if (dhd_wl_ioctl_get_intiovar(dhdp, "rte_timesync", &ms, WLC_GET_VAR,
- FALSE, 0))
- DHD_ERROR(("%s rte_timesync failed\n", __FUNCTION__));
- do_div(ring_info->tsoffset, 1000000);
- ring_info->tsoffset -= ms;
- }
+
if (time_intval == 0 || log_level == 0) {
ring_info->interval = 0;
cancel_delayed_work_sync(&ring_info->work);
@@ -287,8 +281,8 @@
if (!os_priv)
return BCME_ERROR;
- max_log_level = MAX(os_priv[FW_VERBOSE_RING_ID].log_level,
- os_priv[FW_EVENT_RING_ID].log_level);
+ max_log_level = os_priv[FW_VERBOSE_RING_ID].log_level;
+
if (max_log_level == SUPPRESS_LOG_LEVEL) {
/* suppress the logging in FW not to wake up host while device in suspend mode */
ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0,
@@ -355,7 +349,7 @@
msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
msg_hdr.timestamp = local_clock();
/* convert to ms */
- do_div(msg_hdr.timestamp, 1000000);
+ msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC);
msg_hdr.len = data_len;
/* filter the event for higher log level with current log level */
for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) {
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.c
index 5e89df9..c02095f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.c
@@ -4,7 +4,9 @@
* Flow rings are transmit traffic (=propagating towards antenna) related entities
*
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -65,11 +67,7 @@
#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
-#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
-const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
-#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
-#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
@@ -93,8 +91,9 @@
ASSERT(dhdp != (dhd_pub_t*)NULL);
ASSERT(flowid < dhdp->num_flow_rings);
- if (flowid < dhdp->num_flow_rings)
+ if (flowid >= dhdp->num_flow_rings) {
return NULL;
+ }
flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
@@ -269,6 +268,48 @@
}
}
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+ uint8 prio_count = 0;
+ int i;
+ // Pick all elements one by one
+ for (i = 0; i < NUMPRIO; i++)
+ {
+ // Check if the picked element is already counted
+ int j;
+ for (j = 0; j < i; j++) {
+ if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+ break;
+ }
+ }
+ // If not counted earlier, then count it
+ if (i == j) {
+ prio_count++;
+ }
+ }
+
+#ifdef DHD_LOSSLESS_ROAMING
+ /* For LLR, we use a flowring with prio 7, which is not covered by
+ * the prio2ac array. But in __dhd_sendpkt, the prio is hardcoded
+ * to PRIO_8021D_NC and passed to dhd_flowid_update.
+ * So add 1 to prio_count.
+ */
+ prio_count++;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ return prio_count;
+}
+
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+ uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+ uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
+ uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+ return max_multi_client_flow_rings;
+}
+
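A worked example of the two helpers above, with illustrative numbers; num_flow_rings and max_cmn_rings below are assumptions for the arithmetic, not values taken from this patch.

/*
 * With the default AC map prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 },
 * the distinct values are { 0, 1, 2, 3 }, so the duplicate-count loop
 * in dhd_num_prio_supported_per_flow_ring() returns 4, or 5 with
 * DHD_LOSSLESS_ROAMING (one extra ring reserved for prio 7 /
 * PRIO_8021D_NC).
 *
 * Assuming, purely for illustration, num_flow_rings = 40 and
 * bus->max_cmn_rings = 2:
 *   total_tx_flow_rings           = 40 - 2 = 38
 *   reserved_infra_sta_flow_rings = 5
 *   max_multi_client_flow_rings   = 38 - 5 = 33
 */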
/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
@@ -352,6 +393,10 @@
dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+ dhdp->multi_client_flow_rings = 0U;
+
#ifdef DHD_LOSSLESS_ROAMING
dhdp->dequeue_prec_map = ALLPRIO;
#endif // endif
@@ -451,6 +496,9 @@
dhdp->num_flow_rings = 0U;
bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ dhdp->max_multi_client_flow_rings = 0U;
+ dhdp->multi_client_flow_rings = 0U;
+
lock = dhdp->flowid_lock;
dhdp->flowid_lock = NULL;
@@ -517,8 +565,7 @@
ASSERT(if_flow_lkup);
- if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
is_tdls_destination(dhdp, da)) {
@@ -592,7 +639,7 @@
if (flowid == FLOWID_INVALID) {
MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
- DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+ DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
return FLOWID_INVALID;
}
@@ -604,9 +651,8 @@
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
- /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+ /* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count &&
(is_tdls_destination(dhdp, da))) {
@@ -640,6 +686,15 @@
DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+ if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
+ DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
+ __FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
+ ifindex, prio, if_flow_lkup[ifindex].role));
+ dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+ dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+ return FLOWID_INVALID;
+ }
+
return fl_hash_node->flowid;
} /* dhd_flowid_alloc */
@@ -653,7 +708,6 @@
flow_ring_table_t *flow_ring_table;
unsigned long flags;
int ret;
- bool is_sta_assoc;
DHD_TRACE(("%s\n", __FUNCTION__));
@@ -670,37 +724,50 @@
id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
-
+ bool if_role_multi_client;
if_flow_lkup_t *if_flow_lkup;
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
if (!if_flow_lkup[ifindex].status)
return BCME_ERROR;
- BCM_REFERENCE(is_sta_assoc);
+
+ /* check role for multi client case */
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+ /* Abort Flowring creation if multi client flowrings crossed the threshold */
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ if (if_role_multi_client &&
+ (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
+ DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
+ __FUNCTION__, dhdp->multi_client_flow_rings,
+ dhdp->max_multi_client_flow_rings));
+ return BCME_ERROR;
+ }
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ /* Do not create Flowring if peer is not associated */
#if defined(PCIE_FULL_DONGLE)
- is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
- DHD_ERROR_RLMT(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
- ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
- is_sta_assoc));
- if (!ETHER_ISMULTI(da) &&
- ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
- (FALSE) ||
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
- (!is_sta_assoc)) {
- DHD_ERROR_RLMT(("Attempt to send pkt with out peer/scb addition\n"));
+ if (if_role_multi_client && !ETHER_ISMULTI(da) &&
+ !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
+ DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
return BCME_ERROR;
}
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */
id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
- DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+ DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
return BCME_ERROR;
}
ASSERT(id < dhdp->num_flow_rings);
+ /* Only after flowid alloc, increment multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings++;
+ }
+
/* register this flowid in dhd_pub */
dhd_add_flowid(dhdp, ifindex, prio, da, id);
@@ -716,8 +783,11 @@
flow_ring_node->active = TRUE;
flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
+#ifdef TX_STATUS_LATENCY_STATS
flow_ring_node->flow_info.num_tx_status = 0;
flow_ring_node->flow_info.cum_tx_status_latency = 0;
+ flow_ring_node->flow_info.num_tx_pkts = 0;
+#endif /* TX_STATUS_LATENCY_STATS */
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Create and inform device about the new flow */
@@ -735,7 +805,14 @@
return BCME_OK;
} else {
/* if the Flow id was found in the hash */
- ASSERT(id < dhdp->num_flow_rings);
+
+ if (id >= dhdp->num_flow_rings) {
+ DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
+ __FUNCTION__, id, dhdp->num_flow_rings));
+ *flowid = FLOWID_INVALID;
+ ASSERT(0);
+ return BCME_ERROR;
+ }
flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
@@ -867,6 +944,7 @@
flow_hash_info_t *cur, *prev;
if_flow_lkup_t *if_flow_lkup;
unsigned long flags;
+ bool if_role_multi_client;
ASSERT(ifindex < DHD_MAX_IFS);
if (ifindex >= DHD_MAX_IFS)
@@ -875,6 +953,8 @@
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
cur = if_flow_lkup[ifindex].fl_hash[hashix];
@@ -900,6 +980,11 @@
prev->next = cur->next;
}
+ /* Decrement multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings--;
+ }
+
/* deregister flowid from dhd_pub. */
dhd_del_flowid(dhdp, ifindex, flowid);
@@ -991,10 +1076,20 @@
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
for (id = 0; id < dhdp->num_flow_rings; id++) {
+ /*
+ * Send flowring delete request even if flowring status is
+ * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
+ * event comes ahead of flowring create response.
+ * Otherwise the flowring will not be deleted later as there will not be any
+ * DISASSOC_IND event. With this change, when create response event comes to DHD,
+ * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
+ * event will come, upon which DHD will delete the flowring.
+ */
if (flow_ring_table[id].active &&
(flow_ring_table[id].flow_info.ifindex == ifindex) &&
(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
- (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+ ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+ (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
DHD_ERROR(("%s: deleting flowid %d\n",
__FUNCTION__, flow_ring_table[id].flowid));
dhd_bus_flow_ring_delete_request(dhdp->bus,
@@ -1107,6 +1202,8 @@
else
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
return BCME_OK;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.h
index 759ebdb..dd61e35 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_flowring.h
@@ -6,7 +6,9 @@
* Provides type definitions and function prototypes used to create, delete and manage flow rings at
* high level.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -61,10 +63,7 @@
#define FLOW_RING_STATUS_STA_FREEING 7
#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
-/* Maximum Mu MIMO frame size */
-#ifdef WL_MONITOR
-#define DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ 4096
-#endif /* WL_MONITOR */
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096
#define DHD_FLOW_PRIO_AC_MAP 0
#define DHD_FLOW_PRIO_TID_MAP 1
@@ -80,8 +79,24 @@
#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGC(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
+#define DHD_IF_ROLE_IBSS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS)
+#ifdef WL_NAN
+#define DHD_IF_ROLE_NAN(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN)
+#else
+#define DHD_IF_ROLE_NAN(pub, idx) (FALSE)
+#endif /* WL_NAN */
+#define DHD_IF_ROLE_AWDL(pub, idx) (FALSE)
+
+#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \
+ (DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx))
+
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+ (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\
+ DHD_IF_ROLE_NAN(pub, idx))
+
#define DHD_FLOW_RING(dhdp, flowid) \
(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
@@ -154,9 +169,11 @@
typedef struct dhd_pkttag_fr {
uint16 flowid;
uint16 ifid;
+#ifdef DHD_LB_TXC
int dataoff;
dmaaddr_t physaddr;
uint32 pa_len;
+#endif /* DHD_LB_TXC */
} dhd_pkttag_fr_t;
#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx))
@@ -170,12 +187,16 @@
typedef struct flow_info {
uint8 tid;
uint8 ifindex;
- char sa[ETHER_ADDR_LEN];
- char da[ETHER_ADDR_LEN];
+ uchar sa[ETHER_ADDR_LEN];
+ uchar da[ETHER_ADDR_LEN];
+#ifdef TX_STATUS_LATENCY_STATS
/* total number of tx_status received on this flowid */
uint64 num_tx_status;
/* cumulative tx_status latency for this flowid */
uint64 cum_tx_status_latency;
+ /* num tx packets sent on this flowring */
+ uint64 num_tx_pkts;
+#endif /* TX_STATUS_LATENCY_STATS */
} flow_info_t;
/** a flow ring is used for outbound (towards antenna) 802.3 packets */
@@ -196,6 +217,9 @@
#ifdef IDLE_TX_FLOW_MGMT
uint64 last_active_ts; /* contains last active timestamp */
#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DHD_HP2P
+ bool hp2p_ring;
+#endif /* DHD_HP2P */
} flow_ring_node_t;
typedef flow_ring_node_t flow_ring_table_t;
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.c
index 65434da..78aaa38 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.c
@@ -1,7 +1,9 @@
/*
* IP Packet Parser Module.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -39,11 +41,11 @@
#include <dhd_ip.h>
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <bcmtcp.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
/* special values */
/* 802.3 llc/snap header */
@@ -130,7 +132,11 @@
#ifndef TCPACK_SUPPRESS_HOLD_HRT
timer_list_compat_t timer;
#else
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
struct tasklet_hrtimer timer;
+#else
+ struct hrtimer timer;
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
} tcpack_info_t;
@@ -503,12 +509,16 @@
&tcpack_sup_module->tcpack_info_tbl[i];
tcpack_info_tbl->dhdp = dhdp;
#ifndef TCPACK_SUPPRESS_HOLD_HRT
- init_timer(&tcpack_info_tbl->timer);
- tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl;
- tcpack_info_tbl->timer.function = dhd_tcpack_send;
+ init_timer_compat(&tcpack_info_tbl->timer,
+ dhd_tcpack_send, tcpack_info_tbl);
#else
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
tasklet_hrtimer_init(&tcpack_info_tbl->timer,
dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#else
+ hrtimer_init(&tcpack_info_tbl->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
}
break;
@@ -1360,9 +1370,13 @@
mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
#else
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
tasklet_hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
- ktime_set(0, dhdp->tcpack_sup_delay*1000000),
- HRTIMER_MODE_REL);
+ ktime_set(0, dhdp->tcpack_sup_delay*1000000), HRTIMER_MODE_REL);
+#else
+ hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+ ktime_set(0, dhdp->tcpack_sup_delay*1000000), HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
tcpack_sup_mod->tcpack_info_cnt++;
} else {
@@ -1375,3 +1389,54 @@
return hold;
}
#endif /* DHDTCPACK_SUPPRESS */
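
For kernels newer than 5.1.21, where tasklet_hrtimer no longer exists, the plain hrtimer initialized above with HRTIMER_MODE_REL_SOFT expects a callback with the standard hrtimer shape. A minimal sketch under that assumption follows; the real handler is dhd_tcpack_send, and the body here is elided.

/* Sketch only -- not part of this patch. Assumes the >= 5.2 path above,
 * where tcpack_info_t.timer is a plain struct hrtimer in soft mode.
 */
static enum hrtimer_restart dhd_tcpack_send_sketch(struct hrtimer *timer)
{
	tcpack_info_t *tbl = container_of(timer, tcpack_info_t, timer);

	/* Flush the held TCP ACK for this slot (details elided). */
	(void)tbl;

	return HRTIMER_NORESTART;	/* one-shot; re-armed on the next held ACK */
}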
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+tcp_hdr_flag_t
+dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *ether_hdr; /* Ethernet header of the new packet */
+ uint16 ether_type; /* Ethernet type of the new packet */
+ uint8 *ip_hdr; /* IP header of the new packet */
+ uint8 *tcp_hdr; /* TCP header of the new packet */
+ uint32 ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint8 flags;
+
+ ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+ if (ether_type != ETHER_TYPE_IP) {
+ DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, ether_type));
+ return FLAG_OTHERS;
+ }
+
+ ip_hdr = ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ if (cur_framelen < IPV4_MIN_HEADER_LEN) {
+ return FLAG_OTHERS;
+ }
+
+ ip_hdr_len = IPV4_HLEN(ip_hdr);
+ if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+ DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+ return FLAG_OTHERS;
+ }
+
+ tcp_hdr = ip_hdr + ip_hdr_len;
+
+ flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET];
+
+ if (flags & TCP_FLAG_SYN) {
+ if (flags & TCP_FLAG_ACK) {
+ return FLAG_SYNCACK;
+ }
+ return FLAG_SYNC;
+ }
+ return FLAG_OTHERS;
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
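A hypothetical caller-side sketch showing how the returned tcp_hdr_flag_t could feed a SYN-flood heuristic; the counter parameters and the detection idea are assumptions for illustration, not code from this patch.

/* Hypothetical usage sketch -- counters and threshold logic are
 * illustrative only; only dhd_tcpdata_get_flag() and the flag enum
 * come from this patch.
 */
static void count_tcp_sync_sketch(dhd_pub_t *dhdp, void *pkt,
	uint32 *rx_syn_cnt, uint32 *tx_synack_cnt, bool tx)
{
	tcp_hdr_flag_t flag = dhd_tcpdata_get_flag(dhdp, pkt);

	if (!tx && flag == FLAG_SYNC)
		(*rx_syn_cnt)++;	/* incoming connection attempt */
	else if (tx && flag == FLAG_SYNCACK)
		(*tx_synack_cnt)++;	/* our reply to it */

	/* A flood detector could compare (rx_syn - tx_synack) against a
	 * threshold over a time window and schedule recovery work.
	 */
}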
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.h
index 6bbe94b..640aa93 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_ip.h
@@ -3,7 +3,9 @@
*
* Provides type definitions and function prototypes used to parse ip packet.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -32,11 +34,11 @@
#ifndef _dhd_ip_h_
#define _dhd_ip_h_
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dngl_stats.h>
#include <bcmutils.h>
#include <dhd.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
typedef enum pkt_frag
{
@@ -48,6 +50,17 @@
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+#ifdef DHDTCPSYNC_FLOOD_BLK
+typedef enum tcp_hdr_flags {
+ FLAG_SYNC,
+ FLAG_SYNCACK,
+ FLAG_RST,
+ FLAG_OTHERS
+} tcp_hdr_flag_t;
+
+extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.c
index 2917d4e..6751a3b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -2,7 +2,9 @@
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -31,6 +33,7 @@
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
+#include <bcmstdlib_s.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
@@ -53,7 +56,6 @@
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
@@ -63,11 +65,14 @@
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
-#ifdef DHD_DUMP_MNGR
#include <linux/namei.h>
-#endif /* DHD_DUMP_MNGR */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
+#include <dhd_linux_priv.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#include <uapi/linux/sched/types.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#include <epivers.h>
#include <bcmutils.h>
@@ -80,10 +85,10 @@
#include <vlan.h>
#include <802.3.h>
-#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
@@ -95,12 +100,15 @@
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
-#endif // endif
-#ifdef WL_CFG80211
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
+#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
-#endif // endif
+#ifdef WL_BAM
+#include <wl_bam.h>
+#endif /* WL_BAM */
+#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif // endif
@@ -108,9 +116,10 @@
#include <dhd_rtt.h>
#endif // endif
-#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
+#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820)
#include <linux/exynos-pci-ctrl.h>
-#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
+#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
@@ -129,13 +138,20 @@
#include <uapi/linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+#include <eapol.h>
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_BANDSTEER
+#include <dhd_bandsteer.h>
+#endif /* DHD_BANDSTEER */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
@@ -144,93 +160,23 @@
#define IP_PROT_RESERVED 0xFF
-#if defined(DHD_LB)
-#if !defined(PCIE_FULL_DONGLE)
-#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
-#endif /* !PCIE_FULL_DONGLE */
-#endif /* DHD_LB */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+static void dhd_m4_state_handler(struct work_struct * work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
- defined(DHD_LB_STATS)
-#if !defined(DHD_LB)
-#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
-#endif /* !DHD_LB */
-#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
-
-#if defined(DHD_LB)
-/* Dynamic CPU selection for load balancing */
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/notifier.h>
-#include <linux/workqueue.h>
-#include <asm/atomic.h>
-
-#if !defined(DHD_LB_PRIMARY_CPUS)
-#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
-#endif // endif
-#if !defined(DHD_LB_SECONDARY_CPUS)
-#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
-#endif // endif
-
-#define HIST_BIN_SIZE 9
-
-static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
-
-#if defined(DHD_LB_TXP)
-static void dhd_lb_tx_handler(unsigned long data);
-static void dhd_tx_dispatcher_work(struct work_struct * work);
-static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
-static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
-
-/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
-typedef struct dhd_tx_lb_pkttag_fr {
- struct net_device *net;
- int ifidx;
-} dhd_tx_lb_pkttag_fr_t;
-
-#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
-#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
-
-#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
-#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
-#endif /* DHD_LB_TXP */
-#endif /* DHD_LB */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+static void dhd_blk_tsfl_handler(struct work_struct * work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */
-#ifdef WL_MONITOR
-#include <bcmmsgbuf.h>
-#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
-#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
-#endif /* WL_MONITOR */
-
-#define htod32(i) (i)
-#define htod16(i) (i)
-#define dtoh32(i) (i)
-#define dtoh16(i) (i)
-#define htodchanspec(i) (i)
-#define dtohchanspec(i) (i)
-
-#ifdef BLOCK_IPV6_PACKET
-#define HEX_PREF_STR "0x"
-#define UNI_FILTER_STR "010000000000"
-#define ZERO_ADDR_STR "000000000000"
-#define ETHER_TYPE_STR "0000"
-#define IPV6_FILTER_STR "20"
-#define ZERO_TYPE_STR "00"
-#endif /* BLOCK_IPV6_PACKET */
-
#if defined(OEM_ANDROID) && defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif
-#ifdef DHD_8021X_DUMP
-extern void dhd_dump_eapol_4way_message(char *ifname, unsigned char *dump_data, bool direction);
-#endif /* DHD_8021X_DUMP */
-
#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
@@ -268,24 +214,6 @@
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */
-/*
- * Start of Host DMA whitelist region.
- */
-uint32 wlreg_l = 0;
-uint32 wlreg_h = 0;
-module_param(wlreg_l, uint, 0644);
-module_param(wlreg_h, uint, 0644);
-
-/*
- * Sizeof whitelist region. The dongle will allow DMA to only wlreg to wlreg+wlreg_len.
- * If length of whitelist region is zero, host will not program whitelist region to dongle.
- */
-uint32 wlreg_len_h = 0;
-uint32 wlreg_len_l = 0;
-
-module_param(wlreg_len_l, uint, 0644);
-module_param(wlreg_len_h, uint, 0644);
-
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
@@ -315,21 +243,19 @@
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#endif /* defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
-static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID)) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#if defined(OEM_ANDROID)
+static void dhd_hang_process(struct work_struct *work_data);
+#endif /* OEM_ANDROID */
MODULE_LICENSE("GPL and additional rights");
-#endif /* LinuxVer */
#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
@@ -357,14 +283,6 @@
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
-const char *
-print_tainted()
-{
- return "";
-}
-#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
-
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
@@ -389,18 +307,6 @@
#include <linux/nl80211.h>
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
-#if defined(OEM_ANDROID) && defined(BCMPCIE)
-extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
-#else
-extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
-#endif /* OEM_ANDROID && BCMPCIE */
-
-#ifdef PKT_FILTER_SUPPORT
-extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
-extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
-#endif // endif
-
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
u8* program, uint32 program_len);
@@ -409,51 +315,24 @@
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */
-#if defined(BT_OVER_SDIO)
-extern void wl_android_set_wifi_on_flag(bool enable);
-#endif /* BT_OVER_SDIO */
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+
+#if defined(ARGOS_NOTIFY_CB)
+/* ARGOS notifer data */
+static struct notifier_block argos_wifi; /* STA */
+static struct notifier_block argos_p2p; /* P2P */
+argos_rps_ctrl argos_rps_ctrl_data;
+#endif // endif
#ifdef DHD_FW_COREDUMP
-static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
-/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE
-* 2: DLD_BUF_TYPE_SPECIAL
-*/
-#define DLD_BUFFER_NUM 3
-#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
-#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
-#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
-
-#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_GENERAL_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
-#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */
-#ifdef CONFIG_LOG_BUF_SHIFT
-/* 15% of kernel log buf size, if for example klog buf size is 512KB
-* 15% of 512KB ~= 80KB
-*/
-#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
- (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
-#endif /* CONFIG_LOG_BUF_SHIFT */
-
-#define LOG_DUMP_COOKIE_BUFSIZE 1024u
struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
-static int dld_buf_size[DLD_BUFFER_NUM] = {
- LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
- LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
- LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
-};
-
-typedef struct {
- char *hdr_str;
- log_dump_section_type_t sec_type;
-} dld_hdr_t;
/* Only header for log dump buffers is stored in array
* header for sections like 'dhd dump', 'ext trap'
@@ -466,13 +345,22 @@
{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};
+static int dld_buf_size[DLD_BUFFER_NUM] = {
+ LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
+ LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
+ LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
+};
+
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
-
-#define DHD_PRINT_BUF_NAME_LEN 30
-static void dhd_print_buf_addr(char *name, void *buf, unsigned int size);
+static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
+static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
+void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
+void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
+static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
+static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
#endif /* DHD_LOG_DUMP */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
@@ -505,73 +393,12 @@
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */
-#ifdef SHOW_LOGTRACE
-static int dhd_trace_open_proc(struct inode *inode, struct file *file);
-ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff);
-
-static const struct file_operations proc_file_fops = {
- .read = dhd_trace_read_proc,
- .open = dhd_trace_open_proc,
- .release = seq_release,
-};
-#endif // endif
+#ifdef WL_STATIC_IF
+bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
+#endif /* WL_STATIC_IF */
atomic_t exit_in_progress = ATOMIC_INIT(0);
-typedef struct dhd_if_event {
- struct list_head list;
- wl_event_data_if_t event;
- char name[IFNAMSIZ+1];
- uint8 mac[ETHER_ADDR_LEN];
-} dhd_if_event_t;
-
-/* Interface control information */
-typedef struct dhd_if {
- struct dhd_info *info; /* back pointer to dhd_info */
- /* OS/stack specifics */
- struct net_device *net;
- int idx; /* iface idx in dongle */
- uint subunit; /* subunit */
- uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
- bool set_macaddress;
- bool set_multicast;
- uint8 bssidx; /* bsscfg index for the interface */
- bool attached; /* Delayed attachment when unset */
- bool txflowcontrol; /* Per interface flow control indicator */
- char name[IFNAMSIZ+1]; /* linux interface name */
- char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
- struct net_device_stats stats;
-#ifdef PCIE_FULL_DONGLE
- struct list_head sta_list; /* sll of associated stations */
- spinlock_t sta_list_lock; /* lock for manipulating sll */
-#endif /* PCIE_FULL_DONGLE */
- uint32 ap_isolate; /* ap-isolation settings */
-#ifdef DHD_L2_FILTER
- bool parp_enable;
- bool parp_discard;
- bool parp_allnode;
- arp_table_t *phnd_arp_table;
- /* for Per BSS modification */
- bool dhcp_unicast;
- bool block_ping;
- bool grat_arp;
- bool block_tdls;
-#endif /* DHD_L2_FILTER */
-#ifdef DHD_MCAST_REGEN
- bool mcast_regen_bss_enable;
-#endif // endif
- bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
- cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */
- uint8 tx_paths_active;
- bool del_in_progress;
- bool static_if; /* used to avoid some operations on static_if */
-} dhd_if_t;
-
-struct ipv6_work_info_t {
- uint8 if_idx;
- char ipv6_addr[IPV6_ADDR_LEN];
- unsigned long event;
-};
static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
@@ -583,335 +410,7 @@
.groups = 1,
.input = dhd_process_daemon_msg,
};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
-
-typedef struct dhd_dump {
- uint8 *buf;
- int bufsize;
-} dhd_dump_t;
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-struct dhd_rx_tx_work {
- struct work_struct work;
- struct sk_buff *skb;
- struct net_device *net;
- struct dhd_pub *pub;
-};
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-/* When Perimeter locks are deployed, any blocking calls must be preceeded
- * with a PERIM UNLOCK and followed by a PERIM LOCK.
- * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
- * wait_event_timeout().
- */
-
-/* Local private structure (extension of pub) */
-typedef struct dhd_info {
-#if defined(WL_WIRELESS_EXT)
- wl_iw_t iw; /* wireless extensions state (must be first) */
-#endif /* defined(WL_WIRELESS_EXT) */
- dhd_pub_t pub;
- /* for supporting multiple interfaces.
- * static_ifs hold the net ifaces without valid FW IF
- */
- dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
-
- void *adapter; /* adapter information, interrupt, fw path etc. */
- char fw_path[PATH_MAX]; /* path to firmware image */
- char nv_path[PATH_MAX]; /* path to nvram vars file */
-#ifdef DHD_UCODE_DOWNLOAD
- char uc_path[PATH_MAX]; /* path to ucode image */
-#endif /* DHD_UCODE_DOWNLOAD */
-
- /* serialize dhd iovars */
- struct mutex dhd_iovar_mutex;
-
- struct semaphore proto_sem;
-#ifdef PROP_TXSTATUS
- spinlock_t wlfc_spinlock;
-
-#endif /* PROP_TXSTATUS */
- wait_queue_head_t ioctl_resp_wait;
- wait_queue_head_t d3ack_wait;
- wait_queue_head_t dhd_bus_busy_state_wait;
- wait_queue_head_t dmaxfer_wait;
- uint32 default_wd_interval;
-
- timer_list_compat_t timer;
- bool wd_timer_valid;
-#ifdef DHD_PCIE_RUNTIMEPM
- struct timer_list rpm_timer;
- bool rpm_timer_valid;
- tsk_ctl_t thr_rpm_ctl;
-#endif /* DHD_PCIE_RUNTIMEPM */
- struct tasklet_struct tasklet;
- spinlock_t sdlock;
- spinlock_t txqlock;
- spinlock_t dhd_lock;
-
- struct semaphore sdsem;
- tsk_ctl_t thr_dpc_ctl;
- tsk_ctl_t thr_wdt_ctl;
-
- tsk_ctl_t thr_rxf_ctl;
- spinlock_t rxf_lock;
- bool rxthread_enabled;
-
- /* Wakelocks */
-#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- struct wake_lock wl_wifi; /* Wifi wakelock */
- struct wake_lock wl_rxwake; /* Wifi rx wakelock */
- struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
- struct wake_lock wl_wdwake; /* Wifi wd wakelock */
- struct wake_lock wl_evtwake; /* Wifi event wakelock */
- struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
- struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
-#ifdef BCMPCIE_OOB_HOST_WAKE
- struct wake_lock wl_intrwake; /* Host wakeup wakelock */
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- struct wake_lock wl_scanwake; /* Wifi scan wakelock */
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
- /* net_device interface lock, prevent race conditions among net_dev interface
- * calls and wifi_on or wifi_off
- */
- struct mutex dhd_net_if_mutex;
- struct mutex dhd_suspend_mutex;
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
- struct mutex dhd_apf_mutex;
-#endif /* PKT_FILTER_SUPPORT && APF */
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(OEM_ANDROID) */
- spinlock_t wakelock_spinlock;
- spinlock_t wakelock_evt_spinlock;
- uint32 wakelock_counter;
- int wakelock_wd_counter;
- int wakelock_rx_timeout_enable;
- int wakelock_ctrl_timeout_enable;
- bool waive_wakelock;
- uint32 wakelock_before_waive;
-
- /* Thread to issue ioctl for multicast */
- wait_queue_head_t ctrl_wait;
- atomic_t pend_8021x_cnt;
- dhd_attach_states_t dhd_state;
-#ifdef SHOW_LOGTRACE
- dhd_event_log_t event_data;
-#endif /* SHOW_LOGTRACE */
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- struct early_suspend early_suspend;
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
-
-#ifdef ARP_OFFLOAD_SUPPORT
- u32 pend_ipaddr;
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
- spinlock_t tcpack_lock;
-#endif /* DHDTCPACK_SUPPRESS */
-#ifdef FIX_CPU_MIN_CLOCK
- bool cpufreq_fix_status;
- struct mutex cpufreq_fix;
- struct pm_qos_request dhd_cpu_qos;
-#ifdef FIX_BUS_MIN_CLOCK
- struct pm_qos_request dhd_bus_qos;
-#endif /* FIX_BUS_MIN_CLOCK */
-#endif /* FIX_CPU_MIN_CLOCK */
- void *dhd_deferred_wq;
-#ifdef DEBUG_CPU_FREQ
- struct notifier_block freq_trans;
- int __percpu *new_freq;
-#endif // endif
- unsigned int unit;
- struct notifier_block pm_notifier;
-#ifdef DHD_PSTA
- uint32 psta_mode; /* PSTA or PSR */
-#endif /* DHD_PSTA */
-#ifdef DHD_WET
- uint32 wet_mode;
-#endif /* DHD_WET */
-#ifdef DHD_DEBUG
- dhd_dump_t *dump;
- struct timer_list join_timer;
- u32 join_timeout_val;
- bool join_timer_active;
- uint scan_time_count;
- struct timer_list scan_timer;
- bool scan_timer_active;
-#endif // endif
-#if defined(DHD_LB)
- /* CPU Load Balance dynamic CPU selection */
-
- /* Variable that tracks the currect CPUs available for candidacy */
- cpumask_var_t cpumask_curr_avail;
-
- /* Primary and secondary CPU mask */
- cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
- cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
-
- struct notifier_block cpu_notifier;
-
- /* Tasklet to handle Tx Completion packet freeing */
- struct tasklet_struct tx_compl_tasklet;
- atomic_t tx_compl_cpu;
-
- /* Tasklet to handle RxBuf Post during Rx completion */
- struct tasklet_struct rx_compl_tasklet;
- atomic_t rx_compl_cpu;
-
- /* Napi struct for handling rx packet sendup. Packets are removed from
- * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
- * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
- * to run to rx_napi_cpu.
- */
- struct sk_buff_head rx_pend_queue ____cacheline_aligned;
- struct sk_buff_head rx_napi_queue ____cacheline_aligned;
- struct napi_struct rx_napi_struct ____cacheline_aligned;
- atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
- struct net_device *rx_napi_netdev; /* netdev of primary interface */
-
- struct work_struct rx_napi_dispatcher_work;
- struct work_struct tx_compl_dispatcher_work;
- struct work_struct tx_dispatcher_work;
- struct work_struct rx_compl_dispatcher_work;
-
- /* Number of times DPC Tasklet ran */
- uint32 dhd_dpc_cnt;
- /* Number of times NAPI processing got scheduled */
- uint32 napi_sched_cnt;
- /* Number of times NAPI processing ran on each available core */
- uint32 *napi_percpu_run_cnt;
- /* Number of times RX Completions got scheduled */
- uint32 rxc_sched_cnt;
- /* Number of times RX Completion ran on each available core */
- uint32 *rxc_percpu_run_cnt;
- /* Number of times TX Completions got scheduled */
- uint32 txc_sched_cnt;
- /* Number of times TX Completions ran on each available core */
- uint32 *txc_percpu_run_cnt;
- /* CPU status */
- /* Number of times each CPU came online */
- uint32 *cpu_online_cnt;
- /* Number of times each CPU went offline */
- uint32 *cpu_offline_cnt;
-
- /* Number of times TX processing run on each core */
- uint32 *txp_percpu_run_cnt;
- /* Number of times TX start run on each core */
- uint32 *tx_start_percpu_run_cnt;
-
- /* Tx load balancing */
-
- /* TODO: Need to see if batch processing is really required in case of TX
- * processing. In case of RX the Dongle can send a bunch of rx completions,
- * hence we took a 3 queue approach
- * enque - adds the skbs to rx_pend_queue
- * dispatch - uses a lock and adds the list of skbs from pend queue to
- * napi queue
- * napi processing - copies the pend_queue into a local queue and works
- * on it.
- * But for TX its going to be 1 skb at a time, so we are just thinking
- * of using only one queue and use the lock supported skb queue functions
- * to add and process it. If its in-efficient we'll re-visit the queue
- * design.
- */
-
- /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
- /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
- /*
- * From the Tasklet that actually sends out data
- * copy the list tx_pend_queue into tx_active_queue. There by we need
- * to spinlock to only perform the copy the rest of the code ie to
- * construct the tx_pend_queue and the code to process tx_active_queue
- * can be lockless. The concept is borrowed as is from RX processing
- */
- /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
-
- /* Control TXP in runtime, enable by default */
- atomic_t lb_txp_active;
-
- /*
- * When the NET_TX tries to send a TX packet put it into tx_pend_queue
- * For now, the processing tasklet will also direcly operate on this
- * queue
- */
- struct sk_buff_head tx_pend_queue ____cacheline_aligned;
-
- /* cpu on which the DHD Tx is happenning */
- atomic_t tx_cpu;
-
- /* CPU on which the Network stack is calling the DHD's xmit function */
- atomic_t net_tx_cpu;
-
- /* Tasklet context from which the DHD's TX processing happens */
- struct tasklet_struct tx_tasklet;
-
- /*
- * Consumer Histogram - NAPI RX Packet processing
- * -----------------------------------------------
- * On Each CPU, when the NAPI RX Packet processing call back was invoked
- * how many packets were processed is captured in this data structure.
- * Now its difficult to capture the "exact" number of packets processed.
- * So considering the packet counter to be a 32 bit one, we have a
- * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
- * processed is rounded off to the next power of 2 and put in the
- * approriate "bin" the value in the bin gets incremented.
- * For example, assume that in CPU 1 if NAPI Rx runs 3 times
- * and the packet count processed is as follows (assume the bin counters are 0)
- * iteration 1 - 10 (the bin counter 2^4 increments to 1)
- * iteration 2 - 30 (the bin counter 2^5 increments to 1)
- * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
- */
- uint32 *napi_rx_hist[HIST_BIN_SIZE];
- uint32 *txc_hist[HIST_BIN_SIZE];
- uint32 *rxc_hist[HIST_BIN_SIZE];
-#endif /* DHD_LB */
-
-#ifdef SHOW_LOGTRACE
- struct delayed_work event_log_dispatcher_work;
-#endif /* SHOW_LOGTRACE */
-
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
- struct kobject dhd_kobj;
- struct timer_list timesync_timer;
-#if defined(BT_OVER_SDIO)
- char btfw_path[PATH_MAX];
-#endif /* defined (BT_OVER_SDIO) */
-#ifdef WL_MONITOR
- struct net_device *monitor_dev; /* monitor pseudo device */
- struct sk_buff *monitor_skb;
- uint monitor_len;
- uint monitor_type; /* monitor pseudo device */
-#endif /* WL_MONITOR */
-#if defined(BT_OVER_SDIO)
- struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
- int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
-#endif /* BT_OVER_SDIO */
-#ifdef SHOW_LOGTRACE
- struct sk_buff_head evt_trace_queue ____cacheline_aligned;
-#endif // endif
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- struct workqueue_struct *tx_wq;
- struct workqueue_struct *rx_wq;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_DEBUG_UART
- bool duart_execute;
-#endif /* DHD_DEBUG_UART */
- struct mutex logdump_lock;
- /* indicates mem_dump was scheduled as work queue or called directly */
- bool scheduled_memdump;
- /* indicates sssrdump is called directly instead of scheduling work queue */
- bool no_wq_sssrdump;
-} dhd_info_t;
-
-#ifdef WL_MONITOR
-#define MONPKT_EXTRA_LEN 48u
-#endif /* WL_MONITOR */
-
-#define DHDIF_FWDER(dhdif) FALSE
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
@@ -926,11 +425,8 @@
/* Definitions to provide path to the firmware and nvram
* example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
*/
-
-//tianyan@2021.10.15 modify for add wifi6 module start
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
-//tianyan@2021.10.15 modify for add wifi6 module start
char clm_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
@@ -950,9 +446,9 @@
module_param(op_mode, int, 0644);
#if defined(OEM_ANDROID)
extern int wl_control_wl_start(struct net_device *dev);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
+#if defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#endif /* BCMLXSDMMC */
#endif /* defined(OEM_ANDROID) */
#ifdef DHD_LOG_DUMP
@@ -963,18 +459,30 @@
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
+#ifdef EWP_RTT_LOGGING
+int logdump_rtt_enable = TRUE;
+#else
+int logdump_rtt_enable = FALSE;
+#endif /* EWP_RTT_LOGGING */
+module_param(logdump_rtt_enable, int, 0644);
#endif /* DHD_LOG_DUMP */
+#ifdef EWP_EDL
+int host_edl_support = TRUE;
+module_param(host_edl_support, int, 0644);
+#endif // endif
/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+#ifndef DHD_DIRECT_SET_MAC
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+#endif // endif
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
@@ -987,6 +495,7 @@
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
+static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
@@ -1004,9 +513,10 @@
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
#ifdef ENABLE_ARP_SNOOP_MODE
-uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
+uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
+ ARP_OL_UPDATE_HOST_CACHE);
#else
-uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
@@ -1106,76 +616,54 @@
module_param(enable_msi, uint, 0);
#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_SSSR_DUMP
+int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
+extern uint support_sssr_dump;
+module_param(support_sssr_dump, uint, 0);
+#endif /* DHD_SSSR_DUMP */
+
/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
-#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+/* Takes the value of LL from the OTP param customvar2=0xKKLLMMNN;
+ * LL selects the module variant.
+ */
+uint32 hw_module_variant = 0;
+module_param(hw_module_variant, uint, 0644);
+
+#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
-#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+#endif /* DHD_LB_RXP */
#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);
+
+extern uint dma_ring_indices;
+module_param(dma_ring_indices, uint, 0644);
+
+extern bool h2d_phase;
+module_param(h2d_phase, bool, 0644);
+extern bool force_trap_bad_h2d_phase;
+module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_DHCP_DUMP
-struct bootp_fmt {
- struct iphdr ip_header;
- struct udphdr udp_header;
- uint8 op;
- uint8 htype;
- uint8 hlen;
- uint8 hops;
- uint32 transaction_id;
- uint16 secs;
- uint16 flags;
- uint32 client_ip;
- uint32 assigned_ip;
- uint32 server_ip;
- uint32 relay_ip;
- uint8 hw_address[16];
- uint8 server_name[64];
- uint8 file_name[128];
- uint8 options[312];
-};
-
-static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
-static const char dhcp_ops[][10] = {
- "NA", "REQUEST", "REPLY"
-};
-static const char dhcp_types[][10] = {
- "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
-};
-static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
-#endif /* DHD_DHCP_DUMP */
-
-#ifdef FILTER_IE
-#define FILTER_IE_PATH "/etc/wifi/filter_ie"
-#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
-#define FILE_BLOCK_READ_SIZE 256
-#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
-#endif /* FILTER_IE */
-
-#define NULL_CHECK(p, s, err) \
- do { \
- if (!(p)) { \
- printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
- err = BCME_ERROR; \
- return err; \
- } \
- } while (0)
-
-#ifdef DHD_ICMP_DUMP
-#include <net/icmp.h>
-static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
-#endif /* DHD_ICMP_DUMP */
-
-/* Functions to manage sysfs interface for dhd */
-static int dhd_sysfs_init(dhd_info_t *dhd);
-static void dhd_sysfs_exit(dhd_info_t *dhd);
+#ifdef FORCE_TPOWERON
+/*
+ * On Fire's reference platform, coming out of L1.2,
+ * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
+ * Because of this delay, with tPowerOn < 50us there is a chance that
+ * the refclk sense triggers on noise.
+ *
+ * 0x29, when written to L1SSControl2, translates to 50us.
+ */
+#define FORCE_TPOWERON_50US 0x29
+uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
+module_param(tpoweron_scale, uint, 0644);
+#endif /* FORCE_TPOWERON */
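As a worked example of the 50us figure above, and assuming the standard PCIe L1 PM Substates Control 2 T_Power_On encoding (bits [1:0] scale with 0=2us, 1=10us, 2=100us; bits [7:3] value), a hypothetical decode helper; it is illustrative and not part of the driver.

/* Illustrative decode of a T_Power_On field. Scale code 3 is reserved
 * and mapped to 0 here.
 */
static unsigned int l1ss_tpoweron_to_us(unsigned int field)
{
	static const unsigned int scale_us[4] = { 2, 10, 100, 0 };
	unsigned int scale = field & 0x3;         /* bits [1:0] */
	unsigned int value = (field >> 3) & 0x1f; /* bits [7:3] */

	return value * scale_us[scale];
}

/* l1ss_tpoweron_to_us(0x29) == 5 * 10us == 50us, comfortably above the
 * observed 45us CLKREQ#-to-stable-REFCLK delay.
 */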
#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
@@ -1219,708 +707,19 @@
char *map_file);
#endif /* SHOW_LOGTRACE */
-#ifdef D2H_MINIDUMP
-void dhd_d2h_minidump(dhd_pub_t *dhdp);
-#endif /* D2H_MINIDUMP */
+#ifdef BCMSDIO
+#define DHD_IF_ROLE(pub, idx) ((pub)->info->iflist[idx]->role)
+#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
-#if defined(DHD_LB)
-
-static void
-dhd_lb_set_default_cpus(dhd_info_t *dhd)
+void dhd_set_role(dhd_pub_t *dhdp, int role, int bssidx)
{
- /* Default CPU allocation for the jobs */
- atomic_set(&dhd->rx_napi_cpu, 1);
- atomic_set(&dhd->rx_compl_cpu, 2);
- atomic_set(&dhd->tx_compl_cpu, 2);
- atomic_set(&dhd->tx_cpu, 2);
- atomic_set(&dhd->net_tx_cpu, 0);
+ int ifidx = dhd_bssidx2idx(dhdp, bssidx);
+ DHD_TRACE(("dhd_set_role ifidx %d role %d\n", ifidx, role));
+ dhdp->info->iflist[ifidx]->role = role;
}
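A hedged usage sketch of the new helpers: once dhd_set_role() has cached the role reported by a firmware interface event, the DHD_IF_ROLE_* macros let per-interface code branch on it. The function below is illustrative only, not part of this patch.

/* Illustrative only: treat AP and P2P-GO interfaces alike. */
static bool dhd_if_role_is_ap_like(dhd_pub_t *dhdp, int ifidx)
{
	/* Assumes dhd_set_role() already ran for this interface. */
	return DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx);
}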
-
-static void
-dhd_cpumasks_deinit(dhd_info_t *dhd)
-{
- free_cpumask_var(dhd->cpumask_curr_avail);
- free_cpumask_var(dhd->cpumask_primary);
- free_cpumask_var(dhd->cpumask_primary_new);
- free_cpumask_var(dhd->cpumask_secondary);
- free_cpumask_var(dhd->cpumask_secondary_new);
-}
-
-static int
-dhd_cpumasks_init(dhd_info_t *dhd)
-{
- int id;
- uint32 cpus, num_cpus = num_possible_cpus();
- int ret = 0;
-
- DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
- DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
-
- if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
- DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
- ret = -ENOMEM;
- goto fail;
- }
-
- cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
- cpumask_clear(dhd->cpumask_primary);
- cpumask_clear(dhd->cpumask_secondary);
-
- if (num_cpus > 32) {
- DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
- ASSERT(0);
- }
-
- cpus = DHD_LB_PRIMARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_primary);
- }
-
- cpus = DHD_LB_SECONDARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_secondary);
- }
-
- return ret;
-fail:
- dhd_cpumasks_deinit(dhd);
- return ret;
-}
-
-/*
- * The CPU Candidacy Algorithm
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * The available CPUs for selection are divided into two groups
- * Primary Set - A CPU mask that carries the First Choice CPUs
- * Secondary Set - A CPU mask that carries the Second Choice CPUs.
- *
- * There are two types of Job, that needs to be assigned to
- * the CPUs, from one of the above mentioned CPU group. The Jobs are
- * 1) Rx Packet Processing - napi_cpu
- * 2) Completion Processiong (Tx, RX) - compl_cpu
- *
- * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
- * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
- * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
- * If there are more processors free, it assigns one to compl_cpu.
- * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
- * CPU, as much as possible.
- *
- * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
- * would allow Tx completion skb's to be released into a local free pool from
- * which the rx buffer posts could have been serviced. it is important to note
- * that a Tx packet may not have a large enough buffer for rx posting.
- */
-void dhd_select_cpu_candidacy(dhd_info_t *dhd)
-{
- uint32 primary_available_cpus; /* count of primary available cpus */
- uint32 secondary_available_cpus; /* count of secondary available cpus */
- uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
- uint32 compl_cpu = 0; /* cpu selected for completion jobs */
- uint32 tx_cpu = 0; /* cpu selected for tx processing job */
-
- cpumask_clear(dhd->cpumask_primary_new);
- cpumask_clear(dhd->cpumask_secondary_new);
-
- /*
- * Now select from the primary mask. Even if a Job is
- * already running on a CPU in secondary group, we still move
- * to primary CPU. So no conditional checks.
- */
- cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
- dhd->cpumask_curr_avail);
-
- cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
- dhd->cpumask_curr_avail);
-
- primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
-
- if (primary_available_cpus > 0) {
- napi_cpu = cpumask_first(dhd->cpumask_primary_new);
-
- /* If no further CPU is available,
- * cpumask_next returns >= nr_cpu_ids
- */
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
-
- /* In case there are no more CPUs, do completions & Tx in same CPU */
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = tx_cpu;
- }
-
- DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-
- /* -- Now check for the CPUs from the secondary mask -- */
- secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
-
- DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
- __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
-
- if (secondary_available_cpus > 0) {
- /* At this point if napi_cpu is unassigned it means no CPU
- * is online from Primary Group
- */
- if (napi_cpu == 0) {
- napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (tx_cpu == 0) {
- tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (compl_cpu == 0) {
- compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
- }
-
- /* If no CPU was available for tx processing, choose CPU 0 */
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
-
- /* If no CPU was available for completion, choose CPU 0 */
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = 0;
- }
- if ((primary_available_cpus == 0) &&
- (secondary_available_cpus == 0)) {
- /* No CPUs available from primary or secondary mask */
- napi_cpu = 1;
- compl_cpu = 0;
- tx_cpu = 2;
- }
-
- DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-
- ASSERT(napi_cpu < nr_cpu_ids);
- ASSERT(compl_cpu < nr_cpu_ids);
- ASSERT(tx_cpu < nr_cpu_ids);
-
- atomic_set(&dhd->rx_napi_cpu, napi_cpu);
- atomic_set(&dhd->tx_compl_cpu, compl_cpu);
- atomic_set(&dhd->rx_compl_cpu, compl_cpu);
- atomic_set(&dhd->tx_cpu, tx_cpu);
-
- return;
-}
-/*
- * Function to handle CPU Hotplug notifications.
- * One of the task it does is to trigger the CPU Candidacy algorithm
- * for load balancing.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-int dhd_cpu_startup_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
-
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
-
- return 0;
-}
-
-int dhd_cpu_teardown_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
-
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
-
- return 0;
-}
-#else
-int
-dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned long int cpu = (unsigned long int)hcpu;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
- DHD_INFO(("%s(): LB data is not initialized yet.\n",
- __FUNCTION__));
- return NOTIFY_BAD;
- }
-
- switch (action)
- {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
-static int dhd_register_cpuhp_callback(dhd_info_t *dhd)
-{
- int cpuhp_ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
- dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
-
- if (cpuhp_ret < 0) {
- DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
- __FUNCTION__, cpuhp_ret));
- }
-#else
- /*
- * If we are able to initialize CPU masks, lets register to the
- * CPU Hotplug framework to change the CPU for each job dynamically
- * using candidacy algorithm.
- */
- dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
- register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
- return cpuhp_ret;
-}
-
-static int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
-{
- int ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- /* Don't want to call tear down while unregistering */
- cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
-#else
- if (dhd->cpu_notifier.notifier_call != NULL) {
- unregister_cpu_notifier(&dhd->cpu_notifier);
- }
-#endif // endif
- return ret;
-}
-
-#if defined(DHD_LB_STATS)
-void dhd_lb_stats_init(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- int i, j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
- DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
-
- dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_percpu_run_cnt) {
- DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
-
- DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
-
- dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_percpu_run_cnt) {
- DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
-
- DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
-
- dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_percpu_run_cnt) {
- DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
-
- dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_online_cnt) {
- DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
-
- dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_offline_cnt) {
- DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
-
- dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txp_percpu_run_cnt) {
- DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
-
- dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->tx_start_percpu_run_cnt) {
- DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
-
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_rx_hist[j]) {
- DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
- }
- }
-#ifdef DHD_LB_TXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_hist[j]) {
- DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_hist[j]) {
- DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_RXC */
- return;
-}
-
-void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- int j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- if (dhd->napi_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
- dhd->napi_percpu_run_cnt = NULL;
- }
- if (dhd->rxc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
- dhd->rxc_percpu_run_cnt = NULL;
- }
- if (dhd->txc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
- dhd->txc_percpu_run_cnt = NULL;
- }
- if (dhd->cpu_online_cnt) {
- MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
- dhd->cpu_online_cnt = NULL;
- }
- if (dhd->cpu_offline_cnt) {
- MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
- dhd->cpu_offline_cnt = NULL;
- }
-
- if (dhd->txp_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
- dhd->txp_percpu_run_cnt = NULL;
- }
- if (dhd->tx_start_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
- dhd->tx_start_percpu_run_cnt = NULL;
- }
-
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- if (dhd->napi_rx_hist[j]) {
- MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
- dhd->napi_rx_hist[j] = NULL;
- }
-#ifdef DHD_LB_TXC
- if (dhd->txc_hist[j]) {
- MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
- dhd->txc_hist[j] = NULL;
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- if (dhd->rxc_hist[j]) {
- MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
- dhd->rxc_hist[j] = NULL;
- }
-#endif /* DHD_LB_RXC */
- }
-
- return;
-}
-
-static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
- struct bcmstrbuf *strbuf, uint32 **hist)
-{
- int i, j;
- uint32 *per_cpu_total;
- uint32 total = 0;
- uint32 num_cpus = num_possible_cpus();
-
- per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
- if (!per_cpu_total) {
- DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
- return;
- }
- bzero(per_cpu_total, sizeof(uint32) * num_cpus);
-
- bcm_bprintf(strbuf, "CPU: \t\t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\nBin\n");
-
- for (i = 0; i < HIST_BIN_SIZE; i++) {
- bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
- for (j = 0; j < num_cpus; j++) {
- bcm_bprintf(strbuf, "%d\t", hist[i][j]);
- }
- bcm_bprintf(strbuf, "\n");
- }
- bcm_bprintf(strbuf, "Per CPU Total \t");
- total = 0;
- for (i = 0; i < num_cpus; i++) {
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- per_cpu_total[i] += (hist[j][i] * (1<<j));
- }
- bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
- total += per_cpu_total[i];
- }
- bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
-
- if (per_cpu_total) {
- MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
- per_cpu_total = NULL;
- }
- return;
-}
-
-static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
-{
- int i, num_cpus = num_possible_cpus();
-
- bcm_bprintf(strbuf, "CPU: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\n");
-
- bcm_bprintf(strbuf, "Val: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%u\t", *(p+i));
- bcm_bprintf(strbuf, "\n");
- return;
-}
-
-void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
-{
- dhd_info_t *dhd;
-
- if (dhdp == NULL || strbuf == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
- __FUNCTION__, dhdp, strbuf));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
-
- bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
-
- bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
- dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
- dhd->txc_sched_cnt);
-
-#ifdef DHD_LB_RXP
- bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LB_RXC
- bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
-#endif /* DHD_LB_RXC */
-
-#ifdef DHD_LB_TXC
- bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
-#endif /* DHD_LB_TXC */
-
-#ifdef DHD_LB_TXP
- bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
-
- bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
-#endif /* DHD_LB_TXP */
-}
-
-/* Given a number 'n' returns 'm' that is next larger power of 2 after n */
-static inline uint32 next_larger_power2(uint32 num)
-{
- num--;
- num |= (num >> 1);
- num |= (num >> 2);
- num |= (num >> 4);
- num |= (num >> 8);
- num |= (num >> 16);
-
- return (num + 1);
-}
-
-static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
-{
- uint32 bin_power;
- uint32 *p;
- bin_power = next_larger_power2(count);
-
- switch (bin_power) {
- case 1: p = bin[0] + cpu; break;
- case 2: p = bin[1] + cpu; break;
- case 4: p = bin[2] + cpu; break;
- case 8: p = bin[3] + cpu; break;
- case 16: p = bin[4] + cpu; break;
- case 32: p = bin[5] + cpu; break;
- case 64: p = bin[6] + cpu; break;
- case 128: p = bin[7] + cpu; break;
- default : p = bin[8] + cpu; break;
- }
-
- *p = *p + 1;
- return;
-}
-
-extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
-
- return;
-}
-
-extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
-
- return;
-}
-
-extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
-
- return;
-}
-
-extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
-}
-
-extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
-}
-#endif /* DHD_LB_STATS */
-
-#endif /* DHD_LB */
+#endif /* BCMSDIO */
#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
@@ -1933,7 +732,6 @@
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
-#ifdef PCIE_FULL_DONGLE
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
@@ -1947,7 +745,6 @@
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-#endif /* PCIE_FULL_DONGLE */
/* Control fw roaming */
#ifdef BCMCCX
@@ -1975,6 +772,15 @@
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+#ifdef WL_VIF_SUPPORT
+/* Virtual interface name */
+char vif_name[IFNAMSIZ] = "wlan";
+module_param_string(vif_name, vif_name, IFNAMSIZ, 0);
+
+int vif_num = 0;
+module_param(vif_num, int, 0);
+#endif /* WL_VIF_SUPPORT */
+
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
@@ -2033,7 +839,7 @@
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */
-#if defined(OEM_ANDROID)
+#if (defined(OEM_ANDROID) && !defined(BCMQT))
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
#else
@@ -2048,6 +854,16 @@
#endif // endif
module_param(enable_ecounter, uint, 0);
+/* TCM verification flag */
+uint dhd_tcm_test_enable = FALSE;
+module_param(dhd_tcm_test_enable, uint, 0644);
+
+/* WAR to avoid system hang during FW trap */
+#ifdef DHD_FW_COREDUMP
+uint disable_bug_on = FALSE;
+module_param(disable_bug_on, uint, 0);
+#endif /* DHD_FW_COREDUMP */
+
extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];
@@ -2064,6 +880,9 @@
#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
+#ifdef DHD_EXPORT_CNTL_FILE
+int pmmode_val;
+#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */
@@ -2127,11 +946,8 @@
}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39))
dhd_mmc_suspend = suspend;
smp_mb();
-#endif // endif
return ret;
}
@@ -2150,6 +966,11 @@
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
@@ -2195,18 +1016,16 @@
}
/* Return interface pointer */
-static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
ASSERT(ifidx < DHD_MAX_IFS);
- if (ifidx >= DHD_MAX_IFS)
+ if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
return NULL;
return dhdp->info->iflist[ifidx];
}
-#ifdef PCIE_FULL_DONGLE
-
/** Dummy objects are defined with state representing bad|down.
* Performance gains from reducing branch conditionals, instruction parallelism,
* dual issue, reducing load shadows, avail of larger pipelines.
@@ -2267,12 +1086,15 @@
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
+#ifdef PCIE_FULL_DONGLE
int prio;
+#endif // endif
ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+#ifdef PCIE_FULL_DONGLE
/*
* Flush and free all packets in all flowring's queues belonging to sta.
* Packets in flow ring will be flushed later.
@@ -2282,7 +1104,6 @@
if (flowid != FLOWID_INVALID) {
unsigned long flags;
- flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
flow_ring_node_t * flow_ring_node;
#ifdef DHDTCPACK_SUPPRESS
@@ -2293,22 +1114,28 @@
#endif /* DHDTCPACK_SUPPRESS */
flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+ if (flow_ring_node) {
+ flow_queue_t *queue = &flow_ring_node->queue;
- if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
- void * pkt;
- while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
- PKTFREE(dhdp->osh, pkt, TRUE);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+
+ if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+ void * pkt;
+ while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
+ NULL) {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
}
- }
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ }
}
sta->flowid[prio] = FLOWID_INVALID;
}
+#endif /* PCIE_FULL_DONGLE */
id16_map_free(dhdp->staid_allocator, sta->idx);
DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
@@ -2382,7 +1209,10 @@
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
- int idx, prio, sta_pool_memsz;
+ int idx, sta_pool_memsz;
+#ifdef PCIE_FULL_DONGLE
+ int prio;
+#endif /* PCIE_FULL_DONGLE */
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void * staid_allocator;
@@ -2420,9 +1250,11 @@
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
+#ifdef PCIE_FULL_DONGLE
for (prio = 0; prio < (int)NUMPRIO; prio++) {
sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
}
+#endif /* PCIE_FULL_DONGLE */
dhd_sta_free(dhdp, sta);
}
@@ -2456,7 +1288,10 @@
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
- int idx, prio, sta_pool_memsz;
+ int idx, sta_pool_memsz;
+#ifdef PCIE_FULL_DONGLE
+ int prio;
+#endif /* PCIE_FULL_DONGLE */
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void *staid_allocator;
@@ -2495,9 +1330,11 @@
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
+#ifdef PCIE_FULL_DONGLE
for (prio = 0; prio < (int)NUMPRIO; prio++) {
sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
}
+#endif /* PCIE_FULL_DONGLE */
dhd_sta_free(dhdp, sta);
}
}
@@ -2715,498 +1552,21 @@
}
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-#else
-static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
-static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
-static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
-static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
-static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
-dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
-dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
-void dhd_del_sta(void *pub, int ifidx, void *ea) {}
-#endif /* PCIE_FULL_DONGLE */
-
-#if defined(DHD_LB)
-
-#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
- defined(DHD_LB_RXP)
-/**
- * dhd_tasklet_schedule - Function that runs in IPI context of the destination
- * CPU and schedules a tasklet.
- * @tasklet: opaque pointer to the tasklet
- */
-INLINE void
-dhd_tasklet_schedule(void *tasklet)
-{
- tasklet_schedule((struct tasklet_struct *)tasklet);
-}
-/**
- * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
- * @tasklet: tasklet to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, then an IPI is sent to this cpu via the
- * smp_call_function_single with no wait and the tasklet_schedule function
- * will be invoked to schedule the specified tasklet on the requested CPU.
- */
-INLINE void
-dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
-{
- const int wait = 0;
- smp_call_function_single(on_cpu,
- dhd_tasklet_schedule, (void *)tasklet, wait);
-}
-
-/**
- * dhd_work_schedule_on - Executes the passed work in a given CPU
- * @work: work to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, then an IPI is sent to this cpu via the
- * schedule_work_on and the work function
- * will be invoked to schedule the specified work on the requested CPU.
- */
-
-INLINE void
-dhd_work_schedule_on(struct work_struct *work, int on_cpu)
-{
- schedule_work_on(on_cpu, work);
-}
-#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
-
-#if defined(DHD_LB_TXC)
-/**
- * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
- * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
- * freeing the packets placed in the tx_compl workq
- */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
void
-dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+dhd_axi_error_dispatch(dhd_pub_t *dhdp)
{
dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- on_cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- } else {
- schedule_work(&dhd->tx_compl_dispatcher_work);
- }
+ schedule_work(&dhd->axi_error_dispatcher_work);
}
-static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
{
struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->tx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- else
- dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
- put_online_cpus();
+ container_of(work, struct dhd_info, axi_error_dispatcher_work);
+ dhd_axi_error(&dhd->pub);
}
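For context, a minimal sketch of the attach-time pairing this deferral assumes; the real INIT_WORK call lives elsewhere in the driver and is not shown in this hunk, so the function name below is hypothetical.

/* Illustrative only: bind the handler once so that
 * dhd_axi_error_dispatch() can defer dhd_axi_error() from
 * interrupt/bus context to process context via schedule_work().
 */
static void example_axi_error_work_init(struct dhd_info *dhd)
{
	INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
}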
-#endif /* DHD_LB_TXC */
-
-#if defined(DHD_LB_RXC)
-/**
- * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
- * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
- * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
- * placed in the rx_compl workq.
- *
- * @dhdp: pointer to dhd_pub object
- */
-void
-dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
- on_cpu = atomic_read(&dhd->rx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- } else {
- schedule_work(&dhd->rx_compl_dispatcher_work);
- }
-}
-
-static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->rx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- else {
- dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
- }
- put_online_cpus();
-}
-#endif /* DHD_LB_RXC */
-
-#if defined(DHD_LB_TXP)
-static void dhd_tx_dispatcher_work(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- dhd_tasklet_schedule(&dhd->tx_tasklet);
-}
-
-static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
-{
- int cpu;
- int net_tx_cpu;
- dhd_info_t *dhd = dhdp->info;
-
- preempt_disable();
- cpu = atomic_read(&dhd->tx_cpu);
- net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
-
- /*
- * Now if the NET_TX has pushed the packet in the same
- * CPU that is chosen for Tx processing, seperate it out
- * i.e run the TX processing tasklet in compl_cpu
- */
- if (net_tx_cpu == cpu)
- cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if (!cpu_online(cpu)) {
- /*
- * Ooohh... but the Chosen CPU is not online,
- * Do the job in the current CPU itself.
- */
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- } else {
- /*
- * Schedule tx_dispatcher_work to on the cpu which
- * in turn will schedule tx_tasklet.
- */
- dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
- * on another cpu. The tx_tasklet will take care of actually putting
- * the skbs into appropriate flow ring and ringing H2D interrupt
- *
- * @dhdp: pointer to dhd_pub object
- */
-static void
-dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
-
- curr_cpu = get_cpu();
- put_cpu();
-
- /* Record the CPU in which the TX request from Network stack came */
- atomic_set(&dhd->net_tx_cpu, curr_cpu);
-
- /* Schedule the work to dispatch ... */
- dhd_tx_dispatcher_fn(dhdp);
-}
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-/**
- * dhd_napi_poll - Load balance napi poll function to process received
- * packets and send up the network stack using netif_receive_skb()
- *
- * @napi: napi object in which context this poll function is invoked
- * @budget: number of packets to be processed.
- *
- * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
- * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
- * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
- * packet tag and sendup.
- */
-static int
-dhd_napi_poll(struct napi_struct *napi, int budget)
-{
- int ifid;
- const int pkt_count = 1;
- const int chan = 0;
- struct sk_buff * skb;
- unsigned long flags;
- struct dhd_info *dhd;
- int processed = 0;
- struct sk_buff_head rx_process_queue;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd = container_of(napi, struct dhd_info, rx_napi_struct);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
- __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
- __skb_queue_head_init(&rx_process_queue);
-
- /* extract the entire rx_napi_queue into local rx_process_queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
- OSL_PREFETCH(skb->data);
-
- ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
- __FUNCTION__, skb, ifid));
-
- dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
- processed++;
- }
-
- DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
-
- DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
- napi_complete(napi);
-
- return budget - 1;
-}
-
-/**
- * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
- * poll list. This function may be invoked via the smp_call_function_single
- * from a remote CPU.
- *
- * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
- * after the napi_struct is added to the softnet data's poll_list
- *
- * @info: pointer to a dhd_info struct
- */
-static void
-dhd_napi_schedule(void *info)
-{
- dhd_info_t *dhd = (dhd_info_t *)info;
-
- DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
- __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
-
- /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
- if (napi_schedule_prep(&dhd->rx_napi_struct)) {
- __napi_schedule(&dhd->rx_napi_struct);
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
- }
-
- /*
- * If the rx_napi_struct was already running, then we let it complete
- * processing all its packets. The rx_napi_struct may only run on one
- * core at a time, to avoid out-of-order handling.
- */
-}
-
-/**
- * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
- * action after placing the dhd's rx_process napi object in the the remote CPU's
- * softnet data's poll_list.
- *
- * @dhd: dhd_info which has the rx_process napi object
- * @on_cpu: desired remote CPU id
- */
-static INLINE int
-dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
-{
- int wait = 0; /* asynchronous IPI */
- DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
- __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
-
- if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
- DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
- __FUNCTION__, on_cpu));
- }
-
- DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
-
- return 0;
-}
-
-/*
- * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
- * Why should we do this?
- * The candidacy algorithm is run from the call back function
- * registered to CPU hotplug notifier. This call back happens from Worker
- * context. The dhd_napi_schedule_on is also from worker context.
- * Note that both of this can run on two different CPUs at the same time.
- * So we can possibly have a window where a given CPUn is being brought
- * down from CPUm while we try to run a function on CPUn.
- * To prevent this its better have the whole code to execute an SMP
- * function under get_online_cpus.
- * This function call ensures that hotplug mechanism does not kick-in
- * until we are done dealing with online CPUs
- * If the hotplug worker is already running, no worries because the
- * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
- *
- * The below mentioned code structure is proposed in
- * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
- * for the question
- * Q: I need to ensure that a particular cpu is not removed when there is some
- * work specific to this cpu is in progress
- *
- * According to the documentation calling get_online_cpus is NOT required, if
- * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
- * run from Work Queue context we have to call these functions
- */
-static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_napi_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- dhd_napi_schedule(dhd);
-}
-
-/**
- * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
- * to run on another CPU. The rx_napi_struct's poll function will retrieve all
- * the packets enqueued into the rx_napi_queue and sendup.
- * The producer's rx packet queue is appended to the rx_napi_queue before
- * dispatching the rx_napi_struct.
- */
-void
-dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
-{
- unsigned long flags;
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
- int on_cpu;
-#ifdef DHD_LB_IRQSET
- cpumask_t cpus;
-#endif /* DHD_LB_IRQSET */
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
- skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
-
- /* append the producer's queue of packets to the napi's rx process queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- preempt_disable();
- on_cpu = atomic_read(&dhd->rx_napi_cpu);
-#ifdef DHD_LB_IRQSET
- if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
- (!cpu_online(on_cpu))) {
-#else
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
-#endif /* DHD_LB_IRQSET */
- DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
- curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_napi_schedule(dhd);
- } else {
- DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
- __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
- */
-void
-dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
-{
- dhd_info_t *dhd = dhdp->info;
-
- DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
- pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
- DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
- __skb_queue_tail(&dhd->rx_pend_queue, pkt);
-}
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LB_IRQSET
-void
-dhd_irq_set_affinity(dhd_pub_t *dhdp)
-{
- unsigned int irq = (unsigned int)-1;
- int err = BCME_OK;
-
- if (!dhdp) {
- DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!dhdp->bus) {
- DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
- return;
- }
-
- dhdpcie_get_pcieirq(dhdp->bus, &irq);
- err = irq_set_affinity(irq, dhdp->info->cpumask_primary);
- if (err)
- DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
- __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary)));
-}
-#endif /* DHD_LB_IRQSET */
-#endif /* DHD_LB */
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
/** Returns dhd iflist index corresponding the the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
@@ -3309,6 +1669,68 @@
return 0;
}
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
+{
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+ unsigned long flags = 0;
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ cfg = wl_get_cfg(primary_ndev);
+
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ DHD_OS_WAKE_LOCK(dhdp);
+	/* Only send the urgent event if the HAL has started; otherwise skip it and just clear the dump state below */
+ if (wl_cfg80211_is_hal_started(cfg)) {
+ int timeleft = 0;
+
+ DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
+ dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
+
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+ &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
+ if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
+ DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ }
+ } else {
+ DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
+ }
+ DHD_OS_WAKE_UNLOCK(dhdp);
+	/* In case of a dhd_os_busbusy_wait_bitmask() timeout, the hal dump
+	 * bit will not have been cleared, so clear it here.
+	 */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+
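A hedged usage sketch for dhd_wait_for_file_dump(): a hypothetical caller in the dump path would give the HAL a chance to collect first and fall back gracefully if it never responds. The wrapper name is illustrative; the actual call site is not shown in this patch.

/* Hypothetical call site; the real trigger point lives elsewhere in the
 * core-dump path.
 */
static void example_trigger_hal_dump(dhd_pub_t *dhdp)
{
	if (dhd_wait_for_file_dump(dhdp) != BCME_OK) {
		/* HAL did not collect in time; continue with the
		 * driver's own dump handling.
		 */
		DHD_ERROR(("%s: HAL file dump incomplete\n", __FUNCTION__));
	}
}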
#ifdef PKT_FILTER_SUPPORT
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
static bool
@@ -3467,12 +1889,15 @@
uint roamvar = 1;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- int bcn_li_bcn;
+ int bcn_li_bcn = 1;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
uint nd_ra_filter = 0;
#ifdef ENABLE_IPMCAST_FILTER
int ipmcast_l2filter;
#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif /* CUSTOM_EVENT_PM_WAKE */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
struct dhd_info *dhdinfo;
@@ -3537,18 +1962,22 @@
dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd_arp_offload_enable(dhd, TRUE);
+#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
allmulti = 0;
for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
ret = dhd_iovar(dhd, i, "allmulti",
(char *)&allmulti,
sizeof(allmulti),
NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s allmulti failed %d\n",
- __FUNCTION__, ret));
+ if (ret < 0) {
+ DHD_ERROR(("%s allmulti failed %d\n",
+ __FUNCTION__, ret));
+ }
}
}
#endif /* PASS_ALL_MCAST_PKTS */
@@ -3614,7 +2043,14 @@
sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* OEM_ANDROID && BCMPCIE */
-
+#ifdef WL_CFG80211
+ /* Disable cfg80211 feature events during suspend */
+ ret = wl_cfg80211_config_suspend_events(
+ dhd_linux_get_primary_netdev(dhd), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to disable events (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
@@ -3645,13 +2081,22 @@
}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- bcn_li_bcn = 0;
+ if (bcn_li_dtim) {
+ bcn_li_bcn = 0;
+ }
ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
sizeof(bcn_li_bcn), NULL, 0, TRUE);
if (ret < 0) {
DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
}
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#if defined(WL_CFG80211) && defined(WL_BCNRECV)
+ ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
+ if (ret != BCME_OK) {
+ DHD_ERROR(("failed to stop beacon recv event on"
+ " suspend state (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 && WL_BCNRECV */
#ifdef NDO_CONFIG_SUPPORT
if (dhd->ndo_enable) {
if (!dhd->ndo_host_ip_overflow) {
@@ -3699,6 +2144,26 @@
DHD_ERROR(("failed to set intr_width (%d)\n", ret));
}
#endif /* DYNAMIC_SWOOB_DURATION */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh",
+ (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ if (!dhd->sroamed) {
+ ret = dhd_sroam_set_mon(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
} else {
#ifdef PKT_FILTER_SUPPORT
@@ -3719,6 +2184,16 @@
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
+#if defined(WL_CFG80211) && defined(WL_BCNRECV)
+ ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
+ if (ret != BCME_OK) {
+ DHD_ERROR(("failed to resume beacon recv state (%d)\n",
+ ret));
+ }
+#endif /* WL_CFG80211 && WL_BCNRECV */
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
/* disable pkt filter */
dhd_enable_packet_filter(0, dhd);
@@ -3802,7 +2277,6 @@
}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- bcn_li_bcn = 1;
ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
sizeof(bcn_li_bcn), NULL, 0, TRUE);
if (ret < 0) {
@@ -3845,9 +2319,32 @@
DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
}
#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh",
+ (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ ret = dhd_sroam_set_mon(dhd, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef WL_CFG80211
+ /* Enable cfg80211 feature events during resume */
+ ret = wl_cfg80211_config_suspend_events(
+ dhd_linux_get_primary_netdev(dhd), TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to enable events (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 */
#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(dhd);
+ dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
#endif /* DHD_LB_IRQSET */
}
}
@@ -3939,14 +2436,13 @@
if (tmo->increment > tmo->tick)
tmo->increment = tmo->tick;
} else {
- wait_queue_head_t delay_wait;
- DECLARE_WAITQUEUE(wait, current);
- init_waitqueue_head(&delay_wait);
- add_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- (void)schedule_timeout(1);
- remove_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_RUNNING);
+		/*
+		 * OSL_SLEEP() corresponds to usleep_range(). In a non-atomic
+		 * context where the exact wakeup time is flexible, usleep_range()
+		 * is preferable to udelay(): it improves responsiveness and
+		 * reduces power consumption.
+		 */
+ OSL_SLEEP(jiffies_to_msecs(1));
}
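For reference, a minimal sketch of what the replaced wait-queue dance reduces to, assuming OSL_SLEEP(ms) is backed by usleep_range()/msleep() as the comment above states; this is an illustration, not the OSL implementation.

/* Illustrative only: sleep for roughly one tick without wait-queue
 * boilerplate. usleep_range() gives the scheduler a window to coalesce
 * wakeups, which is the responsiveness/power benefit noted above.
 * (Requires <linux/delay.h> and <linux/jiffies.h>.)
 */
static void example_sleep_one_tick(void)
{
	unsigned long us = jiffies_to_msecs(1) * 1000UL;

	usleep_range(us, us + 100);
}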
return 0;
@@ -4044,11 +2540,7 @@
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
struct net_device *dev;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *mclist;
-#endif // endif
uint32 allmulti, cnt;
wl_ioctl_t ioc;
@@ -4067,18 +2559,10 @@
dev = dhd->iflist[i]->net;
if (!dev)
continue;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt_iface[i] = netdev_mc_count(dev);
cnt += cnt_iface[i];
-#else
- cnt += dev->mc_count;
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
/* Determine initial value of allmulti flag */
allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
@@ -4092,17 +2576,9 @@
dev = dhd->iflist[ifidx]->net;
if (!dev)
return;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt = netdev_mc_count(dev);
-#else
- cnt = dev->mc_count;
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
/* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
@@ -4138,10 +2614,7 @@
DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
dev = dhd->iflist[i]->net;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
@@ -4159,23 +2632,11 @@
cnt_iface[i], MAC2STRDBG(ha->addr)));
cnt_iface[i]--;
}
-#else /* LINUX < 2.6.35 */
- for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0));
- cnt_iface[i]--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
}
}
#else /* !MCAST_LIST_ACCUMULATION */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
@@ -4190,16 +2651,7 @@
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
-#else /* LINUX < 2.6.35 */
- for (mclist = dev->mc_list; (mclist && (cnt > 0));
- cnt--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
#endif /* MCAST_LIST_ACCUMULATION */
memset(&ioc, 0, sizeof(ioc));
@@ -4523,6 +2975,7 @@
dhd_net_if_unlock_local(dhd);
}
+#ifndef DHD_DIRECT_SET_MAC
static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
@@ -4575,6 +3028,7 @@
DHD_OS_WAKE_UNLOCK(&dhd->pub);
dhd_net_if_unlock_local(dhd);
}
+#endif /* DHD_DIRECT_SET_MAC */
static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
@@ -4656,8 +3110,14 @@
memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
dhdif->set_macaddress = TRUE;
dhd_net_if_unlock_local(dhd);
+#ifdef DHD_DIRECT_SET_MAC
+	/* Update the new MAC address directly in this context (no deferred work) */
+ ret = _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr);
+ dhdif->set_macaddress = FALSE;
+#else
dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+#endif // endif
return ret;
}
@@ -4746,15 +3206,7 @@
*/
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- ulong flags;
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
}
@@ -4770,6 +3222,9 @@
int ret = BCME_OK;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh = NULL;
+ bool pkt_ether_type_802_1x = FALSE;
+ uint8 pkt_flow_prio;
+
#if defined(DHD_L2_FILTER)
dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif // endif
@@ -4785,14 +3240,18 @@
if (dhdp->busstate == DHD_BUS_SUSPEND) {
DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
PKTCFREE(dhdp->osh, pktbuf, TRUE);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
}
#endif /* PCIE_FULL_DONGLE */
+ /* Reject if pktlen > MAX_MTU_SZ */
+ if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
+ /* free the packet here since the caller won't */
+ dhdp->tx_big_packets++;
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+
#ifdef DHD_L2_FILTER
/* if dhcp_unicast is enabled, we need to convert the */
/* broadcast DHCP ACK/REPLY packets to Unicast. */
@@ -4845,28 +3304,19 @@
/* back up 802.1x's priority */
dhdp->prio_8021x = prio;
#endif /* DHD_LOSSLESS_ROAMING */
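+ /* Remember that this frame is an EAPOL (802.1x) packet so the
+ * flow-priority selection below can treat it specially.
+ */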
+ pkt_ether_type_802_1x = TRUE;
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
atomic_inc(&dhd->pend_8021x_cnt);
#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
#endif /* WL_CFG80211 && WL_WPS_SYNC */
-#if defined(DHD_8021X_DUMP)
- dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
-#endif /* DHD_8021X_DUMP */
}
-
- if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
-#ifdef DHD_DHCP_DUMP
- dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
-#endif /* DHD_DHCP_DUMP */
-#ifdef DHD_ICMP_DUMP
- dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
-#endif /* DHD_ICMP_DUMP */
- }
+ dhd_dump_pkt(dhdp, ifidx, pktdata,
+ (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
} else {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
}
{
@@ -4892,18 +3342,39 @@
#endif /* !PKTPRIO_OVERRIDE */
}
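+ /* BCM_REFERENCE() keeps these locals referenced to avoid unused-variable
+ * warnings when the features that consume them are compiled out.
+ */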
+ BCM_REFERENCE(pkt_ether_type_802_1x);
+ BCM_REFERENCE(pkt_flow_prio);
+
+#ifdef SUPPORT_SET_TID
+ dhd_set_tid_based_on_uid(dhdp, pktbuf);
+#endif /* SUPPORT_SET_TID */
+
#ifdef PCIE_FULL_DONGLE
/*
* Lkup the per interface hash table, for a matching flowring. If one is not
* available, allocate a unique flowid and add a flowring entry.
* The found or newly created flowid is placed into the pktbuf's tag.
*/
- ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+
+#ifdef DHD_LOSSLESS_ROAMING
+ /* For LLR, override the mapping and use the prio 7 (NC) flowring for 802.1x packets */
+ if (pkt_ether_type_802_1x) {
+ pkt_flow_prio = PRIO_8021D_NC;
+ } else
+#endif /* DHD_LOSSLESS_ROAMING */
+ {
+ pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
+ }
+
+ ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
if (ret != BCME_OK) {
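+ /* Flowring lookup/creation failed and the EAPOL frame will be dropped,
+ * so undo the pending 802.1x count taken earlier.
+ */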
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
+ atomic_dec(&dhd->pend_8021x_cnt);
+ }
PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
return ret;
}
-#endif // endif
+#endif /* PCIE_FULL_DONGLE */
#ifdef PROP_TXSTATUS
if (dhd_wlfc_is_supported(dhdp)) {
@@ -5014,37 +3485,11 @@
return ret;
}
-#if defined(DHD_LB_TXP)
-
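+/* With CFI (Control Flow Integrity) enabled, .ndo_start_xmit handlers must
+ * return netdev_tx_t so that the indirect-call signature check passes.
+ */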
+#ifdef CFI_CHECK
+netdev_tx_t BCMFASTPATH
+#else /* CFI_CHECK */
int BCMFASTPATH
-dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
- int ifidx, void *skb)
-{
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
-
- /* If the feature is disabled run-time do TX from here */
- if (atomic_read(&dhd->lb_txp_active) == 0) {
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
- return __dhd_sendpkt(&dhd->pub, ifidx, skb);
- }
-
- /* Store the address of net device and interface index in the Packet tag */
- DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
- DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
-
- /* Enqueue the skb into tx_pend_queue */
- skb_queue_tail(&dhd->tx_pend_queue, skb);
-
- DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
-
- /* Dispatch the Tx job to be processed by the tx_tasklet */
- dhd_lb_tx_dispatch(&dhd->pub);
-
- return NETDEV_TX_OK;
-}
-#endif /* DHD_LB_TXP */
-
-int BCMFASTPATH
+#endif /* CFI_CHECK */
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
@@ -5059,7 +3504,11 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (dhd_query_bus_erros(&dhd->pub)) {
+#ifdef CFI_CHECK
+ return NETDEV_TX_BUSY;
+#else
return -ENODEV;
+#endif /* CFI_CHECK */
}
DHD_GENERAL_LOCK(&dhd->pub, flags);
@@ -5077,11 +3526,7 @@
DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
dhd_os_busbusy_wake(&dhd->pub);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
#endif /* DHD_PCIE_RUNTIMEPM */
@@ -5098,11 +3543,7 @@
#endif /* PCIE_FULL_DONGLE */
dhd_os_busbusy_wake(&dhd->pub);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
DHD_OS_WAKE_LOCK(&dhd->pub);
@@ -5110,6 +3551,7 @@
#if defined(DHD_HANG_SEND_UP_TEST)
if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
dhd->pub.busstate = DHD_BUS_DOWN;
}
#endif /* DHD_HANG_SEND_UP_TEST */
@@ -5132,11 +3574,7 @@
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
ifp = DHD_DEV_IFP(net);
@@ -5151,11 +3589,7 @@
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
@@ -5234,6 +3668,12 @@
}
#endif /* DHD_PSTA */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
+ ifp->tsyncack_txed++;
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
/* If this packet has been hold or got freed, just return */
@@ -5285,11 +3725,7 @@
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Return ok: we always eat the packet */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return 0;
-#else
return NETDEV_TX_OK;
-#endif // endif
}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
@@ -5352,7 +3788,11 @@
"error: dhd_start_xmit():%d\n", ret);
}
+#ifdef CFI_CHECK
+netdev_tx_t BCMFASTPATH
+#else
int BCMFASTPATH
+#endif /* CFI_CHECK */
dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
{
struct dhd_rx_tx_work *start_xmit_work;
@@ -5370,7 +3810,11 @@
if (!start_xmit_work) {
netdev_err(net,
"error: failed to alloc start_xmit_work\n");
+#ifdef CFI_CHECK
+ ret = NETDEV_TX_BUSY;
+#else
ret = -ENOMEM;
+#endif /* CFI_CHECK */
goto exit;
}
@@ -5378,18 +3822,27 @@
start_xmit_work->skb = skb;
start_xmit_work->net = net;
queue_work(dhd->tx_wq, &start_xmit_work->work);
+#ifdef CFI_CHECK
+ ret = NETDEV_TX_OK;
+#else
ret = NET_XMIT_SUCCESS;
+#endif /* CFI_CHECK */
} else if (dhd->pub.busstate == DHD_BUS_DATA) {
ret = dhd_start_xmit(skb, net);
} else {
/* when bus is down */
+#ifdef CFI_CHECK
+ ret = NETDEV_TX_BUSY;
+#else
ret = -ENODEV;
+#endif /* CFI_CHECK */
}
exit:
return ret;
}
+
void
dhd_bus_wakeup_work(dhd_pub_t *dhdp)
{
@@ -5408,6 +3861,25 @@
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
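+/*
+ * Stop or wake the netif queue of a single net_device and update the
+ * pktid txq stop/start counters; if the queue is already in the requested
+ * state (tracked via dhdp->txoff) only an error message is logged.
+ */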
+static void
+__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
+{
+
+ if ((state == ON) && (dhdp->txoff == FALSE)) {
+ netif_stop_queue(net);
+ dhd_prot_update_pktid_txq_stop_cnt(dhdp);
+ } else if (state == ON) {
+ DHD_ERROR(("%s: Netif Queue has already stopped\n", __FUNCTION__));
+ }
+ if ((state == OFF) && (dhdp->txoff == TRUE)) {
+ netif_wake_queue(net);
+ dhd_prot_update_pktid_txq_start_cnt(dhdp);
+ } else if (state == OFF) {
+ DHD_ERROR(("%s: Netif Queue has already started\n", __FUNCTION__));
+ }
+}
+
void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
@@ -5427,60 +3899,21 @@
#endif // endif
if (ifidx == ALL_INTERFACES) {
- /* Flow control on all active interfaces */
- dhdp->txoff = state;
for (i = 0; i < DHD_MAX_IFS; i++) {
if (dhd->iflist[i]) {
net = dhd->iflist[i]->net;
- if (state == ON)
- netif_stop_queue(net);
- else
- netif_wake_queue(net);
+ __dhd_txflowcontrol(dhdp, net, state);
}
}
} else {
if (dhd->iflist[ifidx]) {
net = dhd->iflist[ifidx]->net;
- if (state == ON)
- netif_stop_queue(net);
- else
- netif_wake_queue(net);
+ __dhd_txflowcontrol(dhdp, net, state);
}
}
+ dhdp->txoff = state;
}
-#ifdef DHD_RX_DUMP
-typedef struct {
- uint16 type;
- const char *str;
-} PKTTYPE_INFO;
-
-static const PKTTYPE_INFO packet_type_info[] =
-{
- { ETHER_TYPE_IP, "IP" },
- { ETHER_TYPE_ARP, "ARP" },
- { ETHER_TYPE_BRCM, "BRCM" },
- { ETHER_TYPE_802_1X, "802.1X" },
-#ifdef BCMWAPI_WAI
- { ETHER_TYPE_WAI, "WAPI" },
-#endif /* BCMWAPI_WAI */
- { 0, ""}
-};
-
-static const char *_get_packet_type_str(uint16 type)
-{
- int i;
- int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
-
- for (i = 0; i < n; i++) {
- if (packet_type_info[i].type == type)
- return packet_type_info[i].str;
- }
-
- return packet_type_info[n].str;
-}
-#endif /* DHD_RX_DUMP */
-
#ifdef DHD_MCAST_REGEN
/*
* Description: This function is called to do the reverse translation
@@ -5527,15 +3960,7 @@
* does netif_rx, disables irq, raise NET_IF_RX softirq and
* enables interrupts back
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- ulong flags;
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
static int
@@ -5557,12 +3982,7 @@
*/
pktlen = skb->len + ETH_HLEN;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
pktdata = (void *)skb_mac_header(skb);
-#else
- pktdata = (void *)skb->mac.raw;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
-
ret = wl_host_event_get_data(pktdata, pktlen, &evu);
if (ret != BCME_OK) {
@@ -5582,24 +4002,16 @@
return ret;
}
-#define DHD_EVENT_LOGTRACE_BOUND 12
-#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 1
+/*
+ * dhd_event_logtrace_process_items processes
+ * each skb from evt_trace_queue.
+ * Returns TRUE if more packets remain to be processed,
+ * else returns FALSE.
+ */
-static void
-dhd_event_logtrace_process(struct work_struct * work)
+static int
+dhd_event_logtrace_process_items(dhd_info_t *dhd)
{
-/* Ignore compiler warnings due to -Werror=cast-qual */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct delayed_work *dw = to_delayed_work(work);
- struct dhd_info *dhd =
- container_of(dw, struct dhd_info, event_log_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
dhd_pub_t *dhdp;
struct sk_buff *skb;
uint32 qlen;
@@ -5607,14 +4019,14 @@
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
+ return 0;
}
dhdp = &dhd->pub;
if (!dhdp) {
DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
- return;
+ return 0;
}
qlen = skb_queue_len(&dhd->evt_trace_queue);
@@ -5643,6 +4055,9 @@
dhd_event_logtrace_pkt_process(dhdp, skb);
}
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
/* Send packet up if logtrace_pkt_sendup is TRUE */
if (dhdp->logtrace_pkt_sendup) {
#ifdef DHD_USE_STATIC_CTRLBUF
@@ -5683,11 +4098,202 @@
}
}
- /* Reschedule the workqueue if more packets to be processed */
- if (qlen >= DHD_EVENT_LOGTRACE_BOUND) {
- schedule_delayed_work(&dhd->event_log_dispatcher_work,
+ /* Reschedule if more packets to be processed */
+ return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
+}
+
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+static int
+dhd_logtrace_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
+ int ret;
+
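+ /* Main loop: block on the binary semaphore, then drain logtrace items
+ * (or EDL completions) until a pass no longer hits the processing bound.
+ */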
+ while (1) {
+ dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
+ if (!binary_sema_down(tsk)) {
+ dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
+ SMP_RD_BARRIER_DEPENDS();
+ if (dhd->pub.dongle_reset == FALSE) {
+ do {
+ /* Check terminated before processing the items */
+ if (tsk->terminated) {
+ DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
+ goto exit;
+ }
+#ifdef EWP_EDL
+ /* check if EDL is being used */
+ if (dhd->pub.dongle_edl_support) {
+ ret = dhd_prot_process_edl_complete(&dhd->pub,
+ &dhd->event_data);
+ } else {
+ ret = dhd_event_logtrace_process_items(dhd);
+ }
+#else
+ ret = dhd_event_logtrace_process_items(dhd);
+#endif /* EWP_EDL */
+ /* if ret > 0, the bound has been reached, so to be fair to other
+ * processes we need to yield the scheduler.
+ * The comment above yield()'s definition says:
+ * If you want to use yield() to wait for something,
+ * use wait_event().
+ * If you want to use yield() to be 'nice' for others,
+ * use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+ if (ret > 0) {
+ cond_resched();
+ OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
+ } else if (ret < 0) {
+ DHD_ERROR(("%s: ERROR should not reach here\n",
+ __FUNCTION__));
+ }
+ } while (ret > 0);
+ }
+ if (tsk->flush_ind) {
+ DHD_ERROR(("%s: flushed\n", __FUNCTION__));
+ dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
+ tsk->flush_ind = 0;
+ complete(&tsk->flushed);
+ }
+ } else {
+ DHD_ERROR(("%s: unexpted break\n", __FUNCTION__));
+ dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
+ break;
+ }
+ }
+exit:
+ complete_and_exit(&tsk->completed, 0);
+ dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
+}
+#else
+static void
+dhd_event_logtrace_process(struct work_struct * work)
+{
+ int ret = 0;
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dhd_info *dhd =
+ container_of(dw, struct dhd_info, event_log_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+#ifdef EWP_EDL
+ if (dhd->pub.dongle_edl_support) {
+ ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
+ } else {
+ ret = dhd_event_logtrace_process_items(dhd);
+ }
+#else
+ ret = dhd_event_logtrace_process_items(dhd);
+#endif /* EWP_EDL */
+
+ if (ret > 0) {
+ schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
}
+
+ return;
+}
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+
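+/* Kick log trace processing: wake the logtrace kthread via its binary
+ * semaphore, or schedule the delayed work immediately when the kthread
+ * is not used.
+ */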
+void
+dhd_schedule_logtrace(void *dhd_info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ binary_sema_up(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return;
+}
+
+void
+dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+void
+dhd_flush_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ flush_delayed_work(&dhd->event_log_dispatcher_work);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+int
+dhd_init_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return BCME_OK;
+}
+
+int
+dhd_reinit_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ /* Re-init only if PROC_STOP from dhd_stop was called
+ * which can be checked via thr_pid
+ */
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
+ 0, "dhd_logtrace_thread");
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+ }
+#else
+ /* No need to re-init for WQ as cancel_delayed_work_sync
+ * does not delete the WQ
+ */
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return BCME_OK;
}
void
@@ -5701,7 +4307,7 @@
#endif /* PCIE_FULL_DONGLE */
skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
- schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
+ dhd_schedule_logtrace(dhd);
}
void
@@ -5718,6 +4324,42 @@
#endif /* DHD_USE_STATIC_CTRLBUF */
}
}
+
+#ifdef BCMPCIE
+void
+dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
+{
+ struct sk_buff *skb = NULL;
+ uint32 pktsize = 0;
+ void *pkt = NULL;
+ info_buf_payload_hdr_t *infobuf = NULL;
+ dhd_info_t *dhd = dhdp->info;
+ uint8 *pktdata = NULL;
+
+ if (!msg)
+ return;
+
+ /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
+ infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
+ pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
+ sizeof(uint32));
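+ /* Allocate a native packet, copy the whole info buf message into it and
+ * hand it to the network stack on the primary interface.
+ */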
+ pkt = PKTGET(dhdp->osh, pktsize, FALSE);
+ if (!pkt) {
+ DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
+ } else {
+ PKTSETLEN(dhdp->osh, pkt, pktsize);
+ pktdata = PKTDATA(dhdp->osh, pkt);
+ memcpy(pktdata, msg, pktsize);
+ /* For infobuf packets, assign the primary interface's
+ * n/w device to skb->dev
+ */
+ skb = PKTTONATIVE(dhdp->osh, pkt);
+ skb->dev = dhd->iflist[0]->net;
+ /* Send pkt UP */
+ dhd_netif_rx_ni(skb);
+ }
+}
+#endif /* BCMPCIE */
#endif /* SHOW_LOGTRACE */
/** Called when a frame is received by the dongle on interface 'ifidx' */
@@ -5739,10 +4381,7 @@
void *skbhead = NULL;
void *skbprev = NULL;
uint16 protocol;
-#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
- defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
unsigned char *dump_data;
-#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
#ifdef DHD_MCAST_REGEN
uint8 interface_role;
if_flow_lkup_t *if_flow_lkup;
@@ -5754,6 +4393,7 @@
#endif /* DHD_WAKE_STATUS */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ BCM_REFERENCE(dump_data);
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
struct ether_header *eh;
@@ -5799,16 +4439,39 @@
}
#endif /* DHD_WAKE_STATUS */
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+
+ if (ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
+ __FUNCTION__, ifidx));
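+ /* BRCM event packets may have been allocated from the static control
+ * buffer pool, so free them with the matching API; data packets go
+ * back through PKTCFREE.
+ */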
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
+ continue;
+ }
+
ifp = dhd->iflist[ifidx];
if (ifp == NULL) {
DHD_ERROR(("%s: ifp is NULL. drop packet\n",
__FUNCTION__));
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
continue;
}
- eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
-
/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
@@ -5896,6 +4559,29 @@
}
#endif /* MCAST_REGEN */
+#ifdef DHDTCPSYNC_FLOOD_BLK
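+ /* Track received TCP SYNs against SYN-ACKs transmitted on this
+ * interface; if the per-second rate exceeds TCP_SYNC_FLOOD_LIMIT,
+ * a SYN flood is suspected and the blocking work is scheduled.
+ */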
+ if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
+ int delta_sec;
+ int delta_sync;
+ int sync_per_sec;
+ u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ ifp->tsync_rcvd++;
+ delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
+ delta_sec = curr_time - ifp->last_sync;
+ if (delta_sec > 1) {
+ sync_per_sec = delta_sync/delta_sec;
+ if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
+ schedule_work(&ifp->blk_tsfl_work);
+ DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
+ "sync recvied %d pkt/sec \n",
+ ifidx, sync_per_sec));
+ }
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+ }
+
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
dhd_tcpdata_info_get(dhdp, pktbuf);
#endif // endif
@@ -5922,26 +4608,45 @@
}
#endif /* DHD_PSTA */
-#ifdef PCIE_FULL_DONGLE
- if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
- (!ifp->ap_isolate)) {
- eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
- if (ETHER_ISUCAST(eh->ether_dhost)) {
- if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
- dhd_sendpkt(dhdp, ifidx, pktbuf);
- continue;
- }
- } else {
- void *npktbuf = NULL;
- if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
- !DHD_PKT_FLAGS_IS_NO_FWD(pktbuf) &&
- (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
- dhd_sendpkt(dhdp, ifidx, npktbuf);
+ DHD_TRACE(("\nAp isolate in dhd is %d\n", ifp->ap_isolate));
+ if (ifidx >= 0 && dhdp != NULL && dhdp->info != NULL &&
+ dhdp->info->iflist[ifidx] != NULL) {
+ if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+ (!ifp->ap_isolate)) {
+ DHD_TRACE(("%s: MACADDR: " MACDBG " ifidx %d\n",
+ __FUNCTION__,
+ MAC2STRDBG(dhdp->info->iflist[ifidx]->mac_addr),
+ ifidx));
+ DHD_TRACE(("%s: DEST: " MACDBG " ifidx %d\n",
+ __FUNCTION__, MAC2STRDBG(eh->ether_dhost), ifidx));
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+ DHD_TRACE(("\nPacket not for us send down\n"));
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
+ continue;
+ }
+ } else {
+ void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+ if (npktbuf) {
+ DHD_TRACE(("\ncalling bcmc dhd_sendpkt"
+ "and send dup up\n"));
+ dhd_sendpkt(dhdp, ifidx, npktbuf);
+ }
}
}
}
-#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
+ (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
+ DHD_ERROR(("%s: Reassoc is in progress. "
+ "Drop EAPOL M1 frame\n", __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
* Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
@@ -5953,60 +4658,22 @@
*/
eth = skb->data;
len = skb->len;
-
-#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
- defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
dump_data = skb->data;
-#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
-
protocol = (skb->data[12] << 8) | skb->data[13];
+
if (protocol == ETHER_TYPE_802_1X) {
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
#endif /* WL_CFG80211 && WL_WPS_SYNC */
-#ifdef DHD_8021X_DUMP
- dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
-#endif /* DHD_8021X_DUMP */
- }
-
- if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
-#ifdef DHD_DHCP_DUMP
- dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
-#endif /* DHD_DHCP_DUMP */
-#ifdef DHD_ICMP_DUMP
- dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
-#endif /* DHD_ICMP_DUMP */
- }
-#ifdef DHD_RX_DUMP
- DHD_ERROR(("RX DUMP[%s] - %s\n",
- dhd_ifname(dhdp, ifidx), _get_packet_type_str(protocol)));
- if (protocol != ETHER_TYPE_BRCM) {
- if (dump_data[0] == 0xFF) {
- DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
-
- if ((dump_data[12] == 8) &&
- (dump_data[13] == 6)) {
- DHD_ERROR(("%s: ARP %d\n",
- __FUNCTION__, dump_data[0x15]));
- }
- } else if (dump_data[0] & 1) {
- DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(dump_data)));
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
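+ /* Mark that M3 of the EAPOL 4-way handshake was received so a
+ * missing M4 can be detected later.
+ */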
+ if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
+ OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
}
-#ifdef DHD_RX_FULL_DUMP
- {
- int k;
- for (k = 0; k < skb->len; k++) {
- DHD_ERROR(("%02X ", dump_data[k]));
- if ((k & 15) == 15)
- DHD_ERROR(("\n"));
- }
- DHD_ERROR(("\n"));
- }
-#endif /* DHD_RX_FULL_DUMP */
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
}
-#endif /* DHD_RX_DUMP */
+ dhd_rx_pkt_dump(dhdp, ifidx, dump_data, len);
+ dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
if (pkt_wake) {
@@ -6036,16 +4703,10 @@
if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
bcm_event_msg_u_t evu;
- int ret_event;
- int event_type;
+ int ret_event, event_type;
+ void *pkt_data = skb_mac_header(skb);
- ret_event = wl_host_event_get_data(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
- skb_mac_header(skb),
-#else
- skb->mac.raw,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
- len, &evu);
+ ret_event = wl_host_event_get_data(pkt_data, len, &evu);
if (ret_event != BCME_OK) {
DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
@@ -6078,13 +4739,7 @@
}
#endif /* SHOW_LOGTRACE */
- ret_event = dhd_wl_host_event(dhd, ifidx,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
- skb_mac_header(skb),
-#else
- skb->mac.raw,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
- len, &event, &data);
+ ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
wl_event_to_host_order(&event);
#if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
@@ -6278,28 +4933,19 @@
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
+#if defined(ARGOS_NOTIFY_CB)
+ argos_register_notifier_deinit();
+#endif // endif
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#if defined(DHD_LB_RXP)
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_receive_skb(skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else /* !defined(DHD_LB_RXP) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx_ni(skb);
+#endif /* defined(DHD_LB_RXP) */
DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#else
- ulong flags;
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx(skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
-#endif /* !defined(DHD_LB_RXP) */
}
}
}
@@ -6358,7 +5004,6 @@
{
dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_if_t *ifp;
- int ifidx;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
@@ -6367,17 +5012,10 @@
goto error;
}
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
- goto error;
- }
-
- ifp = dhd->iflist[ifidx];
-
+ ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
if (!ifp) {
- ASSERT(ifp);
- DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__));
+ /* return empty stats */
+ DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
goto error;
}
@@ -6554,15 +5192,16 @@
void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
dhd_os_runtimepm_timer(dhdp, 0);
- dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
- DHD_ERROR(("DHD Runtime PM Disabled \n"));
+ dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
}
void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
{
- if (dhd_get_idletime(dhdp)) {
- dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
- DHD_ERROR(("DHD Runtime PM Enabled \n"));
+ /* Enable Runtime PM except for MFG Mode */
+ if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
+ if (dhd_get_idletime(dhdp)) {
+ dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
+ }
}
}
@@ -6697,9 +5336,6 @@
while (1) {
if (down_interruptible(&tsk->sema) == 0) {
void *skb;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
- ulong flags;
-#endif // endif
#ifdef ENABLE_ADAPTIVE_SCHED
dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
@@ -6719,15 +5355,7 @@
PKTSETNEXT(pub->osh, skb, NULL);
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-
-#endif // endif
skb = skbnext;
}
#if defined(WAIT_DEQUEUE)
@@ -6870,6 +5498,7 @@
}
return;
} else {
+ dhd_bus_set_dpc_sched_time(dhdp);
tasklet_schedule(&dhd->tasklet);
}
}
@@ -6975,7 +5604,6 @@
}
#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
@@ -6988,9 +5616,7 @@
struct ethtool_ops dhd_ethtool_ops = {
.get_drvinfo = dhd_ethtool_get_drvinfo
};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
@@ -7014,9 +5640,7 @@
/* Copy out any request driver name */
if (copy_from_user(&info, uaddr, sizeof(info)))
return -EFAULT;
-//tianyan@2021.7.27 modify for add wifi6 module start
- strncpy(drvname, info.driver, 32);
-//tianyan@2021.7.27 modify for add wifi6 module end
+ strncpy(drvname, info.driver, sizeof(info.driver));
drvname[sizeof(info.driver)-1] = '\0';
/* clear struct for return */
@@ -7102,7 +5726,6 @@
return 0;
}
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
@@ -7137,10 +5760,14 @@
dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
} else if (dhdp->d3ackcnt_timeout) {
- dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
} else {
- dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_IOCTL_RESP_TIMEOUT;
}
}
net_os_send_hang_message(net);
@@ -7240,19 +5867,9 @@
bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx_ni(dhd->monitor_skb);
DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#else
- ulong flags;
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx(dhd->monitor_skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
dhd->monitor_skb = NULL;
@@ -7266,11 +5883,19 @@
#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
+#ifdef CFI_CHECK
+static netdev_tx_t
+#else
static int
+#endif /* CFI_CHECK */
dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
{
PKTFREE(NULL, skb, FALSE);
+#ifdef CFI_CHECK
+ return NETDEV_TX_OK;
+#else
return 0;
+#endif /* CFI_CHECK */
}
#if defined(BT_OVER_SDIO)
@@ -7489,13 +6114,9 @@
dhd_pub_t *dhdp = (dhd_pub_t *)handle;
dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- dhdp->hang_was_sent = 0;
+ dhdp->hang_was_sent = 0;
- dhd_os_send_hang_message(&dhd->pub);
-#else
- DHD_ERROR(("%s: unsupported\n", __FUNCTION__));
-#endif // endif
+ dhd_os_send_hang_message(&dhd->pub);
}
EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
@@ -7521,22 +6142,23 @@
};
static void
-dhd_add_monitor_if(void *handle, void *event_info, u8 event)
+dhd_add_monitor_if(dhd_info_t *dhd)
{
- dhd_info_t *dhd = handle;
struct net_device *dev;
char *devname;
-
- if (event != DHD_WQ_WORK_IF_ADD) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
+ uint32 scan_suppress = FALSE;
+ int ret = BCME_OK;
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
+ if (dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
+ return;
+ }
+
dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
if (!dev) {
DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
@@ -7557,77 +6179,139 @@
dev->type = ARPHRD_IEEE80211_RADIOTAP;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- dev->hard_start_xmit = dhd_monitor_start;
- dev->do_ioctl = dhd_monitor_ioctl;
- dev->get_stats = dhd_monitor_get_stats;
-#else
dev->netdev_ops = &netdev_monitor_ops;
-#endif // endif
- if (register_netdev(dev)) {
+ if (register_netdevice(dev)) {
DHD_ERROR(("%s, register_netdev failed for %s\n",
__FUNCTION__, dev->name));
free_netdev(dev);
+ return;
}
+
+ if (FW_SUPPORTED((&dhd->pub), monitor)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in monitor mode */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIME_PM */
+ scan_suppress = TRUE;
+ /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
+ ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
+ sizeof(scan_suppress), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+
dhd->monitor_dev = dev;
}
static void
-dhd_del_monitor_if(void *handle, void *event_info, u8 event)
+dhd_del_monitor_if(dhd_info_t *dhd)
{
- dhd_info_t *dhd = handle;
-
- if (event != DHD_WQ_WORK_IF_DEL) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
+ int ret = BCME_OK;
+ uint32 scan_suppress = FALSE;
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
+ if (!dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__));
+ return;
+ }
+
+ if (FW_SUPPORTED((&dhd->pub), monitor)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Enable RuntimePM */
+ DHD_ENABLE_RUNTIME_PM(&dhd->pub);
+ DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIME_PM */
+ scan_suppress = FALSE;
+ /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
+ ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
+ sizeof(scan_suppress), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+
if (dhd->monitor_dev) {
- unregister_netdev(dhd->monitor_dev);
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
- MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
- MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
-#else
- free_netdev(dhd->monitor_dev);
-#endif /* 2.6.24 */
-
+ if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(dhd->monitor_dev);
+ } else {
+ unregister_netdevice(dhd->monitor_dev);
+ }
dhd->monitor_dev = NULL;
}
}
static void
-dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
+dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
{
- dhd_info_t *info = dhd->info;
+ dhd_info_t *dhd = pub->info;
DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
- if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
- DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
- return;
- }
- /* Delete monitor */
+ dhd_net_if_lock_local(dhd);
if (!val) {
- info->monitor_type = val;
- dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
- dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
- return;
+ /* Delete monitor */
+ dhd_del_monitor_if(dhd);
+ } else {
+ /* Add monitor */
+ dhd_add_monitor_if(dhd);
}
-
- /* Add monitor */
- info->monitor_type = val;
- dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
- dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+ dhd->monitor_type = val;
+ dhd_net_if_unlock_local(dhd);
}
#endif /* WL_MONITOR */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+/*
+ * Helper function:
+ * Used for RTE console message time syncing with Host printk
+ */
+void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
+{
+ dhd_info_t *info = dhdp->info;
+
+ /* Ideally the "state" should be always TRUE */
+ dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
+ dhd_deferred_work_rte_log_time_sync,
+ DHD_WQ_WORK_PRIORITY_LOW);
+}
+
+void
+dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd_info = handle;
+ dhd_pub_t *dhd;
+
+ if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd_info) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhd = &dhd_info->pub;
+
+ /*
+ * Function to send IOVAR for console timesyncing
+ * between Host and Dongle.
+ * If the IOVAR fails,
+ * 1. dhd_rte_time_sync_ms is set to 0 and
+ * 2. HOST Dongle console time sync will *not* happen.
+ */
+ dhd_h2d_log_time_sync(dhd);
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
int bcmerror = BCME_OK;
@@ -7705,12 +6389,23 @@
bcmerror = BCME_UNSUPPORTED;
goto done;
}
+
bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#ifdef WL_MONITOR
/* Intercept monitor ioctl here, add/del monitor if */
if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
- dhd_set_monitor(pub, ifidx, *(int32*)data_buf);
+ int val = 0;
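+ /* The WLC_SET_MONITOR payload length depends on the caller, so read
+ * only as many bytes (4, 2 or 1) as were actually supplied.
+ */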
+ if (data_buf != NULL && buflen != 0) {
+ if (buflen >= 4) {
+ val = *(int*)data_buf;
+ } else if (buflen >= 2) {
+ val = *(short*)data_buf;
+ } else {
+ val = *(char*)data_buf;
+ }
+ }
+ dhd_set_monitor(pub, ifidx, val);
}
#endif /* WL_MONITOR */
@@ -7740,6 +6435,9 @@
void __user *ioc_buf_user = NULL; /**< buffer in user space */
u16 buflen = 0;
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+ allow_delay_fwdl = 1;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
if (atomic_read(&exit_in_progress)) {
DHD_ERROR(("%s module exit in progress\n", __func__));
bcmerror = BCME_DONGLE_DOWN;
@@ -7764,6 +6462,15 @@
ifidx = dhd_net2idx(dhd, net);
DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+#if defined(WL_STATIC_IF)
+ /* skip for static ndev when it is down */
+ if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
+ }
+#endif /* WL_STATIC_IF */
+
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
DHD_PERIM_UNLOCK(&dhd->pub);
@@ -7782,14 +6489,12 @@
}
#endif /* defined(WL_WIRELESS_EXT) */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
if (cmd == SIOCETHTOOL) {
ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
}
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
#if defined(OEM_ANDROID)
if (cmd == SIOCDEVPRIVATE+1) {
@@ -7900,7 +6605,7 @@
static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
{
if (dhd) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_init(&dhd->cpufreq_fix);
#endif // endif
dhd->cpufreq_fix_status = FALSE;
@@ -7910,7 +6615,7 @@
static void dhd_fix_cpu_freq(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_lock(&dhd->cpufreq_fix);
#endif // endif
if (dhd && !dhd->cpufreq_fix_status) {
@@ -7922,18 +6627,18 @@
dhd->cpufreq_fix_status = TRUE;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_unlock(&dhd->cpufreq_fix);
#endif // endif
}
static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_lock(&dhd ->cpufreq_fix);
#endif // endif
if (dhd && dhd->cpufreq_fix_status != TRUE) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_unlock(&dhd->cpufreq_fix);
#endif // endif
return;
@@ -7946,7 +6651,7 @@
DHD_ERROR(("pm_qos_add_requests called\n"));
dhd->cpufreq_fix_status = FALSE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_unlock(&dhd->cpufreq_fix);
#endif // endif
}
@@ -8019,16 +6724,28 @@
ifidx = dhd_net2idx(dhd, net);
BCM_REFERENCE(ifidx);
- /* Set state and stop OS transmissions */
- netif_stop_queue(net);
+ DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));
+
#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
/* If static if is operational, don't reset the chip */
- if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
- DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
+ if (static_if_ndev_get_state(cfg, net) == NDEV_STATE_FW_IF_CREATED) {
+ DHD_ERROR(("static if operational. skip chip reset.\n"));
skip_reset = true;
+ wl_cfg80211_sta_ifdown(net);
goto exit;
}
#endif /* WL_STATIC_IF && WL_CFG80211 */
+
+#if defined(WL_VIF_SUPPORT)
+ if (vif_num > 0) {
+ DHD_ERROR(("virtual if operational. skip chip reset.\n"));
+ skip_reset = true;
+ wl_cfg80211_sta_ifdown(net);
+ goto exit;
+ }
+#endif /* WL_VIF_SUPPORT */
+
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
#ifdef WL_CFG80211
/* Disable Runtime PM before interface down */
@@ -8052,12 +6769,19 @@
* when the primary Interface is brought down. [ifconfig wlan0 down]
*/
if (!dhd_download_fw_on_driverload) {
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
int i;
#ifdef WL_CFG80211_P2P_DEV_IF
wl_cfg80211_del_p2p_wdev(net);
#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_clear(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
dhd_net_if_lock_local(dhd);
for (i = 1; i < DHD_MAX_IFS; i++)
@@ -8083,8 +6807,8 @@
cancel_work_sync(dhd->dhd_deferred_wq);
#ifdef SHOW_LOGTRACE
- /* Wait till event_log_dispatcher_work finishes */
- cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
+ /* Wait till event logs work/kthread finishes */
+ dhd_cancel_logtrace_process_sync(dhd);
#endif /* SHOW_LOGTRACE */
#if defined(DHD_LB_RXP)
@@ -8096,6 +6820,9 @@
#endif /* DHD_LB_TXP */
}
+#if defined(ARGOS_NOTIFY_CB)
+ argos_register_notifier_deinit();
+#endif // endif
#ifdef DHDTCPACK_SUPPRESS
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
@@ -8179,6 +6906,7 @@
#endif /* SUPPORT_DEEP_SLEEP */
#endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
/* Clear country spec for for built-in type driver */
if (!dhd_download_fw_on_driverload) {
@@ -8291,8 +7019,7 @@
}
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID) && \
- defined(BCMSDIO)
+#if defined(OEM_ANDROID) && defined(BCMSDIO)
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
}
@@ -8304,15 +7031,23 @@
DHD_PERIM_LOCK(&dhd->pub);
dhd->pub.dongle_trap_occured = 0;
dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
dhd->pub.hang_reason = 0;
dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
dhd->pub.d3ack_timeout_occured = 0;
+ dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ dhd->pub.smmu_fault_occurred = 0;
#ifdef DHD_LOSSLESS_ROAMING
dhd->pub.dequeue_prec_map = ALLPRIO;
#endif // endif
+
#if defined(OEM_ANDROID) && !defined(WL_CFG80211)
/*
* Force start if ifconfig_up gets called before START command
@@ -8348,9 +7083,21 @@
#if defined(WL_CFG80211) && defined(OEM_ANDROID)
if (!dhd_download_fw_on_driverload) {
DHD_ERROR(("\n%s\n", dhd_version));
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+#ifdef SHOW_LOGTRACE
+ /* dhd_cancel_logtrace_process_sync is called in dhd_stop
+ * for built-in models. Need to start logtrace kthread before
+ * calling wifi on, because once wifi is on, EDL will be in action
+ * any moment, and if kthread is not active, FW event logs will
+ * not be available
+ */
+ if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
+ goto exit;
+ }
+#endif /* SHOW_LOGTRACE */
#if defined(BT_OVER_SDIO)
ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
wl_android_set_wifi_on_flag(TRUE);
@@ -8445,9 +7192,6 @@
skb_queue_head_init(&dhd->rx_napi_queue);
} /* rx_napi_netdev == NULL */
#endif /* DHD_LB_RXP */
-#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(&dhd->pub);
-#endif /* DHD_LB_IRQSET */
#if defined(DHD_LB_TXP)
/* Use the variant that uses locks */
@@ -8476,6 +7220,18 @@
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
}
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+#ifdef DHD_LB_IRQSET
+ dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
+#endif /* DHD_LB_IRQSET */
+#if defined(ARGOS_NOTIFY_CB)
+ argos_register_notifier_init(net);
+#endif // endif
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
#if defined(SET_RPS_CPUS)
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
@@ -8489,8 +7245,6 @@
#endif /* WL_CFG80211 */
}
- /* Allow transmit calls */
- netif_start_queue(net);
dhd->pub.up = 1;
if (wl_event_enable) {
@@ -8523,8 +7277,7 @@
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID) && \
- defined(BCMSDIO)
+#if defined(OEM_ANDROID) && defined(BCMSDIO)
mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif // endif
#endif /* MULTIPLE_SUPPLICANT */
@@ -8532,6 +7285,47 @@
return ret;
}
+/*
+ * ndo_open handler for primary ndev
+ */
+static int
+dhd_pri_open(struct net_device *net)
+{
+ s32 ret;
+
+ ret = dhd_open(net);
+ if (unlikely(ret)) {
+ DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
+ return ret;
+ }
+
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ DHD_ERROR(("[%s] tx queue started\n", net->name));
+ return ret;
+}
+
+/*
+ * ndo_stop handler for primary ndev
+ */
+static int
+dhd_pri_stop(struct net_device *net)
+{
+ s32 ret;
+
+ /* stop tx queue */
+ netif_stop_queue(net);
+ DHD_ERROR(("[%s] tx queue stopped\n", net->name));
+
+ ret = dhd_stop(net);
+ if (unlikely(ret)) {
+ DHD_ERROR(("dhd_stop failed: %d\n", ret));
+ return ret;
+ }
+
+ return ret;
+}
+
#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
/*
* For static I/Fs, the firmware interface init
@@ -8547,13 +7341,16 @@
cfg = wl_get_cfg(net);
primary_netdev = bcmcfg_to_prmry_ndev(cfg);
- if (!IS_CFG80211_STATIC_IF(cfg, net)) {
+ if (!is_static_iface(cfg, net)) {
DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
ret = BCME_OK;
goto done;
}
DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
+ /* Ensure fw is initialized. If it is already initialized,
+ * dhd_open will return success.
+ */
ret = dhd_open(primary_netdev);
if (unlikely(ret)) {
DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
@@ -8561,6 +7358,10 @@
}
ret = wl_cfg80211_static_if_open(net);
+ if (!ret) {
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ }
done:
return ret;
}
@@ -8571,23 +7372,34 @@
struct bcm_cfg80211 *cfg;
struct net_device *primary_netdev = NULL;
int ret = BCME_OK;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
+ /* Ensure queue is disabled */
+ netif_tx_disable(net);
+
cfg = wl_get_cfg(net);
- if (!IS_CFG80211_STATIC_IF(cfg, net)) {
+ if (!is_static_iface(cfg, net)) {
DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
return BCME_OK;
}
ret = wl_cfg80211_static_if_close(net);
+ if (dhd->pub.up == 0) {
+ /* If fw is down, return */
+ DHD_ERROR(("fw down\n"));
+ return BCME_OK;
+ }
/* If STA iface is not in operational, invoke dhd_close from this
* context.
*/
primary_netdev = bcmcfg_to_prmry_ndev(cfg);
if (!(primary_netdev->flags & IFF_UP)) {
ret = dhd_stop(primary_netdev);
+ } else {
+ DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
}
return ret;
@@ -8604,13 +7416,12 @@
}
#ifdef MULTIPLE_SUPPLICANT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID) && \
- defined(BCMSDIO)
+#if defined(OEM_ANDROID) && defined(BCMSDIO)
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
return 0;
}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* OEM_ANDROID & BCMSDIO */
#endif /* MULTIPLE_SUPPLICANT */
/* && defined(OEM_ANDROID) && defined(BCMSDIO) */
@@ -8800,34 +7611,47 @@
}
#endif /* WL_NATOE */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
-#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
-#else
-#define IFP_NET_DESTRUCTOR ifp->net->destructor
-#endif // endif
-
-#ifdef WL_STATIC_IF
-static s32
+/* This API maps ndev to ifp inclusive of static IFs */
+static dhd_if_t *
dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
{
+ dhd_if_t *ifp = NULL;
+#ifdef WL_STATIC_IF
u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
- dhd_if_t *ifp;
+#else
+ u32 ifidx = (DHD_MAX_IFS - 1);
+#endif /* WL_STATIC_IF */
dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
-
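+	/* Walk the interface list from the highest slot downwards so that
+	 * static interfaces, which occupy the tail slots, are also matched.
+	 */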
do {
ifp = dhdinfo->iflist[ifidx];
if (ifp && (ifp->net == ndev)) {
- DHD_ERROR(("match found for %s. ifidx:%d\n",
+ DHD_TRACE(("match found for %s. ifidx:%d\n",
ndev->name, ifidx));
- return ifidx;
+ return ifp;
}
} while (ifidx--);
DHD_ERROR(("no entry found for %s\n", ndev->name));
- return BCME_ERROR;
+ return NULL;
}
+bool
+dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
+{
+ dhd_if_t *ifp = NULL;
+
+ if (!dhdp || !ndev) {
+ DHD_ERROR(("wrong input\n"));
+ ASSERT(0);
+ return false;
+ }
+
+ ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
+ return (ifp && (ifp->static_if == true));
+}
+
+#ifdef WL_STATIC_IF
/* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name
* are not known. For e.g: static i/f case. This function lets to update it once
* it is known.
@@ -8846,15 +7670,13 @@
ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
- if ((cur_idx = dhd_get_ifp_by_ndev(dhdp, ndev)) < 0) {
+ if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
return -ENODEV;
}
-
- /* Static Iface holds data in the last */
- ifp = dhdinfo->iflist[cur_idx];
+ cur_idx = ifp->idx;
if (if_state == NDEV_STATE_OS_IF_CREATED) {
- /* mark static if for future use */
+ /* mark static if */
ifp->static_if = TRUE;
return BCME_OK;
}
@@ -8869,6 +7691,14 @@
return -EINVAL;
}
+ /* For static if delete case, cleanup the if before ifidx update */
+ if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
+ (if_state == NDEV_STATE_FW_IF_FAILED)) {
+ dhd_cleanup_if(ifp->net);
+ dev_priv = DHD_DEV_PRIV(ndev);
+ dev_priv->ifidx = ifidx;
+ }
+
/* update the iflist ifidx slot with cached info */
dhdinfo->iflist[ifidx] = ifp;
dhdinfo->iflist[cur_idx] = NULL;
@@ -8882,16 +7712,12 @@
/* initialize the dongle provided if name */
if (dngl_name) {
strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
- } else if (ndev->name) {
+ } else if (ndev->name[0] != '\0') {
strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
}
- if (mac != NULL)
- memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
- } else if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
- (if_state == NDEV_STATE_FW_IF_FAILED)) {
- dev_priv = DHD_DEV_PRIV(ndev);
- dev_priv->ifidx = ifidx;
- netif_tx_disable(ifp->net);
+ if (mac != NULL) {
+ (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
+ }
}
DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
ifidx, cur_idx, if_state));
@@ -8980,6 +7806,12 @@
ifp->net->name[IFNAMSIZ - 1] = '\0';
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
+#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
+#else
+#define IFP_NET_DESTRUCTOR ifp->net->destructor
+#endif // endif
+
#ifdef WL_CFG80211
if (ifidx == 0) {
IFP_NET_DESTRUCTOR = free_netdev;
@@ -9000,11 +7832,9 @@
strncpy(ifp->dngl_name, name, IFNAMSIZ);
}
-#ifdef PCIE_FULL_DONGLE
/* Initialize STA info list */
INIT_LIST_HEAD(&ifp->sta_list);
DHD_IF_STA_LIST_LOCK_INIT(ifp);
-#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_L2_FILTER
ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
@@ -9013,6 +7843,20 @@
DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
return ifp->net;
fail:
@@ -9038,6 +7882,55 @@
return NULL;
}
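+/*
+ * Common per-interface cleanup shared by dhd_remove_if() and the static
+ * interface teardown path: release the L2 filter ARP table, drop the STA
+ * list and delete the flowrings owned by a virtual (non-AP) interface.
+ */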
+static void
+dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
+{
+#ifdef PCIE_FULL_DONGLE
+ s32 ifidx = 0;
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+#endif /* PCIE_FULL_DONGLE */
+
+ if (ifp != NULL) {
+ if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
+ ASSERT(0);
+ return;
+ }
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdp->tickcnt);
+ deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+ dhd_if_del_sta_list(ifp);
+#ifdef PCIE_FULL_DONGLE
+ /* Delete flowrings of virtual interface */
+ ifidx = ifp->idx;
+ if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
+ dhd_flow_rings_delete(dhdp, ifidx);
+ }
+#endif /* PCIE_FULL_DONGLE */
+ }
+}
+
+void
+dhd_cleanup_if(struct net_device *net)
+{
+ dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
+ dhd_pub_t *dhdp = &dhdinfo->pub;
+ dhd_if_t *ifp;
+
+ if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
+ (ifp->idx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
+ ASSERT(0);
+ return;
+ }
+
+ dhd_cleanup_ifp(dhdp, ifp);
+}
+
/* unregister and free the the net_device interface associated with the indexed
* slot, also free the slot memory and set the slot pointer to NULL
*/
@@ -9048,15 +7941,21 @@
dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
dhd_if_t *ifp;
unsigned long flags;
- u32 timeout;
-#ifdef PCIE_FULL_DONGLE
- if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup;
-#endif /* PCIE_FULL_DONGLE */
+ long timeout;
ifp = dhdinfo->iflist[ifidx];
if (ifp != NULL) {
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ cancel_delayed_work_sync(&ifp->m4state_work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ cancel_work_sync(&ifp->blk_tsfl_work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef WL_STATIC_IF
+ /* static IF will be handled in detach */
if (ifp->static_if) {
DHD_TRACE(("Skip del iface for static interface\n"));
return BCME_OK;
@@ -9110,21 +8009,7 @@
ifp->del_in_progress = false;
DHD_GENERAL_UNLOCK(dhdpub, flags);
}
-#ifdef DHD_L2_FILTER
- bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
- NULL, FALSE, dhdpub->tickcnt);
- deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
- ifp->phnd_arp_table = NULL;
-#endif /* DHD_L2_FILTER */
-
- dhd_if_del_sta_list(ifp);
-#ifdef PCIE_FULL_DONGLE
- /* Delete flowrings of virtual interface */
- if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
- dhd_flow_rings_delete(dhdpub, ifidx);
- }
-#endif /* PCIE_FULL_DONGLE */
-
+ dhd_cleanup_ifp(dhdpub, ifp);
DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
@@ -9134,10 +8019,9 @@
return BCME_OK;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
static struct net_device_ops dhd_ops_pri = {
- .ndo_open = dhd_open,
- .ndo_stop = dhd_stop,
+ .ndo_open = dhd_pri_open,
+ .ndo_stop = dhd_pri_stop,
.ndo_get_stats = dhd_get_stats,
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
@@ -9174,7 +8058,6 @@
.ndo_set_multicast_list = dhd_set_multicast_list,
#endif // endif
};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
int
dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
@@ -9421,40 +8304,12 @@
return error;
} /* dhd_init_static_strs_array */
-static int
-dhd_trace_open_proc(struct inode *inode, struct file *file)
-{
- return single_open(file, 0, NULL);
-}
-
-ssize_t
-dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
-{
- trace_buf_info_t *trace_buf_info;
- int ret = BCME_ERROR;
-
- ASSERT(g_dhd_pub);
- mutex_lock(&g_dhd_pub->dhd_trace_lock);
- trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh,
- sizeof(trace_buf_info_t));
- if (trace_buf_info != NULL) {
- dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info);
- if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
- return -EFAULT;
- if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
- ret = BUF_NOT_AVAILABLE;
- else
- ret = trace_buf_info->size;
- MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
- } else
- DHD_ERROR(("Memory allocation Failed\n"));
-
- mutex_unlock(&g_dhd_pub->dhd_trace_lock);
- return ret;
-}
#endif /* SHOW_LOGTRACE */
#ifdef DHD_ERPOM
+uint enable_erpom = 0;
+module_param(enable_erpom, int, 0);
+
int
dhd_wlan_power_off_handler(void *handler, unsigned char reason)
{
@@ -9468,11 +8323,7 @@
/* save core dump to a file */
if (dhdp->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
- if (dhdp->sssr_inited) {
- dhdp->info->no_wq_sssrdump = TRUE;
- dhd_bus_sssr_dump(dhdp);
- dhdp->info->no_wq_sssrdump = FALSE;
- }
+ dhdp->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
dhd_bus_mem_dump(dhdp);
@@ -9508,6 +8359,7 @@
}
#endif /* DHD_ERPOM */
+
/** Called once for each hardware (dongle) instance that this DHD manages */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
@@ -9529,6 +8381,11 @@
dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+#ifdef PCIE_FULL_DONGLE
+ ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
+ ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
+#endif /* PCIE_FULL_DONGLE */
+
/* will implement get_ids for DBUS later */
#if defined(BCMSDIO)
dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
@@ -9553,6 +8410,7 @@
#ifdef DUMP_IOCTL_IOV_LIST
dll_init(&(dhd->pub.dump_iovlist_head));
#endif /* DUMP_IOCTL_IOV_LIST */
+ dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
dhd->adapter = adapter;
#ifdef BT_OVER_SDIO
dhd->pub.is_bt_recovery_required = FALSE;
@@ -9560,7 +8418,6 @@
#endif /* BT_OVER_SDIO */
g_dhd_pub = &dhd->pub;
- DHD_INFO(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
#ifdef DHD_DEBUG
dll_init(&(dhd->pub.mw_list_head));
@@ -9599,6 +8456,7 @@
/* Link to bus module */
dhd->pub.bus = bus;
dhd->pub.hdrlen = bus_hdrlen;
+ dhd->pub.txoff = FALSE;
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
@@ -9624,11 +8482,7 @@
/* initialize the l2_filter_cnt */
dhd->pub.l2_filter_cnt = 0;
#endif // endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- net->open = NULL;
-#else
net->netdev_ops = NULL;
-#endif // endif
mutex_init(&dhd->dhd_iovar_mutex);
sema_init(&dhd->proto_sem, 1);
@@ -9668,7 +8522,6 @@
init_waitqueue_head(&dhd->dmaxfer_wait);
init_waitqueue_head(&dhd->pub.tx_completion_wait);
dhd->pub.dhd_bus_busy_state = 0;
-
/* Initialize the spinlocks */
spin_lock_init(&dhd->sdlock);
spin_lock_init(&dhd->txqlock);
@@ -9691,17 +8544,17 @@
DHD_OS_WAKE_LOCK_INIT(dhd);
dhd->wakelock_counter = 0;
/* wakelocks prevent a system from going into a low power state */
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
mutex_init(&dhd->dhd_net_if_mutex);
mutex_init(&dhd->dhd_suspend_mutex);
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
mutex_init(&dhd->dhd_apf_mutex);
#endif /* PKT_FILTER_SUPPORT && APF */
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(OEM_ANDROID) */
+#endif /* defined(OEM_ANDROID) */
dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
/* Attach and link in the protocol */
@@ -9744,13 +8597,12 @@
}
#endif /* SHOW_LOGTRACE */
-#ifdef DEBUGABILITY
/* attach debug if support */
if (dhd_os_dbg_attach(&dhd->pub)) {
DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
goto fail;
}
-
+#ifdef DEBUGABILITY
#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
/* enable verbose ring to support dump_trace_buf */
dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
@@ -9764,13 +8616,24 @@
#endif /* DBG_PKT_MON */
#endif /* DEBUGABILITY */
+#ifdef DHD_STATUS_LOGGING
+ dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
+ MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
+ if (dhd->pub.statlog == NULL) {
+ DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
+ }
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef DHD_LOG_DUMP
dhd_log_dump_init(&dhd->pub);
#endif /* DHD_LOG_DUMP */
-
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_init(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
#ifdef DHD_PKT_LOGGING
dhd_os_attach_pktlog(&dhd->pub);
#endif /* DHD_PKT_LOGGING */
+
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
if (dhd->pub.hang_info == NULL) {
@@ -9814,9 +8677,7 @@
#ifdef DHD_PCIE_RUNTIMEPM
/* Setup up the runtime PM Idlecount timer */
- init_timer(&dhd->rpm_timer);
- dhd->rpm_timer.data = (ulong)dhd;
- dhd->rpm_timer.function = dhd_runtimepm;
+ init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
dhd->rpm_timer_valid = FALSE;
dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
@@ -9828,9 +8689,9 @@
#ifdef SHOW_LOGTRACE
skb_queue_head_init(&dhd->evt_trace_queue);
- if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL)
- DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
- mutex_init(&dhd->pub.dhd_trace_lock);
+
+ /* Create ring proc entries */
+ dhd_dbg_ring_proc_create(&dhd->pub);
#endif /* SHOW_LOGTRACE */
/* Set up the bottom half handler */
@@ -9890,6 +8751,9 @@
}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#if defined(OEM_ANDROID)
+ INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
+#endif /* #if OEM_ANDROID */
#ifdef DEBUG_CPU_FREQ
dhd->new_freq = alloc_percpu(int);
dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
@@ -9946,6 +8810,11 @@
#endif /* DHD_LB_TXP_DEFAULT_ENAB */
#endif /* DHD_LB_TXP */
+#ifdef DHD_LB_RXP
+ /* Turn ON RX-path load balancing by default */
+ atomic_set(&dhd->lb_rxp_active, 1);
+#endif /* DHD_LB_RXP */
+
/* Initialize the Load Balancing Tasklets and Napi object */
#if defined(DHD_LB_TXC)
tasklet_init(&dhd->tx_compl_tasklet,
@@ -9953,7 +8822,6 @@
INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_TXC */
-
#if defined(DHD_LB_RXC)
tasklet_init(&dhd->rx_compl_tasklet,
dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
@@ -9981,19 +8849,39 @@
dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
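+ /* Defer dongle AXI error handling to a workqueue */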
+ INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
#if defined(BCMPCIE)
dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
if (dhd->pub.extended_trap_data == NULL) {
DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
+ if (dhd->pub.axi_err_dump == NULL) {
+ DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE && ETD */
#ifdef SHOW_LOGTRACE
- INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
+ if (dhd_init_logtrace_process(dhd) != BCME_OK) {
+ goto fail;
+ }
#endif /* SHOW_LOGTRACE */
DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
+#ifdef EWP_EDL
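+ /* Disable host EDL support if the EDL memory cannot be initialized */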
+ if (host_edl_support) {
+ if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
+ host_edl_support = FALSE;
+ }
+ }
+#endif /* EWP_EDL */
+
(void)dhd_sysfs_init(dhd);
#ifdef WL_NATOE
@@ -10016,17 +8904,50 @@
}
#endif /* DHD_DUMP_MNGR */
#ifdef DHD_FW_COREDUMP
+ /* Set memdump default values */
+#ifdef CUSTOMER_HW4_DEBUG
+ dhd->pub.memdump_enabled = DUMP_DISABLED;
+#elif defined(OEM_ANDROID)
+ dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
+#else
+ dhd->pub.memdump_enabled = DUMP_MEMFILE;
+#endif /* CUSTOMER_HW4_DEBUG */
/* Check the memdump capability */
dhd_get_memdump_info(&dhd->pub);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_ERPOM
- pom_handler = &dhd->pub.pom_wlan_handler;
- pom_handler->func_id = WLAN_FUNC_ID;
- pom_handler->handler = (void *)g_dhd_pub;
- pom_handler->power_off = dhd_wlan_power_off_handler;
- pom_handler->power_on = dhd_wlan_power_on_handler;
- pom_func_register(pom_handler);
+ if (enable_erpom) {
+ pom_handler = &dhd->pub.pom_wlan_handler;
+ pom_handler->func_id = WLAN_FUNC_ID;
+ pom_handler->handler = (void *)g_dhd_pub;
+ pom_handler->power_off = dhd_wlan_power_off_handler;
+ pom_handler->power_on = dhd_wlan_power_on_handler;
+
+ dhd->pub.pom_func_register = NULL;
+ dhd->pub.pom_func_deregister = NULL;
+ dhd->pub.pom_toggle_reg_on = NULL;
+
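+ /* Resolve the POM module entry points; symbol_put() drops the module
+ * reference taken by symbol_get() while the resolved function pointers
+ * are kept for later use.
+ */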
+ dhd->pub.pom_func_register = symbol_get(pom_func_register);
+ dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
+ dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
+
+ symbol_put(pom_func_register);
+ symbol_put(pom_func_deregister);
+ symbol_put(pom_toggle_reg_on);
+
+ if (!dhd->pub.pom_func_register ||
+ !dhd->pub.pom_func_deregister ||
+ !dhd->pub.pom_toggle_reg_on) {
+ DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
+ "POM is not loaded\n", __FUNCTION__));
+ ASSERT(0);
+ goto fail;
+ }
+ dhd->pub.pom_func_register(pom_handler);
+ dhd->pub.enable_erpom = TRUE;
+
+ }
#endif /* DHD_ERPOM */
return &dhd->pub;
@@ -10101,7 +9022,6 @@
if (dhdinfo->fw_path[0] == '\0') {
if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
fw = adapter->fw_path;
-
}
if (dhdinfo->nv_path[0] == '\0') {
if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
@@ -10272,7 +9192,9 @@
uint chipid = dhd_bus_chip_id(dhdp);
uint config_chipid;
-#ifdef BCM4361_CHIP
+#ifdef BCM4375_CHIP
+ config_chipid = BCM4375_CHIP_ID;
+#elif defined(BCM4361_CHIP)
config_chipid = BCM4361_CHIP_ID;
#elif defined(BCM4359_CHIP)
config_chipid = BCM4359_CHIP_ID;
@@ -10371,16 +9293,37 @@
DHD_TRACE(("Enter %s:\n", __FUNCTION__));
dhdp->dongle_trap_occured = 0;
+#ifdef DHD_SSSR_DUMP
+ /* Flag to indicate sssr dump is collected */
+ dhdp->sssr_dump_collected = 0;
+#endif /* DHD_SSSR_DUMP */
dhdp->iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
dhdp->d3ack_timeout_occured = 0;
+ dhdp->livelock_occured = 0;
+ dhdp->pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ /* Clear induced error during initialization */
+ dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
+
+ /* set default value for now. Will be updated again in dhd_preinit_ioctls()
+ * after querying FW
+ */
+ dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ dhdp->event_log_max_sets_queried = FALSE;
+ dhdp->smmu_fault_occurred = 0;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = FALSE;
+#endif /* DNGL_AXI_ERROR_LOGGING */
DHD_PERIM_LOCK(dhdp);
/* try to download image and nvram to the dongle */
if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
/* Indicate FW Download has not yet done */
- dhd->pub.fw_download_done = FALSE;
+ dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
#if defined(DHD_DEBUG) && defined(BCMSDIO)
fw_download_start = OSL_SYSUPTIME();
@@ -10397,7 +9340,7 @@
return ret;
}
/* Indicate FW Download has succeeded */
- dhd->pub.fw_download_done = TRUE;
+ dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
}
if (dhd->pub.busstate != DHD_BUS_LOAD) {
DHD_PERIM_UNLOCK(dhdp);
@@ -10512,10 +9455,12 @@
DHD_PERIM_UNLOCK(dhdp);
return ret;
}
-#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
+
+#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820)
DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
-#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
+#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
#if defined(DHD_DEBUG) && defined(BCMSDIO)
f2_sync_end = OSL_SYSUPTIME();
@@ -10533,6 +9478,7 @@
#endif /* ARP_OFFLOAD_SUPPORT */
DHD_PERIM_UNLOCK(dhdp);
+
return 0;
}
#ifdef WLTDLS
@@ -10664,6 +9610,10 @@
uint32 reason = ntoh32(event->reason);
unsigned long flags;
+ /* No handling needed for peer discovered reason */
+ if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
+ return BCME_ERROR;
+ }
if (reason == WLC_E_TDLS_PEER_CONNECTED)
connect = TRUE;
else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
@@ -10870,6 +9820,44 @@
#endif /* WLAIBSS */
#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
+#ifdef WL_BAM
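+/*
+ * If the currently associated AP is on the ADPS bad-AP list and ADPS is
+ * enabled, put ADPS into the suspend state for this connection.
+ */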
+static int
+dhd_check_adps_bad_ap(dhd_pub_t *dhd)
+{
+ struct net_device *ndev;
+ struct bcm_cfg80211 *cfg;
+ struct wl_profile *profile;
+ struct ether_addr bssid;
+
+ if (!dhd_is_associated(dhd, 0, NULL)) {
+ DHD_ERROR(("%s - not associated\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ ndev = dhd_linux_get_primary_netdev(dhd);
+ if (!ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ profile = wl_get_profile_by_netdev(cfg, ndev);
+ memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
+ if (wl_adps_bad_ap_check(cfg, &bssid)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* WL_BAM */
+
int
dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
{
@@ -10914,6 +9902,12 @@
}
}
+#ifdef WL_BAM
+ if (on) {
+ dhd_check_adps_bad_ap(dhd);
+ }
+#endif /* WL_BAM */
+
exit:
if (iov_buf) {
MFREE(dhd->osh, iov_buf, len);
@@ -10932,6 +9926,7 @@
uint32 buf_key_b4_m4 = 1;
uint8 msglen;
eventmsgs_ext_t *eventmask_msg = NULL;
+ uint32 event_log_max_sets = 0;
char* iov_buf = NULL;
int ret2 = 0;
uint32 wnm_cap = 0;
@@ -10957,6 +9952,7 @@
int wlfc_enable = TRUE;
#ifndef DISABLE_11N
uint32 hostreorder = 1;
+ uint chipid = 0;
#endif /* DISABLE_11N */
#endif /* PROP_TXSTATUS */
#endif // endif
@@ -10968,10 +9964,9 @@
#ifdef SUPPORT_SET_CAC
uint32 cac = 1;
#endif /* SUPPORT_SET_CAC */
-
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
- dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk;
-#endif /* DHD_NON_DMA_M2M_CORRUPTION */
+#ifdef DHD_BUS_MEM_ACCESS
+ uint32 enable_memuse = 1;
+#endif /* DHD_BUS_MEM_ACCESS */
#ifdef OEM_ANDROID
#ifdef DHD_ENABLE_LPC
@@ -10992,7 +9987,7 @@
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
#if defined(ARP_OFFLOAD_SUPPORT)
- int arpoe = 1;
+ int arpoe = 0;
#endif // endif
int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
@@ -11087,9 +10082,18 @@
#ifdef DISABLE_PRUNED_SCAN
uint32 scan_features = 0;
#endif /* DISABLE_PRUNED_SCAN */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ uint32 hostwake_oob = 0;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef EVENT_LOG_RATE_HC
+ /* threshold number of lines per second */
+#define EVENT_LOG_RATE_HC_THRESHOLD 1000
+ uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
+#endif /* EVENT_LOG_RATE_HC */
#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
uint32 btmdelta = WBTEXT_BTMDELTA;
#endif /* WBTEXT && WBTEXT_BTMDELTA */
+ wl_wlc_version_t wlc_ver;
#ifdef PKT_FILTER_SUPPORT
dhd_pkt_filter_enable = TRUE;
@@ -11097,19 +10101,21 @@
dhd->apf_set = FALSE;
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
-#ifdef WLTDLS
- dhd->tdls_enable = FALSE;
- dhd_tdls_set_mode(dhd, false);
-#endif /* WLTDLS */
dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
dhd->max_dtim_enable = TRUE;
#else
dhd->max_dtim_enable = FALSE;
#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+ dhd->disable_dtim_in_suspend = FALSE;
#ifdef CUSTOM_SET_OCLOFF
dhd->ocl_off = FALSE;
#endif /* CUSTOM_SET_OCLOFF */
+#ifdef SUPPORT_SET_TID
+ dhd->tid_mode = SET_TID_OFF;
+ dhd->target_uid = 0;
+ dhd->target_tid = 0;
+#endif /* SUPPORT_SET_TID */
DHD_TRACE(("Enter %s\n", __FUNCTION__));
dhd->op_mode = 0;
@@ -11128,6 +10134,33 @@
#endif /* !SUPPORT_MULTIPLE_CHIPS */
}
#endif /* CUSTOMER_HW4_DEBUG */
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
+ fw_version[FW_VER_STR_LEN-1] = '\0';
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_set_version_info(dhd, buf);
+#endif /* BCMSDIO || BCMPCIE */
+ }
+
+#ifdef BOARD_HIKEY
+ /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
+ if (strstr(fw_version, "WLTEST") != NULL) {
+ DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
+ __FUNCTION__));
+ op_mode = DHD_FLAG_MFG_MODE;
+ }
+#endif /* BOARD_HIKEY */
+
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
(op_mode == DHD_FLAG_MFG_MODE)) {
dhd->op_mode = DHD_FLAG_MFG_MODE;
@@ -11148,6 +10181,46 @@
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
}
+#ifdef BCMPCIE_OOB_HOST_WAKE
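+ /* Proceed if the iovar is absent; stop only when OOB host wake is supported but disabled in NVRAM */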
+ ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
+ sizeof(hostwake_oob), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
+ } else {
+ if (hostwake_oob == 0) {
+ DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
+ __FUNCTION__));
+ ret = BCME_UNSUPPORTED;
+ goto done;
+ } else {
+ DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
+ }
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
+ sizeof(dhd->axierror_logbuf_addr), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
+ dhd->axierror_logbuf_addr = 0;
+ } else {
+ DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
+ dhd->axierror_logbuf_addr));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef EVENT_LOG_RATE_HC
+ ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
+ sizeof(event_log_rate_hc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
+ event_log_rate_hc));
+ }
+#endif /* EVENT_LOG_RATE_HC */
+
#ifdef GET_CUSTOM_MAC_ENABLE
ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
if (!ret) {
@@ -11228,7 +10301,7 @@
memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+ dhdsdio_func_blocksize(dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
#ifdef SOFTAP_UAPSD_OFF
ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
@@ -11255,6 +10328,7 @@
dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
#ifndef CUSTOM_SET_ANTNPM
+#ifndef IGUANA_LEGACY_CHIPS
if (FW_SUPPORTED(dhd, rsdb)) {
wl_config_t rsdb_mode;
memset(&rsdb_mode, 0, sizeof(rsdb_mode));
@@ -11265,6 +10339,7 @@
__FUNCTION__, ret));
}
}
+#endif /* IGUANA_LEGACY_CHIPS */
#endif /* !CUSTOM_SET_ANTNPM */
} else {
uint32 concurrent_mode = 0;
@@ -11318,10 +10393,6 @@
#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
}
-#ifdef RSDB_MODE_FROM_FILE
- (void)dhd_rsdb_mode_from_file(dhd);
-#endif /* RSDB_MODE_FROM_FILE */
-
#ifdef DISABLE_PRUNED_SCAN
if (FW_SUPPORTED(dhd, rsdb)) {
ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
@@ -11431,14 +10502,10 @@
#ifdef BCMCCX
ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
#endif /* BCMCCX */
+
#ifdef WLTDLS
-#ifdef ENABLE_TDLS_AUTO_MODE
- /* by default TDLS on and auto mode on */
- _dhd_tdls_enable(dhd, true, true, NULL);
-#else
- /* by default TDLS on and auto mode off */
- _dhd_tdls_enable(dhd, true, false, NULL);
-#endif /* ENABLE_TDLS_AUTO_MODE */
+ dhd->tdls_enable = FALSE;
+ dhd_tdls_set_mode(dhd, false);
#endif /* WLTDLS */
#ifdef DHD_ENABLE_LPC
@@ -11461,22 +10528,20 @@
#ifdef WLADPS
if (dhd->op_mode & DHD_FLAG_STA_MODE) {
-#ifdef ADPS_MODE_FROM_FILE
- dhd_adps_mode_from_file(dhd);
-#else
if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
DHD_ERROR(("%s dhd_enable_adps failed %d\n",
__FUNCTION__, ret));
}
-#endif /* ADPS_MODE_FROM_FILE */
}
#endif /* WLADPS */
#ifdef DHD_PM_CONTROL_FROM_FILE
sec_control_pm(dhd, &power_mode);
#else
+#ifndef H2_BRING_UP
/* Set PowerSave mode */
(void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+#endif // endif
#endif /* DHD_PM_CONTROL_FROM_FILE */
#if defined(BCMSDIO)
@@ -11580,6 +10645,32 @@
}
#endif /* OEM_ANDROID */
+ ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
+ sizeof(event_log_max_sets), FALSE);
+ if (ret == BCME_OK) {
+ dhd->event_log_max_sets = event_log_max_sets;
+ } else {
+ dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ }
+ /* Make sure max_sets is written first (with a wmb) and sets_queried only
+ * after it; readers check them in the reverse order when parsing the log sets.
+ */
+ OSL_SMP_WMB();
+ dhd->event_log_max_sets_queried = TRUE;
+ DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+ __FUNCTION__, dhd->event_log_max_sets, ret));
+#ifdef DHD_BUS_MEM_ACCESS
+ ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
+ sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: enable_memuse is failed ret=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s: enable_memuse = %d\n",
+ __FUNCTION__, enable_memuse));
+ }
+#endif /* DHD_BUS_MEM_ACCESS */
+
#ifdef DISABLE_TXBFR
ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
0, TRUE);
@@ -11708,14 +10799,19 @@
}
#endif /* BCMSUP_4WAY_HANDSHAKE */
#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+ NULL, 0, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
+ vht_features = 0;
+ } else {
#ifdef SUPPORT_2G_VHT
- vht_features = 0x3; /* 2G support */
+ vht_features |= 0x3; /* 2G support */
#endif /* SUPPORT_2G_VHT */
#ifdef SUPPORT_5G_1024QAM_VHT
- if (BCM4347_CHIP(dhd_get_chipid(dhd))) {
vht_features |= 0x6; /* 5G 1024 QAM support */
- }
#endif /* SUPPORT_5G_1024QAM_VHT */
+ }
if (vht_features) {
ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
NULL, 0, TRUE);
@@ -11745,6 +10841,13 @@
DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
}
#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+#if defined(DISABLE_HE_ENAB)
+ control_he_enab = 0;
+#endif /* DISABLE_HE_ENAB */
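+ /* Apply the configured HE (802.11ax) enable/disable setting to the dongle */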
+ dhd_control_he_enab(dhd, control_he_enab);
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
#ifdef CUSTOM_PSPRETEND_THR
/* Turn off MPC in AP mode */
ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
@@ -11821,7 +10924,9 @@
#endif /* PNO_SUPPORT */
/* enable dongle roaming event */
#ifdef WL_CFG80211
+#if !defined(ROAM_EVT_DISABLE)
setbit(eventmask, WLC_E_ROAM);
+#endif /* !ROAM_EVT_DISABLE */
setbit(eventmask, WLC_E_BSSID);
#endif /* WL_CFG80211 */
#ifdef BCMCCX
@@ -11834,6 +10939,9 @@
#ifdef RTT_SUPPORT
setbit(eventmask, WLC_E_PROXD);
#endif /* RTT_SUPPORT */
+#if !defined(WL_CFG80211) && !defined(OEM_ANDROID)
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+#endif // endif
#ifdef WL_CFG80211
setbit(eventmask, WLC_E_ESCAN_RESULT);
setbit(eventmask, WLC_E_AP_STARTED);
@@ -11922,6 +11030,35 @@
#ifdef WL_NATOE
setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
#endif /* WL_NATOE */
+#ifdef WL_NAN
+ setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
+#endif /* WL_NAN */
+#ifdef WL_MBO
+ setbit(eventmask_msg->mask, WLC_E_MBO);
+#endif /* WL_MBO */
+#ifdef WL_BCNRECV
+ setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+ setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
+ setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
+#endif /* WL_CAC_TS */
+#ifdef WL_CHAN_UTIL
+ setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+#ifdef WL_SAE
+ setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_REQ);
+ setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_FRAME_RX);
+ setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_TXSTATUS);
+ setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE);
+#endif /* WL_SAE */
+#ifndef CONFIG_SOC_S5E5515
+ setbit(eventmask_msg->mask, WLC_E_IND_DOS_STATUS);
+#endif // endif
+#ifdef ENABLE_HOGSQS
+ setbit(eventmask_msg->mask, WLC_E_LDF_HOGGER);
+#endif /* ENABLE_HOGSQS */
+
/* Write updated Event mask */
eventmask_msg->ver = EVENTMSGS_VER;
eventmask_msg->command = EVENTMSGS_SET_MASK;
@@ -12068,58 +11205,93 @@
if (ret < 0)
DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
else {
- char *clmver_temp_buf = NULL;
+ char *ver_temp_buf = NULL;
- if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+ if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
DHD_ERROR(("Couldn't find \"Data:\"\n"));
} else {
- ptr = (clmver_temp_buf + strlen("Data:"));
- if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+ ptr = (ver_temp_buf + strlen("Data:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
DHD_ERROR(("Couldn't find New line character\n"));
} else {
memset(clm_version, 0, CLM_VER_STR_LEN);
- strncpy(clm_version, clmver_temp_buf,
- MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
- DHD_ERROR(("clm version = %s\n", clm_version));
+ strncpy(clm_version, ver_temp_buf,
+ MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
+ DHD_INFO(("CLM version = %s\n", clm_version));
}
}
+
+#if defined(CUSTOMER_HW4_DEBUG)
+ if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Customization:\"\n"));
+ } else {
+ char tokenlim;
+ ptr = (ver_temp_buf + strlen("Customization:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
+ DHD_ERROR(("Couldn't find project blob version"
+ "or New line character\n"));
+ } else if (tokenlim == '(') {
+ snprintf(clm_version,
+ CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
+ clm_version, ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
+ DHD_ERROR(("Couldn't find New line character\n"));
+ } else {
+ snprintf(clm_version,
+ strlen(clm_version) + strlen(ver_temp_buf),
+ "%s%s", clm_version, ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
+ clm_version));
+
+ }
+ } else if (tokenlim == '\n') {
+ snprintf(clm_version,
+ strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
+ "%s, Blob ver = Major : ", clm_version);
+ snprintf(clm_version,
+ strlen(clm_version) + strlen(ver_temp_buf) + 1,
+ "%s%s", clm_version, ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
+ }
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ if (strlen(clm_version)) {
+ DHD_ERROR(("CLM version = %s\n", clm_version));
+ } else {
+ DHD_ERROR(("Couldn't find CLM version!\n"));
+ }
}
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- ptr = buf;
- ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+ /* query for 'wlc_ver' to get version info from firmware */
+ memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
+ ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
+ sizeof(wl_wlc_version_t), FALSE);
if (ret < 0)
DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
else {
- bcmstrtok(&ptr, "\n", 0);
- /* Print fw version info */
- DHD_ERROR(("Firmware version = %s\n", buf));
- strncpy(fw_version, buf, FW_VER_STR_LEN);
- fw_version[FW_VER_STR_LEN-1] = '\0';
-#if defined(BCMSDIO) || defined(BCMPCIE)
- dhd_set_version_info(dhd, buf);
-#endif /* BCMSDIO || BCMPCIE */
-#ifdef WRITE_WLANINFO
- sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
-#endif /* WRITE_WLANINFO */
+ dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
+ dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
}
#endif /* defined(OEM_ANDROID) */
#ifdef GEN_SOFTAP_INFO_FILE
sec_save_softap_info();
#endif /* GEN_SOFTAP_INFO_FILE */
-#if defined(BCMSDIO)
+#if defined(BCMSDIO) && !defined(BCMSPI)
dhd_txglom_enable(dhd, TRUE);
-#endif /* defined(BCMSDIO) */
+#endif /* BCMSDIO && !BCMSPI */
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
if (disable_proptx ||
#ifdef PROP_TXSTATUS_VSDB
/* enable WLFC only if the firmware is VSDB when it is in STA mode */
- (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
- dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+ (!FW_SUPPORTED(dhd, ap)) ||
#endif /* PROP_TXSTATUS_VSDB */
FALSE) {
wlfc_enable = FALSE;
@@ -12137,9 +11309,10 @@
#ifndef DISABLE_11N
ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
NULL, 0, TRUE);
+ chipid = dhd_bus_chip_id(dhd);
if (ret2 < 0) {
DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
- if (ret2 != BCME_UNSUPPORTED)
+ if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
ret = ret2;
if (ret == BCME_NOTDOWN) {
@@ -12152,8 +11325,8 @@
ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
sizeof(hostreorder), NULL, 0, TRUE);
DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
- if (ret2 != BCME_UNSUPPORTED)
- ret = ret2;
+ if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
+ ret = ret2;
}
if (ret2 != BCME_OK)
hostreorder = 0;
@@ -12242,31 +11415,11 @@
}
#endif /* !WBTEXT */
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
+#ifdef DHD_NON_DMA_M2M_CORRUPTION
/* check pcie non dma loopback */
- if (dhd->op_mode == DHD_FLAG_MFG_MODE) {
- memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t));
- pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH;
- pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK;
- pcie_dmaxfer_lpbk.wait = TRUE;
-
- if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0,
- (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t),
- IOV_SET)) < 0) {
- DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d\n",
- ret));
+ if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
+ (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
goto done;
- }
-
- if (pcie_dmaxfer_lpbk.u.status != BCME_OK) {
- DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d"
- " Status : %d\n", ret, pcie_dmaxfer_lpbk.u.status));
- ret = BCME_ERROR;
- goto done;
- } else {
-
- DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n"));
- }
}
#endif /* DHD_NON_DMA_M2M_CORRUPTION */
@@ -12279,17 +11432,17 @@
| WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
#endif // endif
;
+#if defined(WL_MBO) && defined(WL_OCE)
+ if (FW_SUPPORTED(dhd, estm)) {
+ wnm_cap |= WL_WNM_ESTM;
+ }
+#endif /* WL_MBO && WL_OCE */
if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
DHD_ERROR(("failed to set WNM capabilities\n"));
}
if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
- if (dhd_start_ecounters(dhd) != BCME_OK) {
- DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
- } else if (dhd_start_event_ecounters(dhd) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
- }
-
+ dhd_ecounter_configure(dhd, TRUE);
}
/* store the preserve log set numbers */
@@ -12315,6 +11468,11 @@
}
#endif /* WL_MONITOR */
+#ifdef CONFIG_SILENT_ROAM
+ dhd->sroam_turn_on = TRUE;
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+
done:
if (eventmask_msg) {
@@ -12559,7 +11717,6 @@
if (!ifa || !(ifa->ifa_dev->dev))
return NOTIFY_DONE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Filter notifications meant for non Broadcom devices */
if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
(ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
@@ -12568,7 +11725,6 @@
#endif /* WL_ENABLE_P2P_IF */
return NOTIFY_DONE;
}
-#endif /* LINUX_VERSION_CODE */
dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
if (!dhd)
@@ -12597,8 +11753,14 @@
DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
- if (dhd->pub.busstate != DHD_BUS_DATA) {
- DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+ /*
+ * Skip if Bus is not in a state to transport the IOVAR
+ * (or) the Dongle is not ready.
+ */
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
+ dhd->pub.busstate == DHD_BUS_LOAD) {
+ DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
+ __FUNCTION__, dhd->pub.busstate));
if (dhd->pend_ipaddr) {
DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
__FUNCTION__, dhd->pend_ipaddr));
@@ -12751,12 +11913,10 @@
struct ipv6_work_info_t *ndo_info;
int idx;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Filter notifications meant for non Broadcom devices */
if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
return NOTIFY_DONE;
}
-#endif /* LINUX_VERSION_CODE */
dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
if (!dhd) {
@@ -12793,6 +11953,29 @@
}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+/* Network attach to be invoked from the bus probe handlers */
+int
+dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
+{
+ struct net_device *primary_ndev;
+ BCM_REFERENCE(primary_ndev);
+
+ /* Register primary net device */
+ if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
+ return BCME_ERROR;
+ }
+
+#if defined(WL_CFG80211)
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (wl_cfg80211_net_attach(primary_ndev) < 0) {
+ /* fail the init */
+ dhd_remove_if(dhdp, 0, TRUE);
+ return BCME_ERROR;
+ }
+#endif /* WL_CFG80211 */
+ return BCME_OK;
+}
+
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
@@ -12814,38 +11997,18 @@
net = ifp->net;
ASSERT(net && (ifp->idx == ifidx));
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- ASSERT(!net->open);
- net->get_stats = dhd_get_stats;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- net->do_ioctl = dhd_ioctl_entry_wrapper;
- net->hard_start_xmit = dhd_start_xmit_wrapper;
-#else
- net->do_ioctl = dhd_ioctl_entry;
- net->hard_start_xmit = dhd_start_xmit;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
- net->set_mac_address = dhd_set_mac_address;
- net->set_multicast_list = dhd_set_multicast_list;
- net->open = net->stop = NULL;
-#else
ASSERT(!net->netdev_ops);
net->netdev_ops = &dhd_ops_virt;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
/* Ok, link into the network layer... */
if (ifidx == 0) {
/*
* device functions for the primary interface only
*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- net->open = dhd_open;
- net->stop = dhd_stop;
-#else
net->netdev_ops = &dhd_ops_pri;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ memcpy(dhd->iflist[0]->mac_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
} else {
/*
* We have to use the primary MAC for virtual interfaces
@@ -12862,14 +12025,13 @@
DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
__func__, net->name));
temp_addr[0] |= 0x02;
+ memcpy(dhd->iflist[ifidx]->mac_addr, temp_addr, ETHER_ADDR_LEN);
}
#endif /* defined(OEM_ANDROID) */
}
net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
net->ethtool_ops = &dhd_ethtool_ops;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
@@ -12909,8 +12071,7 @@
wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif // endif
-#if defined(OEM_ANDROID) && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))))
+#if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC))
if (ifidx == 0) {
#ifdef BCMLXSDMMC
up(&dhd_registration_sem);
@@ -12947,19 +12108,57 @@
}
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
}
-#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
-
+#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */
return 0;
fail:
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
- net->open = NULL;
-#else
net->netdev_ops = NULL;
-#endif // endif
return err;
}
+#ifdef WL_VIF_SUPPORT
+#define MAX_VIF_NUM 8
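+/*
+ * Create up to MAX_VIF_NUM additional STA virtual interfaces on top of the
+ * primary interface, named from the vif_name/vif_num parameters.
+ */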
+int
+dhd_register_vif(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ dhd_if_t *ifp;
+ struct net_device *net;
+ int err = BCME_OK, i;
+ char viface_name[IFNAMSIZ] = {'\0'};
+ ifp = dhd->iflist[0];
+ net = ifp->net;
+ if (vif_num && vif_num > MAX_VIF_NUM)
+ vif_num = MAX_VIF_NUM;
+ /* Set virtual interface name if it was provided as module parameter */
+ if (vif_name[0]) {
+ int len;
+ char ch;
+ strncpy(viface_name, vif_name, IFNAMSIZ);
+ viface_name[IFNAMSIZ - 1] = 0;
+ len = strlen(viface_name);
+ ch = viface_name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+ strcat(viface_name, "%d");
+ } else {
+ DHD_ERROR(("%s check vif_name\n", __FUNCTION__));
+ return BCME_BADOPTION;
+ }
+
+ DHD_INFO(("%s Virtual interface [%s]:\n", __FUNCTION__, viface_name));
+ rtnl_lock();
+ for (i = 0; i < vif_num; i++) {
+ if (wl_cfg80211_add_if(wl_get_cfg(net), net, WL_IF_TYPE_STA, viface_name, NULL)
+ == NULL) {
+ DHD_ERROR(("%s error Virtual interface [%s], i:%d\n", __FUNCTION__,
+ viface_name, i));
+ break;
+ }
+ }
+ rtnl_unlock();
+ return err;
+}
+#endif /* WL_VIF_SUPPORT */
void
dhd_bus_detach(dhd_pub_t *dhdp)
{
@@ -13024,6 +12223,7 @@
DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
dhd->pub.up = 0;
if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
/* Give sufficient time for threads to start running in case
@@ -13135,6 +12335,9 @@
if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
free_netdev(ifp->net);
} else {
+#if defined(ARGOS_NOTIFY_CB)
+ argos_register_notifier_deinit();
+#endif // endif
#ifdef SET_RPS_CPUS
custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
@@ -13235,6 +12438,10 @@
}
#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ cancel_work_sync(&dhd->axi_error_dispatcher_work);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
#ifdef WL_CFG80211
@@ -13261,21 +12468,35 @@
dhd_os_dbg_detach_pkt_monitor(dhdp);
dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
#endif /* DBG_PKT_MON */
- dhd_os_dbg_detach(dhdp);
}
#endif /* DEBUGABILITY */
+ if (dhdp->dbg) {
+ dhd_os_dbg_detach(dhdp);
+ }
#ifdef DHD_PKT_LOGGING
- dhd_os_detach_pktlog(dhdp);
+ dhd_os_detach_pktlog(dhdp);
#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+ dhd_detach_statlog(dhdp);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_deinit(dhdp);
+#endif /* DHD_PKTDUMP_ROAM */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
- if (dhd->pub.hang_info) {
- MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
- }
+ if (dhd->pub.hang_info) {
+ MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ }
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#ifdef SHOW_LOGTRACE
/* Release the skbs from queue for WLC_E_TRACE event */
dhd_event_logtrace_flush_queue(dhdp);
+ /* Wait till event logtrace context finishes */
+ dhd_cancel_logtrace_process_sync(dhd);
+
+ /* Remove ring proc entries */
+ dhd_dbg_ring_proc_destroy(&dhd->pub);
+
if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
if (dhd->event_data.fmts) {
MFREE(dhd->pub.osh, dhd->event_data.fmts,
@@ -13322,10 +12543,10 @@
dhd->new_freq = NULL;
cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif // endif
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd->wakelock_wd_counter = 0;
wake_lock_destroy(&dhd->wl_wdwake);
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
DHD_OS_WAKE_LOCK_DESTROY(dhd);
@@ -13354,13 +12575,17 @@
dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
#endif /* DHD_DEBUG */
#ifdef WL_MONITOR
- dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
+ dhd_del_monitor_if(dhd);
#endif /* WL_MONITOR */
#ifdef DHD_ERPOM
- pom_func_deregister(&dhdp->pom_wlan_handler);
+ if (dhdp->enable_erpom) {
+ dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
+ }
#endif /* DHD_ERPOM */
+ cancel_work_sync(&dhd->dhd_hang_process_work);
+
/* Prefer adding de-init code above this comment unless necessary.
* The idea is to cancel work queue, sysfs and flags at the end.
*/
@@ -13377,16 +12602,15 @@
MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
dhdp->extended_trap_data = NULL;
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_err_dump)
+ {
+ MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ dhdp->axi_err_dump = NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE */
-#ifdef SHOW_LOGTRACE
- /* Wait till event_log_dispatcher_work finishes */
- cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
- mutex_lock(&dhd->pub.dhd_trace_lock);
- remove_proc_entry("dhd_trace", NULL);
- mutex_unlock(&dhd->pub.dhd_trace_lock);
-#endif /* SHOW_LOGTRACE */
-
#ifdef DHD_DUMP_MNGR
if (dhd->pub.dump_file_manage) {
MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
@@ -13394,7 +12618,7 @@
}
#endif /* DHD_DUMP_MNGR */
dhd_sysfs_exit(dhd);
- dhd->pub.fw_download_done = FALSE;
+ dhd->pub.fw_download_status = FW_UNLOADED;
#if defined(BT_OVER_SDIO)
mutex_destroy(&dhd->bus_user_lock);
@@ -13577,27 +12801,25 @@
return NOTIFY_DONE;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
- defined(CONFIG_ARCH_SDM845)
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_ARCH_SM8150)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
- * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820
+ * CONFIG_ARCH_SM8150
*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
-#else
-module_init(dhd_module_init);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
module_exit(dhd_module_exit);
@@ -13725,11 +12947,7 @@
int timeout;
/* Convert timeout in millisecond to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
-#else
- timeout = dhd_ioctl_timeout_msec * HZ / 1000;
-#endif // endif
DHD_PERIM_UNLOCK(pub);
@@ -13756,11 +12974,7 @@
int timeout;
/* Convert timeout in millisecond to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
-#else
- timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000;
-#endif // endif
DHD_PERIM_UNLOCK(pub);
@@ -13791,11 +13005,7 @@
* so that IOCTL timeout should not get affected.
*/
/* Convert timeout in millisecond to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
@@ -13814,11 +13024,7 @@
int timeout;
/* Convert timeout in millisecond to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
@@ -13838,11 +13044,7 @@
int timeout;
/* Convert timeout in millisecond to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
((*var & bitmask) == condition), timeout);
@@ -13855,9 +13057,12 @@
{
int ret = 0;
dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+ timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
DHD_PERIM_UNLOCK(pub);
- ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition));
+ ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
DHD_PERIM_LOCK(pub);
return ret;
@@ -13881,11 +13086,8 @@
wake_up(&dhd->tx_completion_wait);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
/* Fix compilation error for FC11 */
-INLINE
-#endif // endif
-int
+INLINE int
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
@@ -13986,6 +13188,7 @@
if (dhd->rpm_timer_valid == FALSE) {
mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
dhd->rpm_timer_valid = TRUE;
+ DHD_ERROR(("DHD Runtime PM Enabled \n"));
}
} else {
/* tick is zero, we have to stop the timer */
@@ -13994,6 +13197,7 @@
dhd->rpm_timer_valid = FALSE;
DHD_GENERAL_UNLOCK(pub, flags);
del_timer_sync(&dhd->rpm_timer);
+ DHD_ERROR(("DHD Runtime PM Disabled \n"));
/* we have already released the lock, so just go to exit */
goto exit;
}
@@ -14397,25 +13601,21 @@
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#if defined(BCMSDIO)
struct dhd_info *dhdinfo = dhd->info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
-#else
- int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
dhd_os_sdunlock(dhd);
wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
dhd_os_sdlock(dhd);
-#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+#endif /* defined(BCMSDIO) */
return;
} /* dhd_wait_for_event */
void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#if defined(BCMSDIO)
struct dhd_info *dhdinfo = dhd->info;
if (waitqueue_active(&dhdinfo->ctrl_wait))
wake_up(&dhdinfo->ctrl_wait);
@@ -14484,7 +13684,13 @@
dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
dhd->pub.d3ack_timeout_occured = 0;
+ dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ dhd->pub.smmu_fault_occurred = 0;
}
if (ret) {
@@ -14548,8 +13754,11 @@
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (dhd)
+ if (dhd) {
+ DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
+ __FUNCTION__, val));
dhd->pub.suspend_bcn_li_dtim = val;
+ }
return 0;
}
@@ -14573,6 +13782,27 @@
return 0;
}
+#ifdef DISABLE_DTIM_IN_SUSPEND
+int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.disable_dtim_in_suspend = TRUE;
+ } else {
+ dhd->pub.disable_dtim_in_suspend = FALSE;
+ }
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
#ifdef PKT_FILTER_SUPPORT
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
@@ -14652,12 +13882,21 @@
dhd_pub_t *dhd = (&ptr->pub);
int feature_set = 0;
- if (FW_SUPPORTED(dhd, sta))
+ if (FW_SUPPORTED(dhd, sta)) {
+#if defined(OEM_ANDROID)
+ feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
+ feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT;
+#endif /* OEM_ANDROID */
feature_set |= WIFI_FEATURE_INFRA;
+ }
if (FW_SUPPORTED(dhd, dualband))
feature_set |= WIFI_FEATURE_INFRA_5G;
- if (FW_SUPPORTED(dhd, p2p))
+ if (FW_SUPPORTED(dhd, p2p)) {
feature_set |= WIFI_FEATURE_P2P;
+#if defined(OEM_ANDROID)
+ feature_set |= WIFI_FEATURE_P2P_RAND_MAC;
+#endif /* OEM_ANDROID */
+ }
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
feature_set |= WIFI_FEATURE_SOFT_AP;
if (FW_SUPPORTED(dhd, tdls))
@@ -14678,7 +13917,7 @@
feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */
-#ifdef PNO_SUPPORT
+#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
if (dhd_is_pno_supported(dhd)) {
feature_set |= WIFI_FEATURE_PNO;
#ifdef GSCAN_SUPPORT
@@ -14686,7 +13925,7 @@
feature_set |= WIFI_FEATURE_HAL_EPNO;
#endif /* GSCAN_SUPPORT */
}
-#endif /* PNO_SUPPORT */
+#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
#ifdef RSSI_MONITOR_SUPPORT
if (FW_SUPPORTED(dhd, rssi_mon)) {
feature_set |= WIFI_FEATURE_RSSI_MONITOR;
@@ -14696,7 +13935,8 @@
feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
#ifdef NDO_CONFIG_SUPPORT
- feature_set |= WIFI_FEATURE_CONFIG_NDO;
+ if (FW_SUPPORTED(dhd, ndoe))
+ feature_set |= WIFI_FEATURE_CONFIG_NDO;
#endif /* NDO_CONFIG_SUPPORT */
#ifdef KEEP_ALIVE
feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
@@ -14775,10 +14015,17 @@
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (nodfs)
+ if (nodfs) {
+ if (dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG) {
+ return 0;
+ }
dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
- else
+ } else {
+ if (!(dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG)) {
+ return 0;
+ }
dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+ }
dhd->pub.force_country_change = TRUE;
return 0;
}
@@ -14818,9 +14065,10 @@
* ignore dropping of 'const' qualifier in 'list_entry' macro
* this pragma disables the warning only for the following function
*/
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-
+#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
static int
dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
{
@@ -15016,8 +14264,9 @@
return ret;
}
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
-
+#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
#endif /* NDO_CONFIG_SUPPORT */
#ifdef PNO_SUPPORT
@@ -15352,6 +14601,7 @@
return err;
}
#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+
#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
/* Linux wrapper to call common dhd_pno_get_gscan */
void *
@@ -15451,7 +14701,7 @@
return err;
}
-#ifdef RTT_SUPPORT
+#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
@@ -15741,20 +14991,16 @@
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static void _dhd_apf_lock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (dhd) {
mutex_lock(&dhd->dhd_apf_mutex);
}
-#endif // endif
}
static void _dhd_apf_unlock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (dhd) {
mutex_unlock(&dhd->dhd_apf_mutex);
}
-#endif // endif
}
static int
@@ -16071,28 +15317,28 @@
}
#endif /* PKT_FILTER_SUPPORT && APF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
-static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
+#if defined(OEM_ANDROID)
+static void dhd_hang_process(struct work_struct *work_data)
{
- dhd_info_t *dhd;
struct net_device *dev;
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
+ struct net_device *ndev;
+ uint8 i = 0;
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work_data, dhd_info_t, dhd_hang_process_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
- dhd = (dhd_info_t *)dhd_info;
dev = dhd->iflist[0]->net;
if (dev) {
- /*
- * For HW2, dev_close need to be done to recover
- * from upper layer after hang. For Interposer skip
- * dev_close so that dhd iovars can be used to take
- * socramdump after crash, also skip for HW4 as
- * handling of hang event is different
- */
-#if !defined(CUSTOMER_HW2_INTERPOSER) && !defined(BOARD_HIKEY)
- rtnl_lock();
- dev_close(dev);
- rtnl_unlock();
-#endif // endif
#if defined(WL_WIRELESS_EXT)
wl_iw_send_priv_event(dev, "HANG");
#endif // endif
@@ -16100,6 +15346,26 @@
wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif // endif
}
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
+ /*
+ * For HW2, dev_close needs to be done to recover
+ * from the upper layer after a hang. For Interposer, skip
+ * dev_close so that dhd iovars can still be used to take a
+ * socramdump after the crash; also skip for HW4, where
+ * handling of the hang event is different.
+ */
+
+ rtnl_lock();
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
+ if (ndev && (ndev->flags & IFF_UP)) {
+ DHD_ERROR(("ndev->name : %s dev close\n",
+ ndev->name));
+ dev_close(ndev);
+ }
+ }
+ rtnl_unlock();
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
}
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
@@ -16107,7 +15373,7 @@
void dhd_host_recover_link(void)
{
DHD_ERROR(("****** %s ******\n", __FUNCTION__));
- link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
dhd_bus_set_linkdown(link_recovery, TRUE);
dhd_os_send_hang_message(link_recovery);
}
@@ -16117,66 +15383,81 @@
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
int ret = 0;
-
- if (dhdp) {
#ifdef WL_CFG80211
- struct net_device *primary_ndev;
- struct bcm_cfg80211 *cfg;
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+#ifdef DHD_FILE_DUMP_EVENT
+ dhd_info_t *dhd_info = NULL;
+#endif /* DHD_FILE_DUMP_EVENT */
+#endif /* WL_CFG80211 */
- primary_ndev = dhd_linux_get_primary_netdev(dhdp);
- if (!primary_ndev) {
- DHD_ERROR(("%s: Cannot find primary netdev\n",
- __FUNCTION__));
- return -ENODEV;
- }
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
- cfg = wl_get_cfg(primary_ndev);
- if (!cfg) {
- DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
- return -EINVAL;
- }
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ dhd_info = (dhd_info_t *)dhdp->info;
- /* Skip sending HANG event to framework if driver is not ready */
- if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
- DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
- return -ENODEV;
- }
+ if (dhd_info->scheduled_memdump) {
+ DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
+ dhdp->hang_was_pending = 1;
+ return BCME_OK;
+ }
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#ifdef WL_CFG80211
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(primary_ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ /* Skip sending HANG event to framework if driver is not ready */
+ if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
+ DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
+ return -ENODEV;
+ }
#endif /* WL_CFG80211 */
#if defined(DHD_HANG_SEND_UP_TEST)
- if (dhdp->req_hang_type) {
- DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
- __FUNCTION__, dhdp->req_hang_type));
- dhdp->req_hang_type = 0;
- }
+ if (dhdp->req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhdp->req_hang_type));
+ dhdp->req_hang_type = 0;
+ }
#endif /* DHD_HANG_SEND_UP_TEST */
- if (!dhdp->hang_was_sent) {
+ if (!dhdp->hang_was_sent) {
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
- dhdp->hang_counts++;
- if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
- DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
- __func__, dhdp->hang_counts));
- BUG_ON(1);
- }
+ dhdp->hang_counts++;
+ if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
+ DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
+ __func__, dhdp->hang_counts));
+ BUG_ON(1);
+ }
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#ifdef DHD_DEBUG_UART
- /* If PCIe lane has broken, execute the debug uart application
- * to gether a ramdump data from dongle via uart
- */
- if (!dhdp->info->duart_execute) {
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ /* If the PCIe lane is broken, execute the debug uart application
+ * to gather ramdump data from the dongle via uart
+ */
+ if (!dhdp->info->duart_execute) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
- }
-#endif /* DHD_DEBUG_UART */
- dhdp->hang_was_sent = 1;
-#ifdef BT_OVER_SDIO
- dhdp->is_bt_recovery_required = TRUE;
-#endif // endif
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
- DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
}
+#endif /* DHD_DEBUG_UART */
+ dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+ dhdp->is_bt_recovery_required = TRUE;
+#endif // endif
+ schedule_work(&dhdp->info->dhd_hang_process_work);
+
}
return ret;
}
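/*
 * Hedged sketch of the defer-and-replay flag added above (hypothetical names,
 * plain C): when a memory dump is already scheduled, the HANG notification is
 * latched in hang_was_pending and re-issued once the dump work has finished,
 * instead of racing the dump.
 */
struct example_pub {
	int memdump_scheduled;
	int hang_was_pending;
};

static int example_send_hang(struct example_pub *pub)
{
	if (pub->memdump_scheduled) {
		/* dump in flight: remember the request for later */
		pub->hang_was_pending = 1;
		return 0;
	}
	/* ... notify the framework here ... */
	return 0;
}

static void example_memdump_done(struct example_pub *pub)
{
	if (pub->hang_was_pending) {
		pub->hang_was_pending = 0;
		example_send_hang(pub);	/* replay the deferred notification */
	}
}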
@@ -16192,11 +15473,7 @@
#ifdef BT_OVER_SDIO
if (netif_running(dev)) {
#endif /* BT_OVER_SDIO */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
ret = dhd_os_send_hang_message(&dhd->pub);
-#else
- ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
-#endif // endif
#ifdef BT_OVER_SDIO
}
DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
@@ -16236,7 +15513,7 @@
return net_os_send_hang_message(dev);
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+#endif /* OEM_ANDROID */
int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
@@ -16268,6 +15545,17 @@
get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
}
+#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
+ else {
+ /* Replace the ccode to XZ if ccode is undefined country */
+ if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
+ strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
+ DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
+ }
+ }
+#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
BCM_REFERENCE(dhd);
}
@@ -16279,6 +15567,7 @@
#endif // endif
if (dhd && dhd->pub.up) {
+ dhd->pub.force_country_change = FALSE;
memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
wl_update_wiphybands(cfg, notify);
@@ -16335,7 +15624,7 @@
static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
if (dhd)
mutex_lock(&dhd->dhd_net_if_mutex);
#endif // endif
@@ -16343,7 +15632,7 @@
static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
if (dhd)
mutex_unlock(&dhd->dhd_net_if_mutex);
#endif // endif
@@ -16351,7 +15640,7 @@
static void dhd_suspend_lock(dhd_pub_t *pub)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_lock(&dhd->dhd_suspend_mutex);
@@ -16360,7 +15649,7 @@
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
+#if defined(OEM_ANDROID)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_unlock(&dhd->dhd_suspend_mutex);
@@ -16613,6 +15902,21 @@
type_str = "DUE_TO_BT";
break;
#endif /* DHD_ERPOM */
+ case DUMP_TYPE_LOGSET_BEYOND_RANGE:
+ type_str = "LOGSET_BEYOND_RANGE";
+ break;
+ case DUMP_TYPE_CTO_RECOVERY:
+ type_str = "CTO_RECOVERY";
+ break;
+ case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
+ type_str = "SEQUENTIAL_PRIVCMD_ERROR";
+ break;
+ case DUMP_TYPE_PROXD_TIMEOUT:
+ type_str = "PROXD_TIMEOUT";
+ break;
+ case DUMP_TYPE_PKTID_POOL_DEPLETED:
+ type_str = "PKTID_POOL_DEPLETED";
+ break;
default:
type_str = "Unknown_type";
break;
@@ -16622,39 +15926,72 @@
buf[strlen(type_str)] = 0;
}
+void
+dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
+{
+ char memdump_type[32];
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ /* Init file name */
+ memset(memdump_path, 0, len);
+ memset(memdump_type, 0, sizeof(memdump_type));
+ dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+ get_debug_dump_time(dhdp->debug_dump_time_str);
+#ifdef CUSTOMER_HW4_DEBUG
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+#elif defined(OEM_ANDROID)
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+#else
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+#endif /* CUSTOMER_HW4_DEBUG */
+ if (strstr(fname, "sssr_dump")) {
+ DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
+ } else {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ memdump_path, FILE_NAME_HAL_TAG));
+ }
+}
+
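/*
 * Illustration only: dhd_get_memdump_filename() above composes the dump path
 * as "<dump dir><fname>_<memdump type>_<timestamp>". The directory, type
 * string and timestamp below are assumed example values, not taken from any
 * particular build; the snippet is plain userspace C showing the shape of the
 * result.
 */
#include <stdio.h>

int main(void)
{
	char path[128];

	snprintf(path, sizeof(path), "%s%s_%s_%s",
		 "/data/vendor/wifi/",		/* assumed dump directory */
		 "mem_dump",			/* fname */
		 "resumed_on_timeout",		/* memdump type string */
		 "20240101_120000");		/* debug_dump_time_str */
	/* -> /data/vendor/wifi/mem_dump_resumed_on_timeout_20240101_120000 */
	printf("%s\n", path);
	return 0;
}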
int
write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
int ret = 0;
char memdump_path[128];
char memdump_type[32];
- struct timeval curtime;
uint32 file_mode;
/* Init file name */
memset(memdump_path, 0, sizeof(memdump_path));
memset(memdump_type, 0, sizeof(memdump_type));
- do_gettimeofday(&curtime);
dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
-#ifdef CUSTOMER_HW4_DEBUG
+ clear_debug_dump_time(dhd->debug_dump_time_str);
get_debug_dump_time(dhd->debug_dump_time_str);
+#ifdef CUSTOMER_HW4_DEBUG
snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
- DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/data/misc/wifi/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/data/misc/wifi/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
file_mode = O_CREAT | O_WRONLY;
#elif defined(OEM_ANDROID)
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/root/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ "/root/", fname, memdump_type, dhd->debug_dump_time_str);
/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
* calling BUG_ON immediately after collecting the socram dump.
* So the file write operation should directly write the contents into the
@@ -16667,17 +16004,15 @@
/* Check if it is live Brix image having /installmedia, else use /data */
if (IS_ERR(fp)) {
DHD_ERROR(("open file %s, try /tmp/\n", memdump_path));
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/tmp/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ "/tmp/", fname, memdump_type, dhd->debug_dump_time_str);
} else {
filp_close(fp, NULL);
}
}
#else
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/root/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
file_mode = O_CREAT | O_WRONLY;
#endif /* CUSTOMER_HW4_DEBUG */
@@ -16685,7 +16020,7 @@
DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr("write_dump_to_file", buf, size);
+ dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
#endif /* DHD_LOG_DUMP */
/* Write file */
@@ -16711,7 +16046,7 @@
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
if (dhd->wakelock_rx_timeout_enable)
wake_lock_timeout(&dhd->wl_rxwake,
msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
@@ -16772,7 +16107,7 @@
if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
dhd->wakelock_ctrl_timeout_enable = 0;
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
if (wake_lock_active(&dhd->wl_ctrlwake))
wake_unlock(&dhd->wl_ctrlwake);
#endif // endif
@@ -16816,7 +16151,7 @@
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
-int trace_wklock_onoff = 1;
+atomic_t trace_wklock_onoff;
typedef enum dhd_wklock_type {
DHD_WAKE_LOCK,
DHD_WAKE_UNLOCK,
@@ -16941,6 +16276,7 @@
INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ atomic_set(&trace_wklock_onoff, 1);
}
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
@@ -16997,14 +16333,14 @@
if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_stay_awake(pub);
#endif // endif
}
#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
+ if (atomic_read(&trace_wklock_onoff)) {
STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
}
#endif /* DHD_TRACE_WAKE_LOCK */
@@ -17021,9 +16357,9 @@
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_lock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_stay_awake(pub);
#endif // endif
}
@@ -17032,25 +16368,25 @@
void
dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
}
-#endif /* CONFIG_HAS_WAKE_LOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
void
dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
}
-#endif /* CONFIG_HAS_WAKE_LOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
int net_os_wake_lock(struct net_device *dev)
@@ -17076,14 +16412,14 @@
if (dhd->wakelock_counter > 0) {
dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
+ if (atomic_read(&trace_wklock_onoff)) {
STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
}
#endif /* DHD_TRACE_WAKE_LOCK */
if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_relax(pub);
#endif // endif
}
@@ -17099,9 +16435,9 @@
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_unlock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_relax(pub);
#endif // endif
}
@@ -17109,7 +16445,7 @@
void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
@@ -17118,12 +16454,12 @@
wake_unlock(&dhd->wl_pmwake);
}
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
void dhd_txfl_wake_unlock(dhd_pub_t *pub)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
@@ -17132,26 +16468,25 @@
wake_unlock(&dhd->wl_txflwake);
}
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
-#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
- KERNEL_VERSION(2, 6, 36)))
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
dhd_info_t *dhd;
if (!pub)
return 0;
dhd = (dhd_info_t *)(pub->info);
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
(wake_lock_active(&dhd->wl_wdwake))))
return 1;
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
return 1;
#endif // endif
@@ -17161,13 +16496,12 @@
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
-#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
- KERNEL_VERSION(2, 6, 36)))
-#if defined(CONFIG_HAS_WAKELOCK)
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
int l1, l2, l3, l4, l7, l8, l9;
int l5 = 0, l6 = 0;
int c, lock_active;
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
dhd_info_t *dhd;
if (!pub) {
@@ -17177,9 +16511,9 @@
if (!dhd) {
return 0;
}
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
c = dhd->wakelock_counter;
l1 = wake_lock_active(&dhd->wl_wifi);
l2 = wake_lock_active(&dhd->wl_wdwake);
@@ -17203,11 +16537,11 @@
__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
return 1;
}
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
return 1;
}
-#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+#endif /* defined(BCMSDIO) */
return 0;
}
@@ -17230,10 +16564,10 @@
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
/* if wakelock_wd_counter was never used : lock it at once */
wake_lock(&dhd->wl_wdwake);
-#endif // endif
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
dhd->wakelock_wd_counter++;
ret = dhd->wakelock_wd_counter;
@@ -17253,9 +16587,9 @@
if (dhd->wakelock_wd_counter > 0) {
dhd->wakelock_wd_counter = 0;
if (!dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_unlock(&dhd->wl_wdwake);
-#endif // endif
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
}
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
@@ -17267,19 +16601,19 @@
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
@@ -17288,7 +16622,7 @@
wake_unlock(&dhd->wl_intrwake);
}
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
@@ -17296,19 +16630,19 @@
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
@@ -17317,7 +16651,7 @@
wake_unlock(&dhd->wl_scanwake);
}
}
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
#endif /* DHD_USE_SCAN_WAKELOCK */
@@ -17336,7 +16670,7 @@
/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
+ if (atomic_read(&trace_wklock_onoff)) {
STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
}
#endif /* DHD_TRACE_WAKE_LOCK */
@@ -17373,21 +16707,21 @@
* the lock in between, do the same by calling wake_unlock or pm_relax
*/
#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
+ if (atomic_read(&trace_wklock_onoff)) {
STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
}
#endif /* DHD_TRACE_WAKE_LOCK */
if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif // endif
} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+#elif defined(BCMSDIO)
dhd_bus_dev_pm_relax(&dhd->pub);
#endif // endif
}
@@ -17405,7 +16739,7 @@
dhd->wakelock_rx_timeout_enable = 0;
dhd->wakelock_ctrl_timeout_enable = 0;
/* wakelocks prevent a system from going into a low power state */
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
@@ -17418,7 +16752,7 @@
#ifdef DHD_USE_SCAN_WAKELOCK
wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
@@ -17427,7 +16761,7 @@
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
-#ifdef CONFIG_HAS_WAKELOCK
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
dhd->wakelock_counter = 0;
dhd->wakelock_rx_timeout_enable = 0;
dhd->wakelock_ctrl_timeout_enable = 0;
@@ -17446,7 +16780,7 @@
#ifdef DHD_TRACE_WAKE_LOCK
dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
bool dhd_os_check_if_up(dhd_pub_t *pub)
@@ -17495,9 +16829,6 @@
return -ENODEV;
}
- if (cmd == WLC_SET_PM)
- return ret;
-
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
@@ -17602,7 +16933,7 @@
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
return;
}
@@ -17885,87 +17216,166 @@
return 0;
}
-#ifdef DHD_FW_COREDUMP
-#if defined(CONFIG_X86) && defined(OEM_ANDROID)
-#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
-#define MEMDUMPINFO_INST "/data/.memdump.info"
-#endif /* CONFIG_X86 && OEM_ANDROID */
+#ifdef DHD_RND_DEBUG
#ifdef CUSTOMER_HW4_DEBUG
-#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#define RNDINFO PLATFORM_PATH".rnd"
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
-#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#define RNDINFO "/data/misc/wifi/.rnd"
#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
-#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
-#elif defined(CONFIG_X86) && defined(OEM_ANDROID) && defined(DHD_FW_COREDUMP)
-#define MEMDUMPINFO MEMDUMPINFO_LIVE
+#define RNDINFO "/data/misc/wifi/.rnd"
+#elif defined(OEM_ANDROID)
+#define RNDINFO_LIVE "/installmedia/.rnd"
+#define RNDINFO_INST "/data/.rnd"
+#define RNDINFO RNDINFO_LIVE
#else /* FC19 and Others */
-#define MEMDUMPINFO "/root/.memdump.info"
+#define RNDINFO "/root/.rnd"
#endif /* CUSTOMER_HW4_DEBUG */
-void dhd_get_memdump_info(dhd_pub_t *dhd)
+#define RND_IN RNDINFO".in"
+#define RND_OUT RNDINFO".out"
+
+int
+dhd_get_rnd_info(dhd_pub_t *dhd)
{
struct file *fp = NULL;
- uint32 mem_val = DUMP_MEMFILE_MAX;
- int ret = 0;
- char *filepath = MEMDUMPINFO;
+ int ret = BCME_ERROR;
+ char *filepath = RND_IN;
+ uint32 file_mode = O_RDONLY;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
/* Read memdump info from the file */
- fp = filp_open(filepath, O_RDONLY, 0);
+ fp = filp_open(filepath, file_mode, 0);
if (IS_ERR(fp)) {
DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
-#if defined(CONFIG_X86) && defined(OEM_ANDROID) && defined(DHD_FW_COREDUMP)
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
/* Check if it is Live Brix Image */
- if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
- goto done;
+ if (bcmstrstr(filepath, RNDINFO_LIVE)) {
+ goto err1;
}
/* Try if it is Installed Brix Image */
- filepath = MEMDUMPINFO_INST;
+ filepath = RNDINFO_INST".in";
DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
- fp = filp_open(filepath, O_RDONLY, 0);
+ fp = filp_open(filepath, file_mode, 0);
if (IS_ERR(fp)) {
DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- goto done;
+ goto err1;
}
#else /* Non Brix Android platform */
- goto done;
+ goto err1;
#endif /* CONFIG_X86 && OEM_ANDROID */
}
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
/* Handle success case */
- ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4);
+ ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- filp_close(fp, NULL);
- goto done;
+ DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
}
- mem_val = bcm_atoi((char *)&mem_val);
+ dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
+ if (!dhd->rnd_buf) {
+ DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
+ goto err2;
+ }
+ ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret));
+ goto err3;
+ }
+
+ set_fs(old_fs);
filp_close(fp, NULL);
-#ifdef DHD_INIT_DEFAULT_MEMDUMP
- if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
- mem_val = DUMP_MEMFILE_BUGON;
-#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+ DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath));
+ return BCME_OK;
-done:
-#ifdef CUSTOMER_HW4_DEBUG
- dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
-#elif defined(OEM_ANDROID)
- dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
-#else
- dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
-#endif /* CUSTOMER_HW4_DEBUG */
-
- DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
+err3:
+ MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
+ dhd->rnd_buf = NULL;
+err2:
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+err1:
+ return BCME_ERROR;
}
+int
+dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len)
+{
+ struct file *fp = NULL;
+ int ret = BCME_OK;
+ char *filepath = RND_OUT;
+ uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* Open the RND output file for writing */
+ fp = filp_open(filepath, file_mode, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
+ /* Check if it is Live Brix Image */
+ if (bcmstrstr(filepath, RNDINFO_LIVE)) {
+ goto err1;
+ }
+ /* Try if it is Installed Brix Image */
+ filepath = RNDINFO_INST".out";
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, file_mode, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto err1;
+ }
+#else /* Non Brix Android platform */
+ goto err1;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* Handle success case */
+ ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
+ }
+
+ ret = vfs_write(fp, (char *)rnd_buf, rnd_len, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
+ }
+
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+ DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
+ return BCME_OK;
+
+err2:
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+err1:
+ return BCME_ERROR;
+
+}
+#endif /* DHD_RND_DEBUG */
+
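/*
 * Minimal sketch of the in-kernel file I/O pattern used by the RND helpers
 * above, valid for v4.19-era kernels (set_fs()/get_fs() were removed in later
 * kernels). The path, buffer and length here are hypothetical; only the
 * filp_open()/set_fs(KERNEL_DS)/vfs_read()/filp_close() sequence mirrors the
 * code above.
 */
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/types.h>

static int example_read_blob(u8 *buf, size_t len)
{
	struct file *fp;
	mm_segment_t old_fs;
	loff_t pos = 0;
	ssize_t ret;

	fp = filp_open("/data/misc/wifi/.example", O_RDONLY, 0);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	/* temporarily allow vfs_read() to take a kernel-space buffer */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = vfs_read(fp, (char *)buf, len, &pos);
	set_fs(old_fs);

	filp_close(fp, NULL);
	return (ret < 0) ? (int)ret : 0;
}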
+#ifdef DHD_FW_COREDUMP
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
unsigned long flags = 0;
dhd_dump_t *dump = NULL;
dhd_info_t *dhd_info = NULL;
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ log_dump_type_t type = DLD_BUF_TYPE_ALL;
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
dhd_info = (dhd_info_t *)dhdp->info;
dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
@@ -17975,19 +17385,27 @@
}
dump->buf = buf;
dump->bufsize = size;
+#ifdef BCMPCIE
+ dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
+ (uint32 *)(&dump->hscb_bufsize));
+#else /* BCMPCIE */
+ dump->hscb_bufsize = 0;
+#endif /* BCMPCIE */
-#if defined(CONFIG_ARM64)
- DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
- (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
-#elif defined(__ARM_ARCH_7A__)
- DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
- (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
-#endif /* __ARM_ARCH_7A__ */
- if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhdp, "memdump", buf, size);
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ /* Print out buffer information */
+ dhd_log_dump_buf_addr(dhdp, &type);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+#endif /* DHD_LOG_DUMP */
+
+ if (dhdp->memdump_enabled == DUMP_MEMONLY && (!disable_bug_on)) {
BUG_ON(1);
}
-#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
+#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
+ defined(DNGL_AXI_ERROR_LOGGING)
if (
#if defined(DEBUG_DNGL_INIT_FAIL)
(dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
@@ -17995,24 +17413,31 @@
#ifdef DHD_ERPOM
(dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
#endif /* DHD_ERPOM */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
+#endif /* DNGL_AXI_ERROR_LOGGING */
FALSE)
{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
log_dump_type_t *flush_type = NULL;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
dhd_info->scheduled_memdump = FALSE;
- dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
+ (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
/* for dongle init fail cases, 'dhd_mem_dump' does
- * not call 'dhd_log_dump', so call it here.
- */
+ * not call 'dhd_log_dump', so call it here.
+ */
flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
+ sizeof(log_dump_type_t));
if (flush_type) {
*flush_type = DLD_BUF_TYPE_ALL;
DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
dhd_log_dump(dhdp->info, flush_type, 0);
}
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
return;
}
-#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
+#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */
dhd_info->scheduled_memdump = TRUE;
/* bus busy bit for mem dump will be cleared in mem dump
@@ -18023,55 +17448,70 @@
DHD_GENERAL_UNLOCK(dhdp, flags);
DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
- DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+ DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-static void
+static int
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
dhd_pub_t *dhdp = NULL;
- dhd_dump_t *dump = event_info;
unsigned long flags = 0;
+ int ret = 0;
+ dhd_dump_t *dump = NULL;
- DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+ DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
if (!dhd) {
DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
+ return -ENODEV;
}
dhdp = &dhd->pub;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
DHD_GENERAL_LOCK(dhdp, flags);
if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
DHD_GENERAL_UNLOCK(dhdp, flags);
DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ ret = -ENODEV;
goto exit;
}
DHD_GENERAL_UNLOCK(dhdp, flags);
-#ifdef D2H_MINIDUMP
- /* dump minidump */
- if (dhd_bus_is_minidump_enabled(dhdp)) {
- dhd_d2h_minidump(&dhd->pub);
- } else {
- DHD_ERROR(("minidump is not enabled\n"));
+#ifdef DHD_SSSR_DUMP
+ if (dhdp->sssr_inited && dhdp->collect_sssr) {
+ dhdpcie_sssr_dump(dhdp);
}
-#endif /* D2H_MINIDUMP */
+ dhdp->collect_sssr = FALSE;
+#endif /* DHD_SSSR_DUMP */
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ dhd_wait_for_file_dump(dhdp);
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+ dump = (dhd_dump_t *)event_info;
if (!dump) {
DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+ ret = -EINVAL;
goto exit;
}
-#ifdef DHD_DEBUG
+ /*
+ * If the kernel does not have file write access enabled,
+ * skip writing dumps to files.
+ * The dumps will be pushed to the HAL layer, which will
+ * write them to files.
+ */
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+
if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
}
-#endif /* DHD_DEBUG */
/* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
* context, no need to schedule another work queue for log dump. In case of
@@ -18102,22 +17542,37 @@
* we cannot wait in case dhd_mem_dump is called directly
* as it may not be in a sleepable context
*/
- if (dhd->scheduled_memdump) {
+ if (dhd->scheduled_memdump) {
uint bitmask = 0;
int timeleft = 0;
#ifdef DHD_SSSR_DUMP
bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
#endif // endif
if (bitmask != 0) {
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
&dhdp->dhd_bus_busy_state, bitmask, 0);
if ((timeleft == 0) || (timeleft == 1)) {
- DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n",
+ DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
__FUNCTION__, dhdp->dhd_bus_busy_state));
}
}
}
+ if (dump->hscb_buf && dump->hscb_bufsize) {
+ DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
+ if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
+ dump->hscb_bufsize, "mem_dump_hscb")) {
+ DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ }
+ }
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
@@ -18132,55 +17587,118 @@
dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
#ifdef SHOW_LOGTRACE
- /* Wait till event_log_dispatcher_work finishes */
- cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
+ /* Wait till logtrace context is flushed */
+ dhd_flush_logtrace_process(dhd);
#endif /* SHOW_LOGTRACE */
- BUG_ON(1);
+ DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
+ if (!disable_bug_on) {
+ BUG_ON(1);
+ }
}
+ DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
exit:
- if (dump)
+ if (dump) {
MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
+ }
DHD_GENERAL_LOCK(dhdp, flags);
DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
dhd_os_busbusy_wake(dhdp);
DHD_GENERAL_UNLOCK(dhdp, flags);
dhd->scheduled_memdump = FALSE;
+#ifdef OEM_ANDROID
+ if (dhdp->hang_was_pending) {
+ DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
+ dhd_os_send_hang_message(dhdp);
+ dhdp->hang_was_pending = 0;
+ }
+#endif /* OEM_ANDROID */
+ DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
+ return ret;
}
#endif /* DHD_FW_COREDUMP */
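/*
 * Hedged sketch (plain C, hypothetical names) of the
 * DHD_DUMP_FILE_WRITE_FROM_KERNEL split used in dhd_mem_dump() above: when the
 * define is set, the driver writes the SoC RAM dump to a file itself;
 * otherwise the buffers are left in place and a user-space HAL is expected to
 * pull them.
 */
#include <errno.h>

struct example_dump {
	void *buf;
	unsigned int bufsize;
};

/* stand-in for write_dump_to_file(); always "succeeds" here */
static int example_write_file(const char *name, void *buf, unsigned int len)
{
	(void)name; (void)buf; (void)len;
	return 0;
}

/* stand-in for the event that lets the HAL layer pull the buffers */
static void example_notify_hal(struct example_dump *dump)
{
	(void)dump;
}

static int example_handle_memdump(struct example_dump *dump)
{
#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
	if (example_write_file("mem_dump", dump->buf, dump->bufsize))
		return -EIO;
#else
	example_notify_hal(dump);
#endif
	return 0;
}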
-#ifdef D2H_MINIDUMP
-void
-dhd_d2h_minidump(dhd_pub_t *dhdp)
-{
- char d2h_minidump[128];
- dhd_dma_buf_t *minidump_buf;
-
- minidump_buf = dhd_prot_get_minidump_buf(dhdp);
- if (minidump_buf->va == NULL) {
- DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__));
- return;
- }
-
- /* Init file name */
- memset(d2h_minidump, 0, sizeof(d2h_minidump));
- snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump");
-
- if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va,
- BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN, d2h_minidump)) {
- DHD_ERROR(("%s: failed to dump d2h_minidump to file\n",
- __FUNCTION__));
- }
-}
-#endif /* D2H_MINIDUMP */
-
#ifdef DHD_SSSR_DUMP
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
+
+ if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
+ }
+
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
+ NULL, user_buf, dig_buf_size, &pos);
+ }
+ return ret;
+}
+
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
+
+ if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
+ }
+
+ if (dhdp->sssr_dig_buf_after) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
+ NULL, user_buf, dig_buf_size, &pos);
+ }
+ return ret;
+}
+
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+
+ if (dhdp->sssr_d11_before[core] &&
+ dhdp->sssr_d11_outofreset[core] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
+ NULL, user_buf, len, &pos);
+ }
+ return ret;
+}
+
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+
+ if (dhdp->sssr_d11_after[core] &&
+ dhdp->sssr_d11_outofreset[core]) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
+ NULL, user_buf, len, &pos);
+ }
+ return ret;
+}
static void
-dhd_sssr_dump(void *handle, void *event_info, u8 event)
+dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
{
- dhd_info_t *dhd = handle;
+ dhd_info_t *dhd = dhdinfo;
dhd_pub_t *dhdp;
int i;
char before_sr_dump[128];
@@ -18198,6 +17716,7 @@
dhdp = &dhd->pub;
DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
DHD_GENERAL_UNLOCK(dhdp, flags);
DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
@@ -18211,11 +17730,12 @@
memset(after_sr_dump, 0, sizeof(after_sr_dump));
snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
- "sssr_core", i, "before_SR");
+ "sssr_dump_core", i, "before_SR");
snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
- "sssr_core", i, "after_SR");
+ "sssr_dump_core", i, "after_SR");
- if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
+ if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
@@ -18238,9 +17758,9 @@
dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
}
- if (dhdp->sssr_dig_buf_before) {
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
- dig_buf_size, "sssr_dig_before_SR")) {
+ dig_buf_size, "sssr_dump_dig_before_SR")) {
DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
__FUNCTION__));
}
@@ -18248,7 +17768,7 @@
if (dhdp->sssr_dig_buf_after) {
if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
- dig_buf_size, "sssr_dig_after_SR")) {
+ dig_buf_size, "sssr_dump_dig_after_SR")) {
DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
__FUNCTION__));
}
@@ -18262,25 +17782,31 @@
}
void
-dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
+dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
{
- unsigned long flags = 0;
+ dhdp->sssr_dump_mode = dump_mode;
- /* bus busy bit for sssr dump will be cleared in sssr dump
- * work item context, after sssr dump files are created
- */
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ /*
+ * If the kernel does not have file write access enabled,
+ * skip writing dumps to files.
+ * The dumps will be pushed to the HAL layer, which will
+ * write them to files.
+ */
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ return;
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
- if (dhdp->info->no_wq_sssrdump) {
- dhd_sssr_dump(dhdp->info, 0, 0);
- return;
- }
+ /*
+ * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
+ * Without workqueue -
+ * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
+ * : These are called in their own handler, not in the interrupt context
+ * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue
+ * Thus, it doesn't need to dump SSSR in the workqueue
+ */
+ DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
+ dhd_sssr_dump_to_file(dhdp->info);
- DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
- DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* DHD_SSSR_DUMP */
@@ -18330,15 +17856,19 @@
}
static void
-dhd_print_buf_addr(char *name, void *buf, unsigned int size)
+dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
{
+ if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
+ (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)) {
#if defined(CONFIG_ARM64)
- DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
- name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+ DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
+ name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
- DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
- name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+ DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
+ name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
+ }
}
static void
@@ -18364,31 +17894,51 @@
(unsigned long)dld_buf->front;
}
scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i);
- dhd_print_buf_addr(buf_name, dld_buf, dld_buf_size[i]);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i);
- dhd_print_buf_addr(buf_name, dld_buf->buffer, wr_size);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i);
- dhd_print_buf_addr(buf_name, dld_buf->present, wr_size);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i);
- dhd_print_buf_addr(buf_name, dld_buf->front, wr_size);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
}
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
/* periodic flushing of ecounters is NOT supported */
if (*type == DLD_BUF_TYPE_ALL &&
logdump_ecntr_enable &&
dhdp->ecntr_dbg_ring) {
ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
- dhd_print_buf_addr("ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
- dhd_print_buf_addr("ecntr_dbg_ring ring_buf", ring->ring_buf,
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
LOG_DUMP_ECNTRS_MAX_BUFSIZE);
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
-#if defined(BCMPCIE)
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
+ dhd_statlog_get_logbuf_len(dhdp));
+ }
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ /* periodic flushing of ecounters is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_rtt_enable &&
+ dhdp->rtt_dbg_ring) {
+
+ ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
+ dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
+ dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
+ LOG_DUMP_RTT_MAX_BUFSIZE);
+ }
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef BCMPCIE
if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
- dhd_print_buf_addr("extended_trap_data", dhdp->extended_trap_data,
+ dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
BCMPCIE_EXT_TRAP_DATA_MAXLEN);
}
#endif /* BCMPCIE */
@@ -18396,14 +17946,15 @@
#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
/* if health check event was received */
if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- dhd_print_buf_addr("health_chk_event_data", dhdp->health_chk_event_data,
+ dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
HEALTH_CHK_BUF_SIZE);
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
/* append the concise debug information */
if (dhdp->concise_dbg_buf) {
- dhd_print_buf_addr("concise_dbg_buf", dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
+ CONCISE_DUMP_BUFLEN);
}
}
@@ -18476,30 +18027,367 @@
}
#endif /* CUSTOMER_HW4_DEBUG */
-/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
-static int
-do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
+#ifdef DHD_SSSR_DUMP
+int
+dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
{
- int ret = 0, i = 0;
- struct file *fp = NULL;
- mm_segment_t old_fs;
- loff_t pos = 0;
- unsigned int wr_size = 0;
- char dump_path[128];
- uint32 file_mode;
- unsigned long flags = 0;
- struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
- size_t log_size = 0;
- size_t fspace_remain = 0;
- struct kstat stat;
- char time_str[128];
- char *ts = NULL;
- uint32 remain_len = 0;
+ int i = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* core 0 */
+ i = 0;
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
+
+ /* core 1 */
+ i = 1;
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
+
+ if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
+ arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_BEFORE]));
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_AFTER]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_before) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
+ dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
+ }
+ if (dhd->sssr_dig_buf_after) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
+ dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
+ }
+#endif /* DHD_LOG_DUMP */
+ } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
+ arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
+ arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_BEFORE]));
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_AFTER]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_before) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
+ dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
+ }
+ if (dhd->sssr_dig_buf_after) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
+ dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
+ }
+#endif /* DHD_LOG_DUMP */
+ }
+ return BCME_OK;
+}
+
+void
+dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+
+ if (dhdp->sssr_dump_collected) {
+ dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
+ }
+}
+#endif /* DHD_SSSR_DUMP */
+
+uint32
+dhd_get_time_str_len()
+{
+ char *ts = NULL, time_str[128];
+
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, sizeof(time_str),
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ return strlen(time_str);
+}
+
+#ifdef BCMPCIE
+uint32
+dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info = NULL;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->extended_trap_data) {
+ length = (strlen(EXT_TRAP_LOG_HDR)
+ + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+ return length;
+}
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+uint32
+dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ length = (strlen(HEALTH_CHK_LOG_HDR)
+ + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
+ }
+ return length;
+}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+uint32
+dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ uint32 remain_len = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: error getting concise debug info !\n",
+ __FUNCTION__));
+ return length;
+ }
+ length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
+ (CONCISE_DUMP_BUFLEN - remain_len));
+ }
+ return length;
+}
+
+uint32
+dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ length = dhd_log_dump_cookie_len(dhdp);
+ }
+ return length;
+
+}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+uint32
+dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ uint16 h2d_flowrings_total;
+ uint32 remain_len = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: error getting concise debug info !\n",
+ __FUNCTION__));
+ return length;
+ }
+ }
+
+ length += strlen(FLOWRING_DUMP_HDR);
+ length += CONCISE_DUMP_BUFLEN - remain_len;
+ length += sizeof(sec_hdr);
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
+ length += ((H2DRING_TXPOST_ITEMSIZE
+ * H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
+ + (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
+ + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
+ + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
+ + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
+ + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
+#ifdef EWP_EDL
+ + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+#else
+ + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
+ + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
+#endif /* EWP_EDL */
+ return length;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_ECNTRS_LOGGING
+uint32
+dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
+ length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
+}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+uint32
+dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
+ length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
+}
+#endif /* EWP_RTT_LOGGING */
+
+int
+dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, int type, void *pos)
+{
+ int ret = BCME_OK;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ dld_buf = &g_dld_buf[type];
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ } else if (!dhdp) {
+ return BCME_ERROR;
+ }
DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+ dhd_init_sec_hdr(&sec_hdr);
+
+ /* write the section header first */
+ ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
+ strlen(dld_hdrs[type].hdr_str), pos);
+ if (ret < 0)
+ goto exit;
+ len -= (uint32)strlen(dld_hdrs[type].hdr_str);
+ len -= (uint32)sizeof(sec_hdr);
+ sec_hdr.type = dld_hdrs[type].sec_type;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+ ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+
+exit:
+ return ret;
+}
+
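+/* Flushes pending log trace/EDL work so that firmware events already
+ * queued in the rings are processed before the dump is written; on a
+ * dongle trap it first pulls the preserve logs from the extended trap
+ * data (ETD).
+ */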
+static int
+dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ unsigned long flags = 0;
+#ifdef EWP_EDL
+ int i = 0;
+#endif /* EWP_EDL */
+ dhd_info_t *dhd_info = NULL;
+
/* if dhdp is null, it's extremely unlikely that log dump will be scheduled
* so not freeing 'type' here is ok, even if we want to free 'type'
* we cannot do so, since 'dhdp->osh' is unavailable
@@ -18515,21 +18403,9 @@
return BCME_ERROR;
}
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- MFREE(dhdp->osh, type, sizeof(*type));
- DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
- return BCME_ERROR;
- }
- DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
dhd_info = (dhd_info_t *)dhdp->info;
/* in case of trap get preserve logs from ETD */
-#if defined(BCMPCIE) && defined(DEBUGABILITY_ETD_PRSRV_LOGS)
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
if (dhdp->dongle_trap_occured &&
dhdp->extended_trap_data) {
dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
@@ -18540,7 +18416,27 @@
/* flush the event work items to get any fw events/logs
* flush_work is a blocking call
*/
- flush_delayed_work(&dhd_info->event_log_dispatcher_work);
+#ifdef EWP_EDL
+ if (dhd_info->pub.dongle_edl_support) {
+ /* wait till existing edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ /* dhd_flush_logtrace_process will ensure the work items in the ring
+ * (EDL ring) from rd to wr are processed. But if wr had
+ * wrapped around, only the work items from rd to ring-end are processed.
+ * So to ensure that the work items at the
+ * beginning of ring are also processed in the wrap around case, call
+ * it twice
+ */
+ for (i = 0; i < 2; i++) {
+ /* blocks till the edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ }
+ } else {
+ dhd_flush_logtrace_process(dhd_info);
+ }
+#else
+ dhd_flush_logtrace_process(dhd_info);
+#endif /* EWP_EDL */
#ifdef CUSTOMER_HW4_DEBUG
/* print last 'x' KB of preserve buffer data to kmsg console
@@ -18550,46 +18446,462 @@
dhd_log_dump_print_tail(dhdp,
&g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
#endif /* CUSTOMER_HW4_DEBUG */
+ return BCME_OK;
+}
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
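+/* Builds the debug dump file path based on debug_dump_subcmd
+ * (unwanted/disconnected/default) and appends a timestamp unless
+ * periodic flushing is enabled.
+ */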
+int
+dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
+{
+ dhd_info_t *dhd_info;
- /* Init file name */
- memset(dump_path, 0, sizeof(dump_path));
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ memset(dump_path, 0, size);
+
switch (dhdp->debug_dump_subcmd) {
case CMD_UNWANTED:
- snprintf(dump_path, sizeof(dump_path), "%s",
+ snprintf(dump_path, size, "%s",
DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
DHD_DUMP_SUBSTR_UNWANTED);
break;
case CMD_DISCONNECTED:
- snprintf(dump_path, sizeof(dump_path), "%s",
+ snprintf(dump_path, size, "%s",
DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
DHD_DUMP_SUBSTR_DISCONNECTED);
break;
default:
- snprintf(dump_path, sizeof(dump_path), "%s",
+ snprintf(dump_path, size, "%s",
DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
}
if (!dhdp->logdump_periodic_flush) {
get_debug_dump_time(dhdp->debug_dump_time_str);
- snprintf(dump_path, sizeof(dump_path), "%s_" "%s",
- dump_path, dhdp->debug_dump_time_str);
+ snprintf(dump_path + strlen(dump_path),
+ size - strlen(dump_path),
+ "_%s", dhdp->debug_dump_time_str);
}
+ return BCME_OK;
+}
- memset(time_str, 0, sizeof(time_str));
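+/* Returns the number of bytes one dld buffer will contribute to the dump:
+ * the data written so far (the whole buffer if it has wrapped around)
+ * plus its header string and section header.
+ */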
+uint32
+dhd_get_dld_len(int log_type)
+{
+ unsigned long wr_size = 0;
+ unsigned long buf_size = 0;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+
+ /* calculate the length of the log */
+ dld_buf = &g_dld_buf[log_type];
+ buf_size = (unsigned long)dld_buf->max -
+ (unsigned long)dld_buf->buffer;
+
+ if (dld_buf->wraparound) {
+ wr_size = buf_size;
+ } else {
+ /* need to hold the lock before accessing 'present' and 'remain' ptrs */
+ spin_lock_irqsave(&dld_buf->lock, flags);
+ wr_size = (unsigned long)dld_buf->present -
+ (unsigned long)dld_buf->front;
+ spin_unlock_irqrestore(&dld_buf->lock, flags);
+ }
+ return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
+}
+
+static void
+dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
+{
+ char *ts = NULL;
+ memset(time_str, 0, size);
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, size,
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+}
+
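+/* Writes the "LOG DUMP TAKEN AT" timestamp header to the file or
+ * user buffer.
+ */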
+int
+dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
+{
+ char *ts = NULL;
+ int ret = 0;
+ char time_str[128];
+
+ memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
ts = dhd_log_dump_get_timestamp();
snprintf(time_str, sizeof(time_str),
"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ /* write the timestamp hdr to the file first */
+ ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ }
+ return ret;
+}
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
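+/* Writes the health check section (header string, section header and the
+ * stored health check event data) when the last memdump was triggered by
+ * a dongle-host event.
+ */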
+int
+dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
+ strlen(HEALTH_CHK_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
+ sec_hdr.length = HEALTH_CHK_BUF_SIZE;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+exit:
+ return ret;
+}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+#ifdef BCMPCIE
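+/* Writes the extended trap data section when a dongle trap has occurred
+ * and extended trap data is available.
+ */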
+int
+dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ /* append extended trap data to the file in case of traps */
+ if (dhdp->dongle_trap_occured &&
+ dhdp->extended_trap_data) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
+ strlen(EXT_TRAP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
+ sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+ ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+exit:
+ return ret;
+}
+#endif /* BCMPCIE */
+
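+/* Writes the concise 'dhd dump' section: header string, section header,
+ * then the output of dhd_dump() from the concise debug buffer.
+ */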
+int
+dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+
+ if (dhdp->concise_dbg_buf) {
+ dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
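+/* Writes the stored log dump cookies, if any, to the file or user buffer. */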
+int
+dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
+ }
+ return ret;
+}
+
+#ifdef DHD_DUMP_PCIE_RINGS
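+/* Writes the flow ring section: header string, ring summary from the
+ * concise debug buffer, section header, then the raw H2D/D2H ring
+ * contents via dhd_d2h_h2d_ring_dump().
+ */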
+int
+dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ uint32 remain_len = 0;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
+
+ /* write the section header first */
+ ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
+ strlen(FLOWRING_DUMP_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ /* Write the ring summary */
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
+ (CONCISE_DUMP_BUFLEN - remain_len), pos);
+ if (ret < 0)
+ goto exit;
+
+ sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ /* write the log */
+ ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
+ if (ret < 0)
+ goto exit;
+
+exit:
+ return ret;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_ECNTRS_LOGGING
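+/* Dumps the ecounters debug ring into its log dump section when
+ * ecounters logging is enabled.
+ */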
+int
+dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
+ sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
+ ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
+ user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
+ }
+ return ret;
+
+}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
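+/* Dumps the RTT debug ring into its log dump section when RTT logging
+ * is enabled.
+ */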
+int
+dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
+ user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
+ }
+ return ret;
+
+}
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
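+/* Status logging helpers: dhd_print_status_log_data() writes the statlog
+ * records to the file or user buffer, dhd_get_status_log_len() returns
+ * the buffer length needed for them.
+ */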
+int
+dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp) {
+ return BCME_ERROR;
+ }
+
+ return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
+}
+
+uint32
+dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ uint32 length = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (dhdp) {
+ length = dhd_statlog_get_logbuf_len(dhdp);
+ }
+
+ return length;
+}
+#endif /* DHD_STATUS_LOGGING */
+
+void
+dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
+{
+ /* prep the section header */
+ memset(sec_hdr, 0, sizeof(*sec_hdr));
+ sec_hdr->magic = LOG_DUMP_MAGIC;
+ sec_hdr->timestamp = local_clock();
+}
+
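+/* Collects the complete log dump: marks the bus busy, flushes pending
+ * log trace work, opens (or seeks into) the dump file, then writes the
+ * timestamp followed by the dld buffers, ecounters, statlog, RTT,
+ * extended trap, health check, dhd dump, cookie and flow ring sections,
+ * before releasing the busy state.
+ */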
+/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
+static int
+do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ int ret = 0, i = 0;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+ char dump_path[128];
+ uint32 file_mode;
+ unsigned long flags = 0;
+ size_t log_size = 0;
+ size_t fspace_remain = 0;
+ struct kstat stat;
+ char time_str[128];
+ unsigned int len = 0;
+ log_dump_section_hdr_t sec_hdr;
+
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ goto exit1;
+ }
+ DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ if ((ret = dhd_log_flush(dhdp, type)) < 0) {
+ goto exit1;
+ }
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
+
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
DHD_ERROR(("DHD version: %s\n", dhd_version));
DHD_ERROR(("F/W version: %s\n", fw_version));
- DHD_ERROR(("debug_dump_path = %s\n", dump_path));
dhd_log_dump_buf_addr(dhdp, type);
+ dhd_get_time_str(dhdp, time_str, sizeof(time_str));
+
/* if this is the first time after dhd is loaded,
* or, if periodic flush is disabled, clear the log file
*/
@@ -18614,20 +18926,20 @@
if (IS_ERR(fp)) {
ret = PTR_ERR(fp);
DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit;
+ goto exit2;
}
DHD_ERROR(("debug_dump_path = %s\n", dump_path));
#else
ret = PTR_ERR(fp);
DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit;
+ goto exit2;
#endif /* CONFIG_X86 && OEM_ANDROID */
}
ret = vfs_stat(dump_path, &stat);
if (ret < 0) {
DHD_ERROR(("file stat error, err = %d\n", ret));
- goto exit;
+ goto exit2;
}
/* if some one else has changed the file */
@@ -18661,7 +18973,7 @@
ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
if (ret < 0) {
DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
- goto exit;
+ goto exit2;
}
pos = fp->f_pos;
@@ -18674,159 +18986,107 @@
pos = fp->f_pos;
}
}
- /* write the timestamp hdr to the file first */
- ret = vfs_write(fp, time_str, strlen(time_str), &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
- /* prep the section header */
- memset(&sec_hdr, 0, sizeof(sec_hdr));
- sec_hdr.magic = LOG_DUMP_MAGIC;
- sec_hdr.timestamp = local_clock();
+ dhd_print_time_str(0, fp, len, &pos);
for (i = 0; i < DLD_BUFFER_NUM; ++i) {
- unsigned int buf_size = 0;
if (*type != DLD_BUF_TYPE_ALL && i != *type)
continue;
- /* calculate the length of the log */
- dld_buf = &g_dld_buf[i];
- buf_size = (unsigned long)dld_buf->max -
- (unsigned long)dld_buf->buffer;
- if (dld_buf->wraparound) {
- wr_size = buf_size;
- } else {
- /* need to hold the lock before accessing 'present' and 'remain' ptrs */
- spin_lock_irqsave(&dld_buf->lock, flags);
- wr_size = (unsigned long)dld_buf->present -
- (unsigned long)dld_buf->front;
- spin_unlock_irqrestore(&dld_buf->lock, flags);
- }
-
- /* write the section header first */
- sec_hdr.type = dld_hdrs[i].sec_type;
- sec_hdr.length = wr_size;
- vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos);
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
-
- /* re-init dhd_log_dump_buf structure */
- spin_lock_irqsave(&dld_buf->lock, flags);
- dld_buf->wraparound = 0;
- dld_buf->present = dld_buf->front;
- dld_buf->remain = buf_size;
- bzero(dld_buf->buffer, buf_size);
- spin_unlock_irqrestore(&dld_buf->lock, flags);
-
+ len = dhd_get_dld_len(i);
+ dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
if (*type != DLD_BUF_TYPE_ALL)
break;
}
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
/* periodic flushing of ecounters is NOT supported */
if (*type == DLD_BUF_TYPE_ALL &&
logdump_ecntr_enable &&
dhdp->ecntr_dbg_ring) {
dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
- fp, (unsigned long *)&pos, &sec_hdr);
+ fp, (unsigned long *)&pos,
+ &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
-#ifdef BCMPCIE
- /* append extended trap data to the file in case of traps */
- if (dhdp->dongle_trap_occured &&
- dhdp->extended_trap_data) {
- /* write the section header first */
- vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
- sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, (char *)dhdp->extended_trap_data,
- BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of ext trap info,"
- " err = %d\n", ret));
- goto exit;
- }
- }
-#endif /* BCMPCIE */
-
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- /* if health check event was received, dump to file */
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- /* write the section header first */
- vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
- sec_hdr.length = HEALTH_CHK_BUF_SIZE;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, (char *)dhdp->health_chk_event_data,
- HEALTH_CHK_BUF_SIZE, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of health chk info,"
- " err = %d\n", ret));
- goto exit;
- }
- }
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
-
- /* append the concise debug information to the file.
- * This is the information which is seen
- * when a 'dhd dump' iovar is fired
- */
- if (dhdp->concise_dbg_buf) {
- remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- if (remain_len <= 0) {
- DHD_ERROR(("%s: error getting concise debug info !\n",
- __FUNCTION__));
- goto exit;
- } else {
- /* write the section header first */
- vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
- sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, dhdp->concise_dbg_buf,
- CONCISE_DUMP_BUFLEN - remain_len, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of concise debug info,"
- " err = %d\n", ret));
- goto exit;
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ /* write the statlog */
+ len = dhd_get_status_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
+ len, &pos) < 0) {
+ goto exit2;
}
}
}
+#endif /* DHD_STATUS_LOGGING */
- if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
- ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of cooke info, err = %d\n", ret));
- goto exit;
- }
+#ifdef EWP_RTT_LOGGING
+ /* periodic flushing of rtt logs is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_rtt_enable &&
+ dhdp->rtt_dbg_ring) {
+ dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
+ fp, (unsigned long *)&pos,
+ &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
}
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef BCMPCIE
+ len = dhd_get_ext_trap_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) && defined(BCMPCIE)
+ len = dhd_get_health_chk_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT && BCMPCIE */
+
+ len = dhd_get_dhd_dump_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+
+ len = dhd_get_cookie_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif // endif
if (dhdp->logdump_periodic_flush) {
/* store the last position written to in the file for future use */
dhdp->last_file_posn = pos;
}
-exit:
- MFREE(dhdp->osh, type, sizeof(*type));
+exit2:
if (!IS_ERR(fp) && fp != NULL) {
filp_close(fp, NULL);
DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
__FUNCTION__, dump_path));
}
set_fs(old_fs);
+exit1:
+ if (type) {
+ MFREE(dhdp->osh, type, sizeof(*type));
+ }
DHD_GENERAL_LOCK(dhdp, flags);
DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
dhd_os_busbusy_wake(dhdp);
@@ -18842,51 +19102,34 @@
}
#endif /* DHD_LOG_DUMP */
-#ifdef BCMASSERT_LOG
-#ifdef CUSTOMER_HW4_DEBUG
-#define ASSERTINFO PLATFORM_PATH".assert.info"
-#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
-#define ASSERTINFO "/data/misc/wifi/.assert.info"
-#elif defined(OEM_ANDROID)
-#define ASSERTINFO "/installmedia/.assert.info"
-#else
-#define ASSERTINFO "/root/.assert.info"
-#endif /* CUSTOMER_HW4_DEBUG */
-void dhd_get_assert_info(dhd_pub_t *dhd)
+/* This function writes data to the file pointed by fp, OR
+ * copies data to the user buffer sent by upper layer(HAL).
+ */
+int
+dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
{
- struct file *fp = NULL;
- char *filepath = ASSERTINFO;
- int mem_val = -1;
+ int ret = BCME_OK;
- /*
- * Read assert info from the file
- * 0: Trigger Kernel crash by panic()
- * 1: Print out the logs and don't trigger Kernel panic. (default)
- * 2: Trigger Kernel crash by BUG()
- * File doesn't exist: Keep default value (1).
- */
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- } else {
- int ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4);
+ if (fp) {
+ ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- } else {
- mem_val = bcm_atoi((char *)&mem_val);
- DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
}
- filp_close(fp, NULL);
+ } else {
+ ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
+ mem_buf, buf_len);
+ if (ret) {
+ DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+ goto exit;
+ }
+ (*(int *)pos) += buf_len;
}
-#ifdef CUSTOMER_HW4_DEBUG
- /* By default. set to 1, No Kernel Panic */
- g_assert_type = (mem_val >= 0) ? mem_val : 1;
-#else
- /* By default. set to 0, Kernel Panic */
- g_assert_type = (mem_val >= 0) ? mem_val : 0;
-#endif // endif
+exit:
+ return ret;
}
-#endif /* BCMASSERT_LOG */
/*
* This call is to get the memdump size so that,
@@ -18934,9 +19177,7 @@
orig_len = *size;
if (dhdp->soc_ram) {
if (orig_len >= dhdp->soc_ram_length) {
- memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
- /* reset the storage of dump */
- memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
+ *buf = dhdp->soc_ram;
*size = dhdp->soc_ram_length;
} else {
ret = BCME_BUFTOOSHORT;
@@ -18972,6 +19213,92 @@
return BCME_OK;
}
+#ifdef DHD_PKT_LOGGING
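+/* Packet log dump helpers: copy the pktlog dump into the user buffer
+ * supplied by the upper layer, report its length and its file name.
+ */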
+int
+dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ if (user_buf == NULL) {
+ DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
+ if (ret < 0) {
+ DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return ret;
+}
+
+uint32
+dhd_os_get_pktlog_dump_size(struct net_device *dev)
+{
+ uint32 size = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ size = dhd_pktlog_get_dump_length(dhdp);
+ if (size == 0) {
+ DHD_ERROR(("%s(): fail to get pktlog size, err = %d\n", __FUNCTION__, size));
+ }
+ return size;
+}
+
+void
+dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ dhd_pktlog_get_filename(dhdp, dump_path, len);
+}
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
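+/* AXI error dump helpers: copy the stored dhd_axi_error_dump_t to the
+ * user buffer, report its size and build the dump file name.
+ */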
+int
+dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ loff_t pos = 0;
+ if (user_buf == NULL) {
+ DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
+ NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s(): fail to dump axi error, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return ret;
+}
+
+int
+dhd_os_get_axi_error_dump_size(struct net_device *dev)
+{
+ int size = -1;
+
+ size = sizeof(dhd_axi_error_dump_t);
+ if (size < 0) {
+ DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
+ }
+ return size;
+}
+
+void
+dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
+{
+ snprintf(dump_path, len, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
{
return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
@@ -19298,6 +19625,182 @@
}
#endif // endif
+#if defined(ARGOS_NOTIFY_CB)
+
+static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v);
+static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v);
+
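+/* Registers the ARGOS throughput notifiers for the WIFI and P2P tables;
+ * if the P2P registration fails, the WIFI notifier is unregistered again.
+ */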
+int
+argos_register_notifier_init(struct net_device *net)
+{
+ int ret = 0;
+
+ DHD_INFO(("DHD: %s: \n", __FUNCTION__));
+ argos_rps_ctrl_data.wlan_primary_netdev = net;
+ argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
+
+ if (argos_wifi.notifier_call == NULL) {
+ argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
+ ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
+ if (ret < 0) {
+ DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ if (argos_p2p.notifier_call == NULL) {
+ argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
+ ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
+ if (ret < 0) {
+ DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
+ sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
+ goto exit;
+ }
+ }
+
+ return 0;
+
+exit:
+ if (argos_wifi.notifier_call) {
+ argos_wifi.notifier_call = NULL;
+ }
+
+ if (argos_p2p.notifier_call) {
+ argos_p2p.notifier_call = NULL;
+ }
+
+ return ret;
+}
+
+int
+argos_register_notifier_deinit(void)
+{
+ DHD_INFO(("DHD: %s: \n", __FUNCTION__));
+
+ if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
+ DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
+ return -1;
+ }
+#ifndef DHD_LB
+ custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
+#endif /* !DHD_LB */
+
+ if (argos_p2p.notifier_call) {
+ sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
+ argos_p2p.notifier_call = NULL;
+ }
+
+ if (argos_wifi.notifier_call) {
+ sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
+ argos_wifi.notifier_call = NULL;
+ }
+
+ argos_rps_ctrl_data.wlan_primary_netdev = NULL;
+ argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
+
+ return 0;
+}
+
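+/* ARGOS throughput notifier: above RPS_TPUT_THRESHOLD it pins RX
+ * processing to RPS_CPUS_MASK via a custom RPS map (unless DHD_LB is
+ * enabled) and, where TCP ACK suppression is built in, holds it;
+ * below the threshold both settings are reverted.
+ */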
+int
+argos_status_notifier_wifi_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v)
+{
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+#if defined(ARGOS_NOTIFY_CB)
+ unsigned int pcie_irq = 0;
+#endif /* ARGOS_NOTIFY_CB */
+ DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
+
+ if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
+ goto exit;
+ }
+
+ dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
+ if (dhd == NULL) {
+ goto exit;
+ }
+
+ dhdp = &dhd->pub;
+ if (dhdp == NULL || !dhdp->up) {
+ goto exit;
+ }
+ /* Check if reported TPut value is more than threshold value */
+ if (speed > RPS_TPUT_THRESHOLD) {
+ if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
+ /* rps_cpus does not need to be configured
+ * if Load Balance is enabled
+ */
+#ifndef DHD_LB
+ int err = 0;
+
+ if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
+ err = custom_rps_map_set(
+ argos_rps_ctrl_data.wlan_primary_netdev->_rx,
+ RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
+ } else {
+ DHD_ERROR(("DHD: %s: RPS_Set fail,"
+ " Core=%d Offline\n", __FUNCTION__,
+ RPS_CPUS_WLAN_CORE_ID));
+ err = -1;
+ }
+
+ if (err < 0) {
+ DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
+ "speed=%ld, error=%d\n",
+ __FUNCTION__, speed, err));
+ } else {
+#endif /* !DHD_LB */
+#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_HOLD(%d)\n",
+ __FUNCTION__, TCPACK_SUP_HOLD));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
+ }
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
+ argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
+#ifndef DHD_LB
+ DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
+ __FUNCTION__, speed));
+ }
+#endif /* !DHD_LB */
+ }
+ } else {
+ if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
+#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
+ DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
+ __FUNCTION__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
+#ifndef DHD_LB
+ /* rps_cpus does not need to be configured
+ * if Load Balance is enabled
+ */
+ custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
+ DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
+ OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
+#endif /* !DHD_LB */
+ argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
+ }
+ }
+
+exit:
+ return NOTIFY_OK;
+}
+
+int
+argos_status_notifier_p2p_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v)
+{
+ DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
+ return argos_status_notifier_wifi_cb(notifier, speed, v);
+}
+#endif // endif
+
#ifdef DHD_DEBUG_PAGEALLOC
void
@@ -19356,118 +19859,13 @@
return NULL;
}
-#ifdef DHD_DHCP_DUMP
-static void
-dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
-{
- struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
- struct iphdr *h = &b->ip_header;
- uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
- int dhcp_type = 0, len, opt_len;
-
- /* check IP header */
- if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
- return;
- }
-
- /* check UDP port for bootp (67, 68) */
- if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
- b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
- return;
- }
-
- /* check header length */
- if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
- return;
- }
-
- len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
- opt_len = len
- - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
-
- /* parse bootp options */
- if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
- ptr = &b->options[4];
- while (ptr < end && *ptr != 0xff) {
- opt = ptr++;
- if (*opt == 0) {
- continue;
- }
- ptr += *ptr + 1;
- if (ptr >= end) {
- break;
- }
- /* 53 is dhcp type */
- if (*opt == 53) {
- if (opt[1]) {
- dhcp_type = opt[2];
- DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
- ifname, dhcp_types[dhcp_type],
- tx ? "TX" : "RX", dhcp_ops[b->op]));
- break;
- }
- }
- }
- }
-}
-#endif /* DHD_DHCP_DUMP */
-
-#ifdef DHD_ICMP_DUMP
-static void
-dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
-{
- uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
- struct iphdr *iph = (struct iphdr *)pkt;
- struct icmphdr *icmph;
-
- /* check IP header */
- if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
- return;
- }
-
- icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
- if (icmph->type == ICMP_ECHO) {
- DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
- ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
- } else if (icmph->type == ICMP_ECHOREPLY) {
- DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
- ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
- } else {
- DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
- ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
- }
-}
-#endif /* DHD_ICMP_DUMP */
-
-#ifdef SHOW_LOGTRACE
-void
-dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
-{
- dhd_dbg_ring_status_t ring_status;
- uint32 rlen;
-
- rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
- TRACE_LOG_BUF_MAX_SIZE, TRUE);
- trace_buf_info->size = rlen;
- trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
- if (rlen == 0) {
- trace_buf_info->availability = BUF_NOT_AVAILABLE;
- return;
- }
- dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
- if (ring_status.written_bytes != ring_status.read_bytes) {
- trace_buf_info->availability = NEXT_BUF_AVAIL;
- }
-}
-#endif /* SHOW_LOGTRACE */
-
-bool
+fw_download_status_t
dhd_fw_download_status(dhd_pub_t * dhd_pub)
{
- return dhd_pub->fw_download_done;
+ return dhd_pub->fw_download_status;
}
-int
+static int
dhd_create_to_notifier_skt(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
@@ -19517,86 +19915,43 @@
{
struct nlmsghdr *nlh;
struct sk_buff *skb_out;
+ int ret = BCME_ERROR;
BCM_REFERENCE(skb);
if (sender_pid == 0) {
DHD_INFO(("Invalid PID 0\n"));
- return -1;
+ skb_out = NULL;
+ goto err;
}
if ((skb_out = nlmsg_new(size, 0)) == NULL) {
DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
- return -1;
+ ret = BCME_NOMEM;
+ goto err;
}
nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+ if (nlh == NULL) {
+ DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
+ goto err;
+ }
NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
- memcpy(nlmsg_data(nlh), (char *)data, size);
+ (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
- if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
- DHD_INFO(("Error sending message\n"));
+ if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+ DHD_ERROR(("Error sending message, ret:%d\n", ret));
+ /* nlmsg_unicast() already frees skb_out on the error path, so
+ * explicitly set skb_out to NULL here to avoid a double free
+ */
+ skb_out = NULL;
+ goto err;
}
- return 0;
-}
-
-static ssize_t
-show_enable_ecounter(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
-
- onoff = enable_ecounter;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
+ return BCME_OK;
+err:
+ if (skb_out) {
+ nlmsg_free(skb_out);
+ }
return ret;
}
-static ssize_t
-ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- dhd_pub_t *dhdp;
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return count;
- }
- dhdp = &dhd->pub;
- if (!FW_SUPPORTED(dhdp, ecounters)) {
- DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
- return count;
- }
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
-
- if (enable_ecounter == onoff) {
- DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
- return count;
- }
-
- enable_ecounter = onoff;
- if (enable_ecounter) {
- if (dhd_start_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
- } else if (dhd_start_event_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
- }
- } else {
- if (dhd_stop_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
- } else if (dhd_stop_event_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
- }
- }
-
- return count;
-}
-
static void
dhd_process_daemon_msg(struct sk_buff *skb)
{
@@ -19617,6 +19972,12 @@
return (bool)logdump_ecntr_enable;
}
+bool
+dhd_log_dump_rtt_enabled(void)
+{
+ return (bool)logdump_rtt_enable;
+}
+
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
@@ -19709,7 +20070,7 @@
dld_buf->enable = 1;
}
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
/* now use the rest of the pre-alloc'd memory for filter and ecounter log */
dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
if (!dhd->ecntr_dbg_ring)
@@ -19718,7 +20079,7 @@
ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
- bufptr);
+ bufptr, TRUE);
if (ret != BCME_OK) {
DHD_ERROR(("%s: unable to init ecntr ring !\n",
__FUNCTION__));
@@ -19730,7 +20091,30 @@
DHD_DBG_RING_UNLOCK(ring->lock, flags);
bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ /* use the next portion of the pre-alloc'd memory for the rtt log ring */
+ dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
+ if (!dhd->rtt_dbg_ring)
+ goto fail;
+
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
+ RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
+ bufptr, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: unable to init rtt ring !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ ring->threshold = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
+#endif /* EWP_RTT_LOGGING */
/* Concise buffer is used as intermediate buffer for following purposes
* a) pull ecounters records temporarily before
@@ -19785,7 +20169,7 @@
MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
}
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
if (dhd->ecntr_dbg_ring) {
ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
dhd_dbg_ring_deinit(dhd, ring);
@@ -19794,7 +20178,18 @@
MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
dhd->ecntr_dbg_ring = NULL;
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (dhd->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ dhd->rtt_dbg_ring = NULL;
+ }
+#endif /* EWP_RTT_LOGGING */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
if (prealloc_buf) {
@@ -19849,7 +20244,7 @@
}
#endif /* DHD_EVENT_LOG_FILTER */
-#ifdef DEBUGABILITY_ECNTRS_LOGGING
+#ifdef EWP_ECNTRS_LOGGING
if (dhd->ecntr_dbg_ring) {
ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
dhd_dbg_ring_deinit(dhd, ring);
@@ -19858,7 +20253,18 @@
MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
dhd->ecntr_dbg_ring = NULL;
}
-#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (dhd->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ dhd->rtt_dbg_ring = NULL;
+ }
+#endif /* EWP_RTT_LOGGING */
/* 'general' buffer points to start of the pre-alloc'd memory */
dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
@@ -19968,9 +20374,9 @@
unsigned long rem_nsec;
ts_nsec = local_clock();
- rem_nsec = do_div(ts_nsec, 1000000000);
+ rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
snprintf(buf, sizeof(buf), "%5lu.%06lu",
- (unsigned long)ts_nsec, rem_nsec / 1000);
+ (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
return buf;
}
@@ -19994,379 +20400,6 @@
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_LB_TXP
-#define DHD_LB_TXBOUND 64
-/*
- * Function that performs the TX processing on a given CPU
- */
-bool
-dhd_lb_tx_process(dhd_info_t *dhd)
-{
- struct sk_buff *skb;
- int cnt = 0;
- struct net_device *net;
- int ifidx;
- bool resched = FALSE;
-
- DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
- if (dhd == NULL) {
- DHD_ERROR((" Null pointer DHD \r\n"));
- return resched;
- }
-
- BCM_REFERENCE(net);
-
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
-
- /* Base Loop to perform the actual Tx */
- do {
- skb = skb_dequeue(&dhd->tx_pend_queue);
- if (skb == NULL) {
- DHD_TRACE(("Dequeued a Null Packet \r\n"));
- break;
- }
- cnt++;
-
- net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
- ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
- net, ifidx));
-
- __dhd_sendpkt(&dhd->pub, ifidx, skb);
-
- if (cnt >= DHD_LB_TXBOUND) {
- resched = TRUE;
- break;
- }
-
- } while (1);
-
- DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
-
- return resched;
-}
-
-void
-dhd_lb_tx_handler(unsigned long data)
-{
- dhd_info_t *dhd = (dhd_info_t *)data;
-
- if (dhd_lb_tx_process(dhd)) {
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- }
-}
-
-#endif /* DHD_LB_TXP */
-
-/* ----------------------------------------------------------------------------
- * Infrastructure code for sysfs interface support for DHD
- *
- * What is sysfs interface?
- * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
- *
- * Why sysfs interface?
- * This is the Linux standard way of changing/configuring Run Time parameters
- * for a driver. We can use this interface to control "linux" specific driver
- * parameters.
- *
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-
-/* Function to show the history buffer */
-static ssize_t
-show_wklock_trace(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- buf[ret] = '\n';
- buf[ret+1] = 0;
-
- dhd_wk_lock_stats_dump(&dhd->pub);
- return ret+1;
-}
-
-/* Function to enable/disable wakelock trace */
-static ssize_t
-wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- unsigned long flags;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = bcm_strtoul(buf, NULL, 10);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
-
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- trace_wklock_onoff = onoff;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- if (trace_wklock_onoff) {
- printk("ENABLE WAKLOCK TRACE\n");
- } else {
- printk("DISABLE WAKELOCK TRACE\n");
- }
-
- return (ssize_t)(onoff+1);
-}
-#endif /* DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static ssize_t
-show_lbtxp(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = atomic_read(&dhd->lb_txp_active);
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- int i;
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
- atomic_set(&dhd->lb_txp_active, onoff);
-
- /* Since the scheme is changed clear the counters */
- for (i = 0; i < NR_CPUS; i++) {
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
- }
-
- return count;
-}
-
-#endif /* DHD_LB_TXP */
-
-#ifdef DHD_LOG_DUMP
-static ssize_t
-show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_periodic_flush;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_periodic_flush = val;
- return count;
-}
-static ssize_t
-show_logdump_ecntr(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_ecntr_enable;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_ecntr_enable = val;
- return count;
-}
-
-#endif /* DHD_LOG_DUMP */
-/*
- * Generic Attribute Structure for DHD.
- * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
- * to instantiate an object of type dhd_attr, populate it with
- * the required show/store functions (ex:- dhd_attr_cpumask_primary)
- * and add the object to default_attrs[] array, that gets registered
- * to the kobject of dhd (named bcm-dhd).
- */
-
-struct dhd_attr {
- struct attribute attr;
- ssize_t(*show)(struct dhd_info *, char *);
- ssize_t(*store)(struct dhd_info *, const char *, size_t count);
-};
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-static struct dhd_attr dhd_attr_wklock =
- __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
-#endif /* defined(DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static struct dhd_attr dhd_attr_lbtxp =
- __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
-#endif /* DHD_LB_TXP */
-#ifdef DHD_LOG_DUMP
-static struct dhd_attr dhd_attr_logdump_periodic_flush =
- __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
- logdump_periodic_flush_onoff);
-static struct dhd_attr dhd_attr_logdump_ecntr =
- __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
- logdump_ecntr_onoff);
-#endif /* DHD_LOG_DUMP */
-
-static struct dhd_attr dhd_attr_ecounters =
- __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
-
-/* Attribute object that gets registered with "bcm-dhd" kobject tree */
-static struct attribute *default_attrs[] = {
-#if defined(DHD_TRACE_WAKE_LOCK)
- &dhd_attr_wklock.attr,
-#endif // endif
-#if defined(DHD_LB_TXP)
- &dhd_attr_lbtxp.attr,
-#endif /* DHD_LB_TXP */
-#ifdef DHD_LOG_DUMP
- &dhd_attr_logdump_periodic_flush.attr,
- &dhd_attr_logdump_ecntr.attr,
-#endif // endif
- &dhd_attr_ecounters.attr,
- NULL
-};
-
-#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
-#define to_attr(a) container_of(a, struct dhd_attr, attr)
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the show function is called.
- */
-static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->show)
- ret = d_attr->show(dhd, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the store function is called.
- */
-static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->store)
- ret = d_attr->store(dhd, buf, count);
- else
- ret = -EIO;
-
- return ret;
-
-}
-
-static struct sysfs_ops dhd_sysfs_ops = {
- .show = dhd_show,
- .store = dhd_store,
-};
-
-static struct kobj_type dhd_ktype = {
- .sysfs_ops = &dhd_sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-/* Create a kobject and attach to sysfs interface */
-static int dhd_sysfs_init(dhd_info_t *dhd)
-{
- int ret = -1;
-
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return ret;
- }
-
- /* Initialize the kobject */
- ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
- if (ret) {
- kobject_put(&dhd->dhd_kobj);
- DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
- return ret;
- }
-
- /*
- * We are always responsible for sending the uevent that the kobject
- * was added to the system.
- */
- kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
-
- return ret;
-}
-
-/* Done with the kobject and detach the sysfs interface */
-static void dhd_sysfs_exit(dhd_info_t *dhd)
-{
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return;
- }
-
- /* Releae the kobject */
- kobject_put(&dhd->dhd_kobj);
-}
-
#ifdef DHD_DEBUG_UART
bool
dhd_debug_uart_is_running(struct net_device *dev)
@@ -20399,7 +20432,8 @@
if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif // endif
{
- if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
+ if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
+ dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
#ifdef DHD_FW_COREDUMP
dhdp->memdump_success == FALSE ||
#endif // endif
@@ -20429,13 +20463,13 @@
{
struct file *fp;
char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
-
fp = filp_open(filepath, O_RDONLY, 0);
if (IS_ERR(fp)) {
- DHD_ERROR(("%s: ----- blob file dosen't exist -----\n", __FUNCTION__));
+ DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
+ filepath));
dhdp->is_blob = FALSE;
} else {
- DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__));
+ DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
strncat(fw_path, "_blob", strlen("_blob"));
@@ -20480,11 +20514,13 @@
}
#endif /* PCIE_FULL_DONGLE */
/* ---------------------------- End of sysfs implementation ------------------------------------- */
+
#ifdef SET_PCIE_IRQ_CPU_CORE
void
-dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
+dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
{
- unsigned int irq;
+ unsigned int pcie_irq = 0;
+
if (!dhdp) {
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
return;
@@ -20495,11 +20531,40 @@
return;
}
- if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
+ DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
+
+ if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
+ DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
return;
}
- set_irq_cpucore(irq, set);
+ /*
+ * irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
+ * If that core is not online, the PCIe interrupt is scheduled on
+ * CPU core 0 instead.
+ */
+ switch (affinity_cmd) {
+ case PCIE_IRQ_AFFINITY_OFF:
+ break;
+ case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
+#if defined(CONFIG_ARCH_SM8150)
+ irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+#else /* Exynos and Others */
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+#endif /* CONFIG_ARCH_SM8150 */
+ break;
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
+ case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
+ DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
+ __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
+ irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
+ break;
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
+ default:
+ DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
+ __FUNCTION__, affinity_cmd));
+ }
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
@@ -20594,6 +20659,62 @@
return ret;
}
+#ifdef DHD_BANDSTEER
+/*
+ * Function returns BCME_OK only if there are exactly two GO interfaces
+ * TODO: Make it flexible to have AP + AP
+ */
+s32
+dhd_bandsteer_get_ifaces(void *pub, void *ifaces)
+{
+ dhd_if_t *iflist; /* For supporting multiple interfaces */
+ uint8 idx;
+ uint8 ap_idx_count = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *) pub;
+ dhd_bandsteer_iface_info_t *bsd_ifp = (dhd_bandsteer_iface_info_t *)ifaces;
+
+ DHD_INFO(("%s: entered\n", __FUNCTION__));
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ iflist = dhd->info->iflist[idx];
+ if (iflist == NULL) {
+ continue;
+ }
+
+ if (iflist->net != NULL) {
+ if (iflist->net->ieee80211_ptr != NULL) {
+ if ((iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) ||
+ (iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_AP)) {
+ ap_idx_count++;
+ if (ap_idx_count > 2) {
+ continue;
+ }
+ bsd_ifp->ndev = iflist->net;
+ bsd_ifp->bssidx = iflist->bssidx;
+ bsd_ifp++;
+ }
+ }
+ }
+ }
+ if (ap_idx_count == 2) {
+ return BCME_OK;
+ } else {
+ return BCME_ERROR;
+ }
+}
+
+void
+dhd_bandsteer_schedule_work_on_timeout(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
+{
+ dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
+ dhd_pub_t *dhd = (dhd_pub_t *) dhd_bandsteer_cntx->dhd_pub;
+
+ dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
+ (void *)dhd_bandsteer_mac, DHD_WQ_WORK_BANDSTEER_STEP_MOVE,
+ dhd_bandsteer_workqueue_wrapper, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* DHD_BANDSTEER */
+
#ifdef FILTER_IE
int dhd_read_from_file(dhd_pub_t *dhd)
{
@@ -20921,14 +21042,14 @@
DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
dhdp->req_hang_type = reason;
fw_test_code = 102; /* resumed on timeout */
- dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
+ (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
WLC_SET_VAR, TRUE, 0);
break;
case HANG_REASON_DONGLE_TRAP:
DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
dhdp->req_hang_type = reason;
fw_test_code = 99; /* dongle trap */
- dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
+ (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
WLC_SET_VAR, TRUE, 0);
break;
case HANG_REASON_D3_ACK_TIMEOUT:
@@ -20939,7 +21060,8 @@
DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
dhdp->req_hang_type = reason;
break;
- case HANG_REASON_PCIE_LINK_DOWN:
+ case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
+ case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
case HANG_REASON_MSGBUF_LIVELOCK:
dhdp->req_hang_type = 0;
DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
@@ -20965,7 +21087,7 @@
#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef DHD_ERPOM
-void
+static void
dhd_error_recovery(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
@@ -20985,12 +21107,12 @@
return;
}
- ret = dhd_bus_perform_flr_with_quiesce(dhdp);
- if (ret != BCME_OK) {
+ ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
+ if (ret != BCME_DNGL_DEVRESET) {
DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
"toggle REG_ON\n", __FUNCTION__, ret));
/* toggle REG_ON */
- pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
+ dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
return;
}
}
@@ -20998,8 +21120,10 @@
void
dhd_schedule_reset(dhd_pub_t *dhdp)
{
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
- DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
+ if (dhdp->enable_erpom) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
+ }
}
#endif /* DHD_ERPOM */
@@ -21014,8 +21138,8 @@
return;
}
- if (dhd_pktlog_write_file(&dhd->pub)) {
- DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
+ if (dhd_pktlog_dump_write_file(&dhd->pub)) {
+ DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
return;
}
}
@@ -21104,81 +21228,6 @@
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
-#define KIRQ_PRINT_BUF_LEN 256
-
-void
-dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
-{
- unsigned long flags = 0;
- struct irq_desc *desc;
- int i; /* cpu iterator */
- struct bcmstrbuf strbuf;
- char tmp_buf[KIRQ_PRINT_BUF_LEN];
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
- desc = irq_to_desc(irq_num);
- if (!desc) {
- DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
- return;
- }
- bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
- raw_spin_lock_irqsave(&desc->lock, flags);
- bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
- for_each_online_cpu(i)
- bcm_bprintf(&strbuf, "%10u ",
- desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
- if (desc->irq_data.chip) {
- if (desc->irq_data.chip->name)
- bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
- else
- bcm_bprintf(&strbuf, " %8s", "-");
- } else {
- bcm_bprintf(&strbuf, " %8s", "None");
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
- if (desc->irq_data.domain)
- bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
-#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
- bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
-#endif // endif
-#endif /* LINUX VERSION > 3.1.0 */
-
- if (desc->name)
- bcm_bprintf(&strbuf, "-%-8s", desc->name);
-
- DHD_ERROR(("%s\n", strbuf.origbuf));
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-#endif /* LINUX VERSION > 2.6.28 */
-}
-
-void
-dhd_show_kirqstats(dhd_pub_t *dhd)
-{
- unsigned int irq = -1;
-#ifdef BCMPCIE
- dhdpcie_get_pcieirq(dhd->bus, &irq);
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
-#endif /* BCMSDIO */
- if (irq != -1) {
-#ifdef BCMPCIE
- DHD_ERROR(("DUMP data kernel irq stats : \n"));
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
-#endif /* BCMSDIO */
- dhd_print_kirqstats(dhd, irq);
- }
-#ifdef BCMPCIE_OOB_HOST_WAKE
- irq = dhdpcie_get_oob_irq_num(dhd->bus);
- if (irq) {
- DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
- dhd_print_kirqstats(dhd, irq);
- }
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-}
-
void
dhd_print_tasklet_status(dhd_pub_t *dhd)
{
@@ -21211,6 +21260,11 @@
#define DHD_RING_MAGIC 0x20170910
#define DHD_RING_IDX_INVALID 0xffffffff
+#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
+#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
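+/*
+ * Ring synchronization now uses an OSL spinlock rather than a mutex,
+ * presumably so the accessors below can also be used from atomic context.
+ */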
+
typedef struct {
uint32 elem_size;
uint32 elem_cnt;
@@ -21226,11 +21280,27 @@
} dhd_fixed_ring_info_t;
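+/*
+ * Single-index ring: only one index is maintained. Writes wrap around and
+ * overwrite the oldest entries once the ring is full (ring_overwrited is set),
+ * and ring_locked freezes further writes while a reader walks the ring; see
+ * __dhd_singleidx_ring_get_empty() and the whole_lock/unlock helpers below.
+ */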
typedef struct {
+ uint32 elem_size;
+ uint32 elem_cnt;
+ uint32 idx; /* -1 : not started */
+ uint32 rsvd; /* reserved for future use */
+
+ /* protected elements during serialization */
+ atomic_t ring_locked;
+	/* set once the ring wraps and begins overwriting old entries */
+ uint32 ring_overwrited;
+
+ /* saved data elements */
+ void *elem;
+} dhd_singleidx_ring_info_t;
+
+typedef struct {
uint32 magic;
uint32 type;
- struct mutex ring_sync; /* pointer to mutex */
+ void *ring_sync; /* spinlock for sync */
union {
dhd_fixed_ring_info_t fixed;
+ dhd_singleidx_ring_info_t single;
};
} dhd_ring_info_t;
@@ -21241,7 +21311,8 @@
}
void *
-dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt)
+dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type)
{
dhd_ring_info_t *ret_ring;
@@ -21249,29 +21320,46 @@
DHD_RING_ERR(("NO RING BUFFER\n"));
return NULL;
}
+
if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
return NULL;
}
+ if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
+ DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
+ return NULL;
+ }
+
ret_ring = (dhd_ring_info_t *)buf;
- ret_ring->type = DHD_RING_TYPE_FIXED;
- mutex_init(&ret_ring->ring_sync);
- ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
- ret_ring->fixed.elem_size = elem_size;
- ret_ring->fixed.elem_cnt = elem_cnt;
+ ret_ring->type = type;
+ ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
ret_ring->magic = DHD_RING_MAGIC;
+
+ if (type == DHD_RING_TYPE_FIXED) {
+ ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->fixed.elem_size = elem_size;
+ ret_ring->fixed.elem_cnt = elem_cnt;
+ } else {
+ ret_ring->single.idx = DHD_RING_IDX_INVALID;
+ atomic_set(&ret_ring->single.ring_locked, 0);
+ ret_ring->single.ring_overwrited = 0;
+ ret_ring->single.rsvd = 0;
+ ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->single.elem_size = elem_size;
+ ret_ring->single.elem_cnt = elem_cnt;
+ }
+
return ret_ring;
}
void
-dhd_ring_deinit(void *_ring)
+dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- dhd_fixed_ring_info_t *fixed;
if (!ring) {
return;
}
@@ -21280,15 +21368,66 @@
return;
}
- mutex_destroy(&ring->ring_sync);
- fixed = &ring->fixed;
- memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
- fixed->elem_size = fixed->elem_cnt = 0;
+ if (ring->type != DHD_RING_TYPE_FIXED &&
+ ring->type != DHD_RING_TYPE_SINGLE_IDX) {
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
+ ring->ring_sync = NULL;
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ dhd_fixed_ring_info_t *fixed = &ring->fixed;
+ memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
+ fixed->elem_size = fixed->elem_cnt = 0;
+ } else {
+ dhd_singleidx_ring_info_t *single = &ring->single;
+ memset(single->elem, 0, single->elem_size * single->elem_cnt);
+ single->elem_size = single->elem_cnt = 0;
+ }
ring->type = 0;
ring->magic = 0;
- return;
}
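+/*
+ * Map an element pointer back to its ring index, checking that the pointer
+ * lies inside the ring storage and is element-aligned; DHD_RING_IDX_INVALID
+ * is returned on any mismatch. Shared by the fixed and single-index rings.
+ */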
+static inline uint32
+__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
+{
+ uint32 diff;
+ uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
+ uint32 elem_size, elem_cnt;
+ void *elem;
+
+ if (type == DHD_RING_TYPE_FIXED) {
+ dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
+ elem_size = fixed->elem_size;
+ elem_cnt = fixed->elem_cnt;
+ elem = fixed->elem;
+ } else if (type == DHD_RING_TYPE_SINGLE_IDX) {
+ dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
+ elem_size = single->elem_size;
+ elem_cnt = single->elem_cnt;
+ elem = single->elem;
+ } else {
+ DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
+ return ret_idx;
+ }
+
+ if (ptr < elem) {
+ DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+ return ret_idx;
+ }
+ diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
+ if (diff % elem_size != 0) {
+ DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+ return ret_idx;
+ }
+ ret_idx = diff / elem_size;
+ if (ret_idx >= elem_cnt) {
+ DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
+ }
+ return ret_idx;
+}
+
+/* Sub functions for fixed ring */
/* get counts between two indexes of ring buffer (internal only) */
static inline int
__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
@@ -21370,30 +21509,8 @@
return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}
-static inline uint32
-__dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig)
-{
- uint32 diff;
- uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
-
- if (ptr < ring->elem) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
- return ret_idx;
- }
- diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem);
- if (diff % ring->elem_size != 0) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
- return ret_idx;
- }
- ret_idx = diff / ring->elem_size;
- if (ret_idx >= ring->elem_cnt) {
- DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx));
- }
- return ret_idx;
-}
-
static inline void *
-__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev)
+__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
uint32 cur_idx;
@@ -21402,7 +21519,7 @@
return NULL;
}
- cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT");
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
if (cur_idx >= ring->elem_cnt) {
return NULL;
}
@@ -21417,7 +21534,7 @@
}
static inline void *
-__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev)
+__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
uint32 cur_idx;
@@ -21425,7 +21542,7 @@
DHD_RING_ERR(("EMPTY RING\n"));
return NULL;
}
- cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV");
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
if (cur_idx >= ring->elem_cnt) {
return NULL;
}
@@ -21439,7 +21556,7 @@
}
static inline void
-__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr)
+__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
{
uint32 first_idx;
uint32 last_idx;
@@ -21452,7 +21569,7 @@
}
if (first_ptr) {
- first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST");
+ first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
if (first_idx >= ring->elem_cnt) {
return;
}
@@ -21461,7 +21578,7 @@
}
if (last_ptr) {
- last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST");
+ last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
if (last_idx >= ring->elem_cnt) {
return;
}
@@ -21567,23 +21684,170 @@
return;
}
+static inline void
+__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+ ring->read_idx = idx;
+}
+
+static inline void
+__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+ ring->write_idx = idx;
+}
+
+static inline uint32
+__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
+{
+ return ring->read_idx;
+}
+
+static inline uint32
+__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
+{
+ return ring->write_idx;
+}
+
+/* Sub functions for single index ring */
+static inline void *
+__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
+{
+ uint32 tmp_idx = 0;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+
+ if (ring->ring_overwrited) {
+ tmp_idx = (ring->idx + 1) % ring->elem_cnt;
+ }
+
+ return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
+{
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+
+ return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
+{
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ ring->idx = 0;
+ return (uint8 *)ring->elem;
+ }
+
+ /* check the lock is held */
+ if (atomic_read(&ring->ring_locked)) {
+ return NULL;
+ }
+
+ /* check the index rollover */
+ if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
+ ring->ring_overwrited = 1;
+ }
+
+ ring->idx = (ring->idx + 1) % ring->elem_cnt;
+
+ return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+
+ if (cur_idx == ring->idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + 1) % ring->elem_cnt;
+
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void *
+__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+
+ if (!ring->ring_overwrited && cur_idx == 0) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
+ if (ring->ring_overwrited && cur_idx == ring->idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void
+__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
+{
+ if (!atomic_read(&ring->ring_locked)) {
+ atomic_set(&ring->ring_locked, 1);
+ }
+}
+
+static inline void
+__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
+{
+ if (atomic_read(&ring->ring_locked)) {
+ atomic_set(&ring->ring_locked, 0);
+ }
+}
+
/* Get first element : oldest element */
void *
dhd_ring_get_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_first(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21592,18 +21856,96 @@
dhd_ring_free_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_free_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+uint32
+dhd_ring_get_read_idx(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ uint32 read_idx = DHD_RING_IDX_INVALID;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return read_idx;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+ return read_idx;
+}
+
+uint32
+dhd_ring_get_write_idx(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ uint32 write_idx = DHD_RING_IDX_INVALID;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return write_idx;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+ return write_idx;
}
/* Get latest element */
@@ -21612,17 +21954,21 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_last(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_last(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21635,17 +21981,21 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_empty(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_empty(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21654,17 +22004,21 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_next(&ring->fixed, cur);
+ ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21673,17 +22027,21 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur);
+ ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21692,17 +22050,18 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
int cnt = 0;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return cnt;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return cnt;
}
@@ -21711,18 +22070,18 @@
dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr);
+ __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
/* free all lock */
@@ -21730,18 +22089,18 @@
dhd_ring_lock_free(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_lock_free(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void *
@@ -21749,17 +22108,18 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21768,17 +22128,18 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21787,17 +22148,18 @@
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
int ret = BCME_ERROR;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return ret;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
@@ -21806,21 +22168,56 @@
dhd_ring_lock_free_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_lock_free_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
-#ifdef DHD_DUMP_MNGR
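+/*
+ * Freeze/unfreeze writes into a single-index ring so a reader can walk a
+ * stable snapshot; these have no effect on fixed rings.
+ */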
+void
+dhd_ring_whole_lock(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ __dhd_singleidx_ring_whole_lock(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_whole_unlock(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ __dhd_singleidx_ring_whole_unlock(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
#define DHD_VFS_INODE(dir) (dir->d_inode)
#else
@@ -21832,8 +22229,7 @@
#else
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
-
-static int
+int
dhd_file_delete(char *path)
{
struct path file_path;
@@ -21843,16 +22239,17 @@
err = kern_path(path, 0, &file_path);
if (err < 0) {
+ DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
return err;
}
- if (FALSE ||
+ if (
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
!d_is_file(file_path.dentry) ||
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
- d_really_is_negative(file_path.dentry)
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
+ d_really_is_negative(file_path.dentry) ||
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-)
+ FALSE)
{
err = -EINVAL;
} else {
@@ -21874,7 +22271,7 @@
return err;
}
-
+#ifdef DHD_DUMP_MNGR
static int
dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
{
@@ -21898,6 +22295,7 @@
if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
+ fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
fm_ptr->elems[fm_idx].file_idx = 0;
}
@@ -21950,6 +22348,7 @@
/* save dump file path */
strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
+ elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
/* change file index to next file index */
elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
@@ -21959,57 +22358,360 @@
#ifdef DHD_MAP_LOGGING
/* Will be called from SMMU fault handler */
void
-dhd_debug_info_dump(void)
+dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
{
dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
+ uint32 irq = (uint32)-1;
+
DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
+ DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr));
+ dhdp->smmu_fault_occurred = TRUE;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = TRUE;
+ dhdp->axi_err_dump->axid = axid;
+ dhdp->axi_err_dump->fault_address = fault_addr;
+#endif /* DNGL_AXI_ERROR_LOGGING */
- DHD_OS_WAKE_LOCK(dhdp);
- dhd_prot_debug_info_print(dhdp);
- osl_dma_map_dump();
-#ifdef DHD_FW_COREDUMP
- /* Load the dongle side dump to host memory */
- dhdp->memdump_enabled = DUMP_MEMONLY;
- dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
- dhd_bus_mem_dump(dhdp);
-#endif /* DHD_FW_COREDUMP */
- DHD_OS_WAKE_UNLOCK(dhdp);
-}
-EXPORT_SYMBOL(dhd_debug_info_dump);
-#endif /* DHD_MAP_LOGGING */
-int
-dhd_get_host_whitelist_region(void *buf, uint len)
-{
- dma_wl_addr_region_host_t *host_reg;
- uint64 wl_end;
-
- if ((wlreg_len_h == 0) && (wlreg_len_l == 0)) {
- return BCME_RANGE;
+ /* Disable PCIe IRQ */
+ dhdpcie_get_pcieirq(dhdp->bus, &irq);
+ if (irq != (uint32)-1) {
+ disable_irq_nosync(irq);
}
- host_reg = (dma_wl_addr_region_host_t *)buf;
- wl_end = wlreg_len_h + wlreg_h;
- wl_end = (wl_end & MASK_32_BITS) << 32;
- wl_end += wlreg_l;
- wl_end += wlreg_len_l;
- /* Now write whitelist region(s) */
- host_reg->hreg_start.addr_low = wlreg_l;
- host_reg->hreg_start.addr_high = wlreg_h;
- host_reg->hreg_end.addr_low = EXTRACT_LOW32(wl_end);
- host_reg->hreg_end.addr_high = EXTRACT_HIGH32(wl_end);
- return BCME_OK;
+ /* Take debug information first */
+ DHD_OS_WAKE_LOCK(dhdp);
+ dhd_prot_smmu_fault_dump(dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+
+ /* Take AXI information if possible */
+#ifdef DNGL_AXI_ERROR_LOGGING
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+ dhd_axi_error_dispatch(dhdp);
+#else
+ dhd_axi_error(dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
}
+EXPORT_SYMBOL(dhd_smmu_fault_handler);
+#endif /* DHD_MAP_LOGGING */
+
+#ifdef DHD_WIFI_SHUTDOWN
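+/* Platform shutdown hook: stop the primary interface if the driver is up. */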
+void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
+{
+ dhd_pub_t *dhd_pub = NULL;
+ dhd_info_t *dhd_info = NULL;
+ dhd_if_t *dhd_if = NULL;
+
+ DHD_ERROR(("%s enter\n", __FUNCTION__));
+ dhd_pub = g_dhd_pub;
+
+ if (dhd_os_check_if_up(dhd_pub)) {
+ dhd_info = (dhd_info_t *)dhd_pub->info;
+ dhd_if = dhd_info->iflist[0];
+ ASSERT(dhd_if);
+ ASSERT(dhd_if->net);
+ if (dhd_if && dhd_if->net) {
+ dhd_stop(dhd_if->net);
+ }
+ }
+}
+#endif /* DHD_WIFI_SHUTDOWN */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
-inline int
+int
compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
{
return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#else
-inline int
+int
compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
{
return kernel_read(file, offset, addr, count);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
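+/*
+ * Work handler run when a TCP SYN flood is detected on an interface:
+ * in AP/P2P-GO mode all associated STAs are deauthenticated, in STA/P2P-GC
+ * mode the current association is dropped.
+ */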
+static void dhd_blk_tsfl_handler(struct work_struct * work)
+{
+ dhd_if_t *ifp = NULL;
+ dhd_pub_t *dhdp = NULL;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+ ifp = container_of(work, dhd_if_t, blk_tsfl_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+ if (ifp) {
+ dhdp = &ifp->info->pub;
+ if (dhdp) {
+ if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
+ (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
+ wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
+ } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
+ (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
+ DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
+ wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
+ }
+ }
+ }
+}
+void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
+{
+ ifp->tsync_rcvd = 0;
+ ifp->tsyncack_txed = 0;
+ ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+}
+void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
+{
+ dhd_if_t *ifp = NULL;
+ if (dev) {
+ ifp = DHD_DEV_IFP(dev);
+ }
+ if (ifp) {
+ ifp->tsync_rcvd = 0;
+ ifp->tsyncack_txed = 0;
+ ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ }
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
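+/*
+ * Delayed work: if the interface is still marked M4_TXFAILED when the
+ * MAX_4WAY_TIMEOUT_MS timer fires, disassociate with reason
+ * WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT.
+ */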
+static void dhd_m4_state_handler(struct work_struct *work)
+{
+ dhd_if_t *ifp = NULL;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct delayed_work *dw = to_delayed_work(work);
+ ifp = container_of(dw, dhd_if_t, m4state_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ if (ifp && ifp->net &&
+ (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
+ DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
+ ifp->net->name));
+ wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
+ }
+}
+
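+/*
+ * TX-completion hook for 802.1X frames: when transmission of an EAPOL 4-way
+ * M4 fails, mark the interface M4_TXFAILED and arm the delayed disconnect
+ * work above.
+ */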
+void
+dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+
+ if (!success) {
+ dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+ if (type == ETHER_TYPE_802_1X) {
+ if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
+ dhd_if_t *ifp = NULL;
+ ifp = dhd->iflist[ifidx];
+ if (!ifp || !ifp->net) {
+ return;
+ }
+
+ DHD_INFO(("%s: M4 TX failed on %d.\n",
+ __FUNCTION__, ifidx));
+
+ OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
+ schedule_delayed_work(&ifp->m4state_work,
+ msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
+ }
+ }
+ }
+}
+
+void
+dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhdinfo;
+ dhd_if_t *ifp;
+
+ if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
+ return;
+ }
+
+ dhdinfo = (dhd_info_t *)(dhdp->info);
+ if (!dhdinfo) {
+ DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ifp = dhdinfo->iflist[ifidx];
+ if (ifp) {
+ cancel_delayed_work_sync(&ifp->m4state_work);
+ }
+}
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_HP2P
+unsigned long
+dhd_os_hp2plock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->hp2p_lock, flags);
+ }
+
+ return flags;
+}
+
+void
+dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
+ }
+}
+#endif /* DHD_HP2P */
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void
+dhd_axi_error_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = (dhd_info_t *)handle;
+ dhd_pub_t *dhdp = NULL;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+
+ dhdp = &dhd->pub;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+
+	/*
+	 * First save the AXI error information to a file, because a kernel
+	 * panic is triggered right after this. After DHD is reset, it reads
+	 * the file back and runs the hang-event process to send the stored
+	 * AXI error to the Bigdata server.
+	 */
+ if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
+ DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
+ __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
+ }
+
+ DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHD_FW_COREDUMP
+#ifdef DHD_SSSR_DUMP
+ dhdp->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
+
+exit:
+ /* Trigger kernel panic after taking necessary dumps */
+ BUG_ON(1);
+}
+
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
+{
+ DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ type, DHD_WQ_WORK_AXI_ERROR_DUMP,
+ dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef BCMPCIE
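+/*
+ * Deferred-work wrapper around dhdpcie_cto_recovery_handler(), scheduled
+ * via dhd_schedule_cto_recovery() below.
+ */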
+static void
+dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp = NULL;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ BUG_ON(1);
+ return;
+ }
+
+ dhdp = &dhd->pub;
+ dhdpcie_cto_recovery_handler(dhdp);
+}
+
+void
+dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ NULL, DHD_WQ_WORK_CTO_RECOVERY,
+ dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* BCMPCIE */
+
+#ifdef SUPPORT_SET_TID
+/*
+ * Set a custom TID value for UDP frames based on the sender's UID.
+ * This is triggered by the android private command below.
+ * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
+ * Mode 0 (SET_TID_OFF)          : Disable changing the TID
+ * Mode 1 (SET_TID_ALL_UDP)      : Change the TID for all UDP frames
+ * Mode 2 (SET_TID_BASED_ON_UID) : Change the TID for UDP frames from the target UID
+ */
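+/*
+ * Illustrative invocations (the UID/TID values below are made up, not part
+ * of this change):
+ *   DRIVER SET_TID 0 0 0        - disable the TID override
+ *   DRIVER SET_TID 1 0 7        - tag every UDP frame with TID 7
+ *   DRIVER SET_TID 2 10010 5    - tag UDP frames from UID 10010 with TID 5
+ */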
+void
+dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
+{
+ struct ether_header *eh = NULL;
+ struct sock *sk = NULL;
+ uint8 *pktdata = NULL;
+ uint8 *ip_hdr = NULL;
+ uint8 cur_prio;
+ uint8 prio;
+ uint32 uid;
+
+ if (dhdp->tid_mode == SET_TID_OFF) {
+ return;
+ }
+
+ pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
+ eh = (struct ether_header *) pktdata;
+ ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
+
+ if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
+ return;
+ }
+
+ cur_prio = PKTPRIO(pkt);
+ prio = dhdp->target_tid;
+ uid = dhdp->target_uid;
+
+ if ((cur_prio == prio) ||
+ (cur_prio != PRIO_8021D_BE)) {
+ return;
+ }
+
+ sk = ((struct sk_buff*)(pkt))->sk;
+
+ if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
+ (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
+ PKTSETPRIO(pkt, prio);
+ }
+}
+#endif /* SUPPORT_SET_TID */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.h
index eae8536..7b78578 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux.h
@@ -1,7 +1,9 @@
/*
* DHD Linux header file (dhd_linux exports for cfg80211 and other components)
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -38,6 +40,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1))
+#include <linux/time64.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1) */
+
#include <dngl_stats.h>
#include <dhd.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1))
@@ -54,6 +61,187 @@
#if defined(CONFIG_WIFI_CONTROL_FUNC)
#include <linux/wlan_plat.h>
#endif // endif
+#ifdef PCIE_FULL_DONGLE
+#include <etd.h>
+#endif /* PCIE_FULL_DONGLE */
+#ifdef WL_MONITOR
+#include <bcmmsgbuf.h>
+#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
+#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
+#endif /* WL_MONITOR */
+
+#define FILE_DUMP_MAX_WAIT_TIME 4000
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef BLOCK_IPV6_PACKET
+#define HEX_PREF_STR "0x"
+#define UNI_FILTER_STR "010000000000"
+#define ZERO_ADDR_STR "000000000000"
+#define ETHER_TYPE_STR "0000"
+#define IPV6_FILTER_STR "20"
+#define ZERO_TYPE_STR "00"
+#endif /* BLOCK_IPV6_PACKET */
+
+typedef struct dhd_if_event {
+ struct list_head list;
+ wl_event_data_if_t event;
+ char name[IFNAMSIZ+1];
+ uint8 mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ int idx; /* iface idx in dongle */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool set_macaddress;
+ bool set_multicast;
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
+ struct net_device_stats stats;
+ struct list_head sta_list; /* sll of associated stations */
+ spinlock_t sta_list_lock; /* lock for manipulating sll */
+ uint32 ap_isolate; /* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+ bool parp_enable;
+ bool parp_discard;
+ bool parp_allnode;
+ arp_table_t *phnd_arp_table;
+ /* for Per BSS modification */
+ bool dhcp_unicast;
+ bool block_ping;
+ bool grat_arp;
+ bool block_tdls;
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_MCAST_REGEN
+ bool mcast_regen_bss_enable;
+#endif // endif
+	bool rx_pkt_chainable; /* set all rx packets to the chainable config by default */
+	cumm_ctr_t cumm_ctr; /* cumulative queue length of child flowrings */
+ uint8 tx_paths_active;
+ bool del_in_progress;
+ bool static_if; /* used to avoid some operations on static_if */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ struct delayed_work m4state_work;
+ atomic_t m4state;
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ bool recv_reassoc_evt;
+ bool post_roam_evt;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ uint32 tsync_rcvd;
+ uint32 tsyncack_txed;
+ u64 last_sync;
+ struct work_struct blk_tsfl_work;
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+#if defined(BCMSDIO)
+ int role;
+#endif /* BCMSDIO */
+} dhd_if_t;
+
+struct ipv6_work_info_t {
+ uint8 if_idx;
+ char ipv6_addr[IPV6_ADDR_LEN];
+ unsigned long event;
+};
+
+typedef struct dhd_dump {
+ uint8 *buf;
+ int bufsize;
+ uint8 *hscb_buf;
+ int hscb_bufsize;
+} dhd_dump_t;
+#ifdef DNGL_AXI_ERROR_LOGGING
+typedef struct dhd_axi_error_dump {
+ ulong fault_address;
+ uint32 axid;
+ struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1;
+} dhd_axi_error_dump_t;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+struct dhd_rx_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *net;
+ struct dhd_pub *pub;
+};
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(DHD_LB)
+#if !defined(PCIE_FULL_DONGLE)
+#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
+#endif /* !PCIE_FULL_DONGLE */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
+ defined(DHD_LB_STATS)
+#if !defined(DHD_LB)
+#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
+#endif /* !DHD_LB */
+#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif // endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif // endif
+
+#define HIST_BIN_SIZE 9
+
+#if defined(DHD_LB_TXP)
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+ struct net_device *net;
+ int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+
+#endif /* DHD_LB */
+
+#ifdef FILTER_IE
+#define FILTER_IE_PATH "/etc/wifi/filter_ie"
+#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
+#define FILE_BLOCK_READ_SIZE 256
+#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
+#endif /* FILTER_IE */
+
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
#if !defined(CONFIG_WIFI_CONTROL_FUNC)
#define WLAN_PLAT_NODFS_FLAG 0x01
@@ -64,6 +252,9 @@
int (*set_carddetect)(int val);
void *(*mem_prealloc)(int section, unsigned long size);
int (*get_mac_addr)(unsigned char *buf);
+#ifdef BCMSDIO
+ int (*get_wake_irq)(void);
+#endif // endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined(CUSTOM_COUNTRY_CODE)
void *(*get_country_code)(char *ccode, u32 flags);
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined (CUSTOM_COUNTRY_CODE) */
@@ -106,6 +297,95 @@
} dhd_sta_t;
typedef dhd_sta_t dhd_sta_pool_t;
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+typedef enum {
+ M3_RXED,
+ M4_TXFAILED
+} msg_4way_state_t;
+#define MAX_4WAY_TIMEOUT_MS 2000
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+extern uint32 report_hang_privcmd_err;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(ARGOS_NOTIFY_CB)
+int argos_register_notifier_init(struct net_device *net);
+int argos_register_notifier_deinit(void);
+
+extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
+extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);
+
+typedef struct {
+ struct net_device *wlan_primary_netdev;
+ int argos_rps_cpus_enabled;
+} argos_rps_ctrl;
+
+#define RPS_TPUT_THRESHOLD 300
+#define DELAY_TO_CLEAR_RPS_CPUS 300
+#endif // endif
+
+#if defined(BT_OVER_SDIO)
+extern void wl_android_set_wifi_on_flag(bool enable);
+#endif /* BT_OVER_SDIO */
+
+#ifdef DHD_LOG_DUMP
+/*
+ * 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE,
+ * 2: DLD_BUF_TYPE_SPECIAL
+ */
+#define DLD_BUFFER_NUM 3
+
+#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
+#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
+#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
+
+#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+/*
+ * Below are different sections that use the prealloced buffer
+ * and sum of the sizes of these should not cross LOG_DUMP_TOTAL_BUFSIZE
+ */
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+#if LOG_DUMP_TOTAL_BUFSIZE < (LOG_DUMP_GENERAL_MAX_BUFSIZE + \
+ LOG_DUMP_PRESERVE_MAX_BUFSIZE + LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE \
+ + LOG_DUMP_FILTER_MAX_BUFSIZE)
+#error "LOG_DUMP_TOTAL_BUFSIZE is lesser than sum of all rings"
+#endif // endif
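+/*
+ * With the default CUSTOM_LOG_DUMP_BUFSIZE_MB of 4, the sections above add
+ * up to 1MB (general) + 512KB (preserve) + 1MB (ecntrs) + 1MB (rtt) +
+ * 512KB (filter) = 4MB, exactly filling LOG_DUMP_TOTAL_BUFSIZE.
+ */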
+
+/* Special buffer is allocated separately in prealloc */
+#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
+
+#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */
+#ifdef CONFIG_LOG_BUF_SHIFT
+/*
+ * 15% of the kernel log buffer size; for example, if the klog buffer is
+ * 512KB, 15% of 512KB ~= 80KB.
+ */
+#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
+ (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+#define LOG_DUMP_COOKIE_BUFSIZE 1024u
+
+typedef struct {
+ char *hdr_str;
+ log_dump_section_type_t sec_type;
+} dld_hdr_t;
+
+typedef struct {
+ int attr;
+ char *hdr_str;
+ log_dump_section_type_t sec_type;
+ int log_type;
+} dld_log_hdr_t;
+
+#define DHD_PRINT_BUF_NAME_LEN 30
+#endif /* DHD_LOG_DUMP */
+
int dhd_wifi_platform_register_drv(void);
void dhd_wifi_platform_unregister_drv(void);
wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
@@ -136,7 +416,11 @@
int dhd_enable_adps(dhd_pub_t *dhd, uint8 on);
#endif /* WLADPS || WLADPS_PRIVATE_CMD */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
+extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
-inline int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count);
+int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count);
#endif /* __DHD_LINUX_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_exportfs.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_exportfs.c
new file mode 100644
index 0000000..a5ceced
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_exportfs.c
@@ -0,0 +1,1515 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <linux/kobject.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <osl.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_priv.h>
+#ifdef DHD_ADPS_BAM_EXPORT
+#include <wl_bam.h>
+#endif // endif
+
+#ifndef BCMDHDX
+#define SYSFS_DIR_BCMDHD "bcmdhd"
+#define SYSFS_DIR_WIFI "wifi0"
+#define PROCFS_DIR_TRACE "dhd_trace"
+#define PROCFS_DIR_ENCOUNTERS "dhd_ecounters"
+#define PROCFS_DIR_RTT "dhd_rtt"
+#else
+#define SYSFS_DIR_BCMDHD "bcmdhdx"
+#define SYSFS_DIR_WIFI "wifi1"
+#define PROCFS_DIR_WIFI "dhdx_trace"
+#define PROCFS_DIR_ENCOUNTERS "dhdx_ecounters"
+#define PROCFS_DIR_RTT "dhdx_rtt"
+#endif /* BCMDHDX */
+
+#ifdef SHOW_LOGTRACE
+extern dhd_pub_t* g_dhd_pub;
+static int dhd_ring_proc_open(struct inode *inode, struct file *file);
+ssize_t dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff);
+
+static const struct file_operations dhd_ring_proc_fops = {
+ .open = dhd_ring_proc_open,
+ .read = dhd_ring_proc_read,
+ .release = single_release,
+};
+
+static int
+dhd_ring_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = BCME_ERROR;
+ if (inode) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+ ret = single_open(file, 0, PDE_DATA(inode));
+#else
+ /* This feature is not supported for lower kernel versions */
+ ret = single_open(file, 0, NULL);
+#endif // endif
+ } else {
+ DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__));
+ }
+ return ret;
+}
+
+ssize_t
+dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
+{
+ trace_buf_info_t *trace_buf_info;
+ int ret = BCME_ERROR;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ASSERT(g_dhd_pub);
+
+ trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t));
+ if (trace_buf_info) {
+ dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info);
+ if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
+ {
+ ret = -EFAULT;
+ goto exit;
+ }
+ if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
+ ret = BUF_NOT_AVAILABLE;
+ else
+ ret = trace_buf_info->size;
+ } else
+ DHD_ERROR(("Memory allocation Failed\n"));
+
+exit:
+ if (trace_buf_info) {
+ MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+ }
+ return ret;
+}
+
+void
+dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+ dhd_dbg_ring_t *dbg_verbose_ring = NULL;
+
+ dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID);
+ if (dbg_verbose_ring) {
+ if (!proc_create_data(PROCFS_DIR_TRACE, S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dbg_verbose_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_trace procfs interface\n"));
+ }
+ } else {
+ DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n"));
+ }
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+ if (!proc_create_data(PROCFS_DIR_ENCOUNTERS, S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dhdp->ecntr_dbg_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (!proc_create_data(PROCFS_DIR_RTT, S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dhdp->rtt_dbg_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
+ }
+#endif /* EWP_RTT_LOGGING */
+}
+
+void
+dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+ remove_proc_entry(PROCFS_DIR_TRACE, NULL);
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+ remove_proc_entry(PROCFS_DIR_ENCOUNTERS, NULL);
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ remove_proc_entry(PROCFS_DIR_RTT, NULL);
+#endif /* EWP_RTT_LOGGING */
+
+}
+#endif /* SHOW_LOGTRACE */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the Linux standard way of changing/configuring Run Time parameters
+ * for a driver. We can use this interface to control "linux" specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
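+/*
+ * Typical usage from userspace (the exact mount point depends on where the
+ * dhd kobject gets registered, which is outside this excerpt):
+ *   cat .../ecounters        # read the current setting
+ *   echo 1 > .../ecounters   # enable ecounters
+ */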
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+extern atomic_t trace_wklock_onoff;
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ buf[ret] = '\n';
+ buf[ret+1] = 0;
+
+ dhd_wk_lock_stats_dump(&dhd->pub);
+ return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ BCM_REFERENCE(dhd);
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ atomic_set(&trace_wklock_onoff, onoff);
+ if (atomic_read(&trace_wklock_onoff)) {
+ printk("ENABLE WAKLOCK TRACE\n");
+ } else {
+ printk("DISABLE WAKELOCK TRACE\n");
+ }
+
+ return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#if defined(DHD_LB_TXP)
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = atomic_read(&dhd->lb_txp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ int i;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+ atomic_set(&dhd->lb_txp_active, onoff);
+
+ /* Since the scheme is changed clear the counters */
+ for (i = 0; i < NR_CPUS; i++) {
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+ }
+
+ return count;
+}
+
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+static ssize_t
+show_lbrxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = atomic_read(&dhd->lb_rxp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ int i, j;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+ atomic_set(&dhd->lb_rxp_active, onoff);
+
+ /* Since the scheme is changed clear the counters */
+ for (i = 0; i < NR_CPUS; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+ }
+ }
+
+ return count;
+}
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LOG_DUMP
+extern int logdump_periodic_flush;
+extern int logdump_ecntr_enable;
+static ssize_t
+show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long val;
+
+ val = logdump_periodic_flush;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+ return ret;
+}
+
+static ssize_t
+logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ val = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &val);
+ if (val != 0 && val != 1) {
+ return -EINVAL;
+ }
+ logdump_periodic_flush = val;
+ return count;
+}
+
+static ssize_t
+show_logdump_ecntr(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long val;
+
+ val = logdump_ecntr_enable;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+ return ret;
+}
+
+static ssize_t
+logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ val = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &val);
+ if (val != 0 && val != 1) {
+ return -EINVAL;
+ }
+ logdump_ecntr_enable = val;
+ return count;
+}
+
+#endif /* DHD_LOG_DUMP */
+
+extern uint enable_ecounter;
+static ssize_t
+show_enable_ecounter(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+
+ onoff = enable_ecounter;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+ dhdp = &dhd->pub;
+ if (!FW_SUPPORTED(dhdp, ecounters)) {
+ DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
+ return count;
+ }
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ if (enable_ecounter == onoff) {
+ DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
+ return count;
+ }
+
+ enable_ecounter = onoff;
+ dhd_ecounter_configure(dhdp, enable_ecounter);
+
+ return count;
+}
+
+/*
+ * Generic Attribute Structure for DHD.
+ * To add a new sysfs entry under /sys/bcm-dhd/, instantiate an object of
+ * type dhd_attr, populate it with the required show/store functions
+ * (e.g. dhd_attr_cpumask_primary) and add the object to the default_attrs[]
+ * array, which gets registered to the kobject of dhd (named bcm-dhd).
+ * A hypothetical sketch follows the dhd_attr definition below.
+ */
+
+struct dhd_attr {
+ struct attribute attr;
+ ssize_t(*show)(struct dhd_info *, char *);
+ ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
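+
+/*
+ * Illustrative sketch only (not part of this driver): a hypothetical
+ * read/write node "foo" under /sys/bcm-dhd/ could be wired up as below.
+ * The names foo_val, show_foo, set_foo and dhd_attr_foo are placeholders;
+ * the real entries in this file follow the same pattern.
+ *
+ *	static uint32 foo_val;
+ *
+ *	static ssize_t show_foo(struct dhd_info *dev, char *buf)
+ *	{
+ *		return scnprintf(buf, PAGE_SIZE - 1, "%u\n", foo_val);
+ *	}
+ *
+ *	static ssize_t set_foo(struct dhd_info *dev, const char *buf, size_t count)
+ *	{
+ *		unsigned long val;
+ *
+ *		val = bcm_strtoul(buf, NULL, 10);
+ *		if (val > 1)
+ *			return -EINVAL;
+ *		foo_val = (uint32)val;
+ *		return count;
+ *	}
+ *
+ *	static struct dhd_attr dhd_attr_foo =
+ *		__ATTR(foo, 0660, show_foo, set_foo);
+ *
+ * followed by adding &dhd_attr_foo.attr to the default_attrs[] array.
+ */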
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+ __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+#if defined(DHD_LB_TXP)
+static struct dhd_attr dhd_attr_lbtxp =
+ __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+static struct dhd_attr dhd_attr_lbrxp =
+ __ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LOG_DUMP
+static struct dhd_attr dhd_attr_logdump_periodic_flush =
+ __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
+ logdump_periodic_flush_onoff);
+static struct dhd_attr dhd_attr_logdump_ecntr =
+ __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
+ logdump_ecntr_onoff);
+#endif /* DHD_LOG_DUMP */
+
+static struct dhd_attr dhd_attr_ecounters =
+ __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+ &dhd_attr_wklock.attr,
+#endif // endif
+#if defined(DHD_LB_TXP)
+ &dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+#if defined(DHD_LB_RXP)
+ &dhd_attr_lbrxp.attr,
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LOG_DUMP
+ &dhd_attr_logdump_periodic_flush.attr,
+ &dhd_attr_logdump_ecntr.attr,
+#endif // endif
+ &dhd_attr_ecounters.attr,
+ NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the show function is being called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ int ret;
+
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+/*
+ * bcm-dhd kobject store function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the store function is being called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ int ret;
+
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+ .show = dhd_show,
+ .store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+ .sysfs_ops = &dhd_sysfs_ops,
+ .default_attrs = default_attrs,
+};
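+
+/*
+ * Once registered (see dhd_sysfs_init() below), these nodes can be exercised
+ * from user space with plain reads and writes, for example (hypothetical
+ * shell session; which nodes exist depends on the compile-time flags above,
+ * and the paths follow the "/sys/bcm-dhd/" naming noted earlier):
+ *
+ *	cat /sys/bcm-dhd/ecounters
+ *	echo 1 > /sys/bcm-dhd/lbtxp
+ */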
+
+#ifdef DHD_MAC_ADDR_EXPORT
+struct ether_addr sysfs_mac_addr;
+static ssize_t
+show_mac_addr(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, MACF,
+ (uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1],
+ (uint32)sysfs_mac_addr.octet[2], (uint32)sysfs_mac_addr.octet[3],
+ (uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]);
+
+ return ret;
+}
+
+static ssize_t
+set_mac_addr(struct dhd_info *dev, const char *buf, size_t count)
+{
+ if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) {
+ DHD_ERROR(("Invalid Mac Address \n"));
+ return -EINVAL;
+ }
+
+ DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr)));
+
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_macaddr =
+ __ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr);
+#endif /* DHD_MAC_ADDR_EXPORT */
+
+#ifdef DHD_FW_COREDUMP
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#elif defined(OEM_ANDROID) && defined(DHD_FW_COREDUMP)
+#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
+#define MEMDUMPINFO_INST "/data/.memdump.info"
+#define MEMDUMPINFO MEMDUMPINFO_LIVE
+#else /* FC19 and Others */
+#define MEMDUMPINFO "/root/.memdump.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+
+uint32
+get_mem_val_from_file(void)
+{
+ struct file *fp = NULL;
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+ char *p_mem_val = NULL;
+ char *filepath = MEMDUMPINFO;
+ int ret = 0;
+
+ /* Read memdump info from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86) && defined(OEM_ANDROID) && defined(DHD_FW_COREDUMP)
+ /* Check if it is Live Brix Image */
+ if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
+ goto done;
+ }
+ /* Try if it is Installed Brix Image */
+ filepath = MEMDUMPINFO_INST;
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto done;
+ }
+#else /* Non Brix Android platform */
+ goto done;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+ /* Handle success case */
+ ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ goto done;
+ }
+
+ p_mem_val = (char*)&mem_val;
+ p_mem_val[sizeof(uint32) - 1] = '\0';
+ mem_val = bcm_atoi(p_mem_val);
+
+ filp_close(fp, NULL);
+
+done:
+ return mem_val;
+}
+
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+
+ mem_val = get_mem_val_from_file();
+ if (mem_val != DUMP_MEMFILE_MAX)
+ dhd->memdump_enabled = mem_val;
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+ if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
+ mem_val = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#else
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+ if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX)
+ dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#endif /* !DHD_EXPORT_CNTL_FILE */
+#ifdef BCMQT
+	/* In the QT environment, collecting a memdump on FW TRAP or IOVAR
+	 * timeouts takes too long and makes the system unresponsive, so it is
+	 * disabled here. If needed, a memdump can still be collected through
+	 * the 'dhd upload' command.
+ */
+ dhd->memdump_enabled = DUMP_DISABLED;
+#endif // endif
+ DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+static ssize_t
+show_memdump_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_pub_t *dhdp;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ dhdp = &dev->pub;
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled);
+ return ret;
+}
+
+static ssize_t
+set_memdump_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long memval;
+ dhd_pub_t *dhdp;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+ dhdp = &dev->pub;
+
+ memval = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &memval);
+
+ dhdp->memdump_enabled = (uint32)memval;
+
+	DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhdp->memdump_enabled));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_memdump =
+ __ATTR(memdump, 0660, show_memdump_info, set_memdump_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMASSERT_LOG
+#ifdef CUSTOMER_HW4_DEBUG
+#define ASSERTINFO PLATFORM_PATH".assert.info"
+#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
+#define ASSERTINFO "/data/misc/wifi/.assert.info"
+#elif defined(OEM_ANDROID)
+#define ASSERTINFO "/installmedia/.assert.info"
+#else
+#define ASSERTINFO "/root/.assert.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+int
+get_assert_val_from_file(void)
+{
+ struct file *fp = NULL;
+ char *filepath = ASSERTINFO;
+ char *p_mem_val = NULL;
+ int mem_val = -1;
+
+ /*
+ * Read assert info from the file
+ * 0: Trigger Kernel crash by panic()
+ * 1: Print out the logs and don't trigger Kernel panic. (default)
+ * 2: Trigger Kernel crash by BUG()
+ * File doesn't exist: Keep default value (1).
+ */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ } else {
+ int ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ } else {
+ p_mem_val = (char *)&mem_val;
+ p_mem_val[sizeof(uint32) - 1] = '\0';
+ mem_val = bcm_atoi(p_mem_val);
+ DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ }
+ filp_close(fp, NULL);
+ }
+
+#ifdef CUSTOMER_HW4_DEBUG
+ mem_val = (mem_val >= 0) ? mem_val : 1;
+#else
+ mem_val = (mem_val >= 0) ? mem_val : 0;
+#endif /* CUSTOMER_HW4_DEBUG */
+ return mem_val;
+}
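+
+/*
+ * For illustration: the file above holds the assert mode as plain text, so a
+ * hypothetical "echo 2 > /data/misc/wifi/.assert.info" selects BUG()-based
+ * handling per the table in get_assert_val_from_file(); the exact file path
+ * depends on which of the platform defines above is in effect.
+ */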
+
+void dhd_get_assert_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ int mem_val = -1;
+
+ mem_val = get_assert_val_from_file();
+
+ g_assert_type = mem_val;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+static ssize_t
+show_assert_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type);
+ return ret;
+
+}
+
+static ssize_t
+set_assert_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long assert_val;
+
+ assert_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &assert_val);
+
+ g_assert_type = (uint32)assert_val;
+
+ DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val));
+ return count;
+
+}
+
+static struct dhd_attr dhd_attr_cntl_assert =
+ __ATTR(assert, 0660, show_assert_info, set_assert_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* BCMASSERT_LOG */
+
+#ifdef DHD_EXPORT_CNTL_FILE
+#if defined(WRITE_WLANINFO)
+static ssize_t
+show_wifiver_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info);
+ return ret;
+}
+
+static ssize_t
+set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ DHD_ERROR(("Do not set version info\n"));
+ return -EINVAL;
+}
+
+static struct dhd_attr dhd_attr_cntl_wifiver =
+ __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info);
+#endif /* WRITE_WLANINFO */
+
+#if defined(USE_CID_CHECK)
+char cidinfostr[MAX_VNAME_LEN];
+
+static ssize_t
+show_cid_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr);
+ return ret;
+}
+
+static ssize_t
+set_cid_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int len = strlen(buf) + 1;
+ int maxstrsz;
+ maxstrsz = MAX_VNAME_LEN;
+
+ scnprintf(cidinfostr, ((len > maxstrsz) ? maxstrsz : len), "%s", buf);
+	DHD_INFO(("%s: CID info string = %s\n", __FUNCTION__, cidinfostr));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_cidinfo =
+ __ATTR(cid, 0660, show_cid_info, set_cid_info);
+#endif /* USE_CID_CHECK */
+
+#if defined(GEN_SOFTAP_INFO_FILE)
+char softapinfostr[SOFTAP_INFO_BUF_SZ];
+static ssize_t
+show_softap_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr);
+ return ret;
+}
+
+static ssize_t
+set_softap_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	DHD_ERROR(("Do not set softap related info\n"));
+ return -EINVAL;
+}
+
+static struct dhd_attr dhd_attr_cntl_softapinfo =
+ __ATTR(softap, 0660, show_softap_info, set_softap_info);
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(MIMO_ANT_SETTING)
+unsigned long antsel;
+
+static ssize_t
+show_ant_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel);
+ return ret;
+}
+
+static ssize_t
+set_ant_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long ant_val;
+
+ ant_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &ant_val);
+
+ /*
+ * Check value
+ * 0 - Not set, handle same as file not exist
+ */
+ if (ant_val > 3) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, ant_val));
+ return -EINVAL;
+ }
+
+ antsel = ant_val;
+ DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo val = %lu \n", __FUNCTION__, antsel));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_antinfo =
+ __ATTR(ant, 0660, show_ant_info, set_ant_info);
+#endif /* MIMO_ANT_SETTING */
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+extern uint32 pmmode_val;
+static ssize_t
+show_pm_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (!g_pm_control) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val);
+ }
+ return ret;
+}
+
+static ssize_t
+set_pm_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long pm_val;
+
+ pm_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &pm_val);
+
+ if (pm_val > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, pm_val));
+ return -EINVAL;
+ }
+
+ if (!pm_val) {
+ g_pm_control = TRUE;
+ } else {
+ g_pm_control = FALSE;
+ }
+
+ pmmode_val = (uint32)pm_val;
+ DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_pminfo =
+ __ATTR(pm, 0660, show_pm_info, set_pm_info);
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#ifdef LOGTRACE_FROM_FILE
+unsigned long logtrace_val = 1;
+
+static ssize_t
+show_logtrace_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val);
+ return ret;
+}
+
+static ssize_t
+set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ logtrace_val = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n",
+ __FUNCTION__, logtrace_val));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_logtraceinfo =
+ __ATTR(logtrace, 0660, show_logtrace_info, set_logtrace_info);
+#endif /* LOGTRACE_FROM_FILE */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+uint32 bus_txglom = VALUENOTSET;
+
+static ssize_t
+show_bustxglom(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (bus_txglom == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom);
+ }
+ return ret;
+}
+
+static ssize_t
+set_bustxglom(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = (uint32)bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ bus_txglom = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n",
+ __FUNCTION__, bus_txglom));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_bustxglom =
+ __ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom);
+#endif /* BCMSDIO */
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+uint32 roam_off = VALUENOTSET;
+
+static ssize_t
+show_roamoff(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (roam_off == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off);
+ }
+ return ret;
+}
+
+static ssize_t
+set_roamoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ roam_off = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n",
+ __FUNCTION__, roam_off));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_roamoff =
+ __ATTR(roamoff, 0660, show_roamoff, set_roamoff);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+
+#ifdef USE_WL_FRAMEBURST
+uint32 frameburst = VALUENOTSET;
+
+static ssize_t
+show_frameburst(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (frameburst == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst);
+ }
+ return ret;
+}
+
+static ssize_t
+set_frameburst(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ frameburst = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
+ __FUNCTION__, frameburst));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_frameburst =
+ __ATTR(frameburst, 0660, show_frameburst, set_frameburst);
+#endif /* USE_WL_FRAMEBURST */
+
+#ifdef USE_WL_TXBF
+uint32 txbf = VALUENOTSET;
+
+static ssize_t
+show_txbf(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (txbf == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf);
+ }
+ return ret;
+}
+
+static ssize_t
+set_txbf(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ txbf = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: TXBF On/Off from sysfs = %u\n",
+		__FUNCTION__, txbf));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_txbf =
+ __ATTR(txbf, 0660, show_txbf, set_txbf);
+#endif /* USE_WL_TXBF */
+
+#ifdef PROP_TXSTATUS
+uint32 proptx = VALUENOTSET;
+
+static ssize_t
+show_proptx(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (proptx == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", proptx);
+ }
+ return ret;
+}
+
+static ssize_t
+set_proptx(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ proptx = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: PROP_TXSTATUS On/Off from sysfs = %u\n",
+		__FUNCTION__, proptx));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_proptx =
+ __ATTR(proptx, 0660, show_proptx, set_proptx);
+
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+#define BAD_AP_MAC_ADDR_ELEMENT_NUM 6
+wl_bad_ap_mngr_t *g_bad_ap_mngr = NULL;
+
+static ssize_t
+show_adps_bam_list(struct dhd_info *dev, char *buf)
+{
+ int offset = 0;
+ ssize_t ret = 0;
+
+ wl_bad_ap_info_t *bad_ap;
+ wl_bad_ap_info_entry_t *entry;
+
+ if (g_bad_ap_mngr == NULL)
+ return ret;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ list_for_each_entry(entry, &g_bad_ap_mngr->list, list) {
+ bad_ap = &entry->bad_ap;
+
+		ret = scnprintf(buf + offset, PAGE_SIZE - 1 - offset, MACF"\n",
+ bad_ap->bssid.octet[0], bad_ap->bssid.octet[1],
+ bad_ap->bssid.octet[2], bad_ap->bssid.octet[3],
+ bad_ap->bssid.octet[4], bad_ap->bssid.octet[5]);
+
+ offset += ret;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ return offset;
+}
+
+static ssize_t
+store_adps_bam_list(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int ret;
+ size_t len;
+ int offset;
+ char tmp[128];
+ wl_bad_ap_info_t bad_ap;
+
+ if (g_bad_ap_mngr == NULL)
+ return count;
+
+ len = count;
+ offset = 0;
+ do {
+ ret = sscanf(buf + offset, MACF"\n",
+ (uint32 *)&bad_ap.bssid.octet[0], (uint32 *)&bad_ap.bssid.octet[1],
+ (uint32 *)&bad_ap.bssid.octet[2], (uint32 *)&bad_ap.bssid.octet[3],
+ (uint32 *)&bad_ap.bssid.octet[4], (uint32 *)&bad_ap.bssid.octet[5]);
+ if (ret != BAD_AP_MAC_ADDR_ELEMENT_NUM) {
+ DHD_ERROR(("%s - fail to parse bad ap data\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ ret = wl_bad_ap_mngr_add(g_bad_ap_mngr, &bad_ap);
+ if (ret < 0)
+ return ret;
+
+ ret = snprintf(tmp, ARRAYSIZE(tmp), MACF"\n",
+ bad_ap.bssid.octet[0], bad_ap.bssid.octet[1],
+ bad_ap.bssid.octet[2], bad_ap.bssid.octet[3],
+ bad_ap.bssid.octet[4], bad_ap.bssid.octet[5]);
+ if (ret < 0) {
+ DHD_ERROR(("%s - fail to get bad ap data length(%d)\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ len -= ret;
+ offset += ret;
+ } while (len > 0);
+
+ return count;
+}
+
+static struct dhd_attr dhd_attr_adps_bam =
+ __ATTR(bad_ap_list, 0660, show_adps_bam_list, store_adps_bam_list);
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+uint32 report_hang_privcmd_err = 1;
+
+static ssize_t
+show_hang_privcmd_err(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err);
+ return ret;
+}
+
+static ssize_t
+set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ val = bcm_atoi(buf);
+ sscanf(buf, "%u", &val);
+
+ report_hang_privcmd_err = val ? 1 : 0;
+ DHD_INFO(("%s: Set report HANG for private cmd error: %d\n",
+ __FUNCTION__, report_hang_privcmd_err));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_hang_privcmd_err =
+ __ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err);
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+uint8 control_he_enab = 1;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+static ssize_t
+show_control_he_enab(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab);
+ return ret;
+}
+
+static ssize_t
+set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ val = bcm_atoi(buf);
+
+ control_he_enab = val ? 1 : 0;
+ DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_control_he_enab =
+	__ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab);
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+/* Attribute object that gets registered with "wifi" kobject tree */
+static struct attribute *control_file_attrs[] = {
+#ifdef DHD_MAC_ADDR_EXPORT
+ &dhd_attr_cntl_macaddr.attr,
+#endif /* DHD_MAC_ADDR_EXPORT */
+#ifdef DHD_EXPORT_CNTL_FILE
+#ifdef DHD_FW_COREDUMP
+ &dhd_attr_cntl_memdump.attr,
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+ &dhd_attr_cntl_assert.attr,
+#endif /* BCMASSERT_LOG */
+#ifdef WRITE_WLANINFO
+ &dhd_attr_cntl_wifiver.attr,
+#endif /* WRITE_WLANINFO */
+#ifdef USE_CID_CHECK
+ &dhd_attr_cntl_cidinfo.attr,
+#endif /* USE_CID_CHECK */
+#ifdef GEN_SOFTAP_INFO_FILE
+ &dhd_attr_cntl_softapinfo.attr,
+#endif /* GEN_SOFTAP_INFO_FILE */
+#ifdef MIMO_ANT_SETTING
+ &dhd_attr_cntl_antinfo.attr,
+#endif /* MIMO_ANT_SETTING */
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ &dhd_attr_cntl_pminfo.attr,
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef LOGTRACE_FROM_FILE
+ &dhd_attr_cntl_logtraceinfo.attr,
+#endif /* LOGTRACE_FROM_FILE */
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+ &dhd_attr_cntl_bustxglom.attr,
+#endif /* BCMSDIO */
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+	&dhd_attr_cntl_roamoff.attr,
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#ifdef USE_WL_FRAMEBURST
+ &dhd_attr_cntl_frameburst.attr,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ &dhd_attr_cntl_txbf.attr,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ &dhd_attr_cntl_proptx.attr,
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+	&dhd_attr_adps_bam.attr,
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+ &dhd_attr_hang_privcmd_err.attr,
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+ &dhd_attr_control_he_enab.attr,
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+ NULL
+};
+
+#define to_cntl_dhd(k) container_of(k, struct dhd_info, dhd_conf_file_kobj)
+
+/*
+ * wifi kobject show function; the "attr" argument specifies for which
+ * node under "sys/wifi" the show function is being called.
+ */
+static ssize_t dhd_cntl_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd_info_t *dhd = to_cntl_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ int ret;
+
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+/*
+ * wifi kobject store function; the "attr" argument specifies for which
+ * node under "sys/wifi" the store function is being called.
+ */
+static ssize_t dhd_cntl_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd_info_t *dhd = to_cntl_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ int ret;
+
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+
+}
+
+static struct sysfs_ops dhd_sysfs_cntl_ops = {
+ .show = dhd_cntl_show,
+ .store = dhd_cntl_store,
+};
+
+static struct kobj_type dhd_cntl_file_ktype = {
+ .sysfs_ops = &dhd_sysfs_cntl_ops,
+ .default_attrs = control_file_attrs,
+};
+
+/* Create a kobject and attach to sysfs interface */
+int dhd_sysfs_init(dhd_info_t *dhd)
+{
+ int ret = -1;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Initialize the kobject */
+ ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, SYSFS_DIR_BCMDHD);
+ if (ret) {
+ kobject_put(&dhd->dhd_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
+ ret = kobject_init_and_add(&dhd->dhd_conf_file_kobj,
+ &dhd_cntl_file_ktype, NULL, SYSFS_DIR_WIFI);
+ if (ret) {
+ kobject_put(&dhd->dhd_conf_file_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
+
+ /*
+ * We are always responsible for sending the uevent that the kobject
+ * was added to the system.
+ */
+ kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+ kobject_uevent(&dhd->dhd_conf_file_kobj, KOBJ_ADD);
+
+ return ret;
+}
+
+/* Done with the kobject and detach the sysfs interface */
+void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return;
+ }
+
+	/* Release the kobject */
+ kobject_put(&dhd->dhd_kobj);
+ kobject_put(&dhd->dhd_conf_file_kobj);
+}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_lb.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_lb.c
new file mode 100644
index 0000000..0cb4f99
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_lb.c
@@ -0,0 +1,1333 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <dhd_linux_priv.h>
+
+extern dhd_pub_t* g_dhd_pub;
+
+#if defined(DHD_LB)
+
+void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+ /* Default CPU allocation for the jobs */
+ atomic_set(&dhd->rx_napi_cpu, 1);
+ atomic_set(&dhd->rx_compl_cpu, 2);
+ atomic_set(&dhd->tx_compl_cpu, 2);
+ atomic_set(&dhd->tx_cpu, 2);
+ atomic_set(&dhd->net_tx_cpu, 0);
+}
+
+void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+ free_cpumask_var(dhd->cpumask_curr_avail);
+ free_cpumask_var(dhd->cpumask_primary);
+ free_cpumask_var(dhd->cpumask_primary_new);
+ free_cpumask_var(dhd->cpumask_secondary);
+ free_cpumask_var(dhd->cpumask_secondary_new);
+}
+
+int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+ int id;
+ uint32 cpus, num_cpus = num_possible_cpus();
+ int ret = 0;
+
+ DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+ DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+
+ if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+ DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+ cpumask_clear(dhd->cpumask_primary);
+ cpumask_clear(dhd->cpumask_secondary);
+
+ if (num_cpus > 32) {
+		DHD_ERROR(("%s: at most 32 CPUs are supported, %d is too many\n", __FUNCTION__, num_cpus));
+ ASSERT(0);
+ }
+
+ cpus = DHD_LB_PRIMARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_primary);
+ }
+
+ cpus = DHD_LB_SECONDARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_secondary);
+ }
+
+ return ret;
+fail:
+ dhd_cpumasks_deinit(dhd);
+ return ret;
+}
+
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to the CPUs,
+ * taken from one of the above CPU groups:
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, Rx) - compl_cpu
+ *
+ * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
+ * algorithm tries to pick the first available non-boot CPU (i.e. not CPU0)
+ * for napi_cpu. If more processors are free, it assigns one to compl_cpu.
+ * It also tries, as far as possible, to keep napi_cpu and compl_cpu on
+ * different CPUs.
+ *
+ * By design, both Tx and Rx completion jobs run on the same CPU core, as this
+ * allows Tx completion skb's to be released into a local free pool from
+ * which the rx buffer posts can then be serviced. It is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ * A small worked example follows this comment.
+ */
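+/*
+ * Worked example (hypothetical masks, for illustration only): with
+ * DHD_LB_PRIMARY_CPUS = 0xF0, DHD_LB_SECONDARY_CPUS = 0x0E and CPUs 0-7
+ * online, cpumask_primary_new = {4,5,6,7}, so the selection below yields
+ * napi_cpu = 4, tx_cpu = 5 and compl_cpu = 6. If only CPUs 0-3 were online,
+ * the primary set would be empty and all three jobs would be picked from
+ * the secondary set instead.
+ */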
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+ uint32 primary_available_cpus; /* count of primary available cpus */
+ uint32 secondary_available_cpus; /* count of secondary available cpus */
+ uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+ uint32 compl_cpu = 0; /* cpu selected for completion jobs */
+ uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+
+ cpumask_clear(dhd->cpumask_primary_new);
+ cpumask_clear(dhd->cpumask_secondary_new);
+
+ /*
+ * Now select from the primary mask. Even if a Job is
+ * already running on a CPU in secondary group, we still move
+ * to primary CPU. So no conditional checks.
+ */
+ cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+ dhd->cpumask_curr_avail);
+
+ cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+ dhd->cpumask_curr_avail);
+
+ primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+ if (primary_available_cpus > 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+ /* If no further CPU is available,
+ * cpumask_next returns >= nr_cpu_ids
+ */
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
+
+ /* In case there are no more CPUs, do completions & Tx in same CPU */
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = tx_cpu;
+ }
+
+ DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+ /* -- Now check for the CPUs from the secondary mask -- */
+ secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+ DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+ __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+ if (secondary_available_cpus > 0) {
+ /* At this point if napi_cpu is unassigned it means no CPU
+ * is online from Primary Group
+ */
+ if (napi_cpu == 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+ } else if (tx_cpu == 0) {
+ tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+ } else if (compl_cpu == 0) {
+ compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ }
+
+ /* If no CPU was available for tx processing, choose CPU 0 */
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
+
+ /* If no CPU was available for completion, choose CPU 0 */
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = 0;
+ }
+ if ((primary_available_cpus == 0) &&
+ (secondary_available_cpus == 0)) {
+ /* No CPUs available from primary or secondary mask */
+ napi_cpu = 1;
+ compl_cpu = 0;
+ tx_cpu = 2;
+ }
+
+ DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+ ASSERT(napi_cpu < nr_cpu_ids);
+ ASSERT(compl_cpu < nr_cpu_ids);
+ ASSERT(tx_cpu < nr_cpu_ids);
+
+ atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+ atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+ atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+ atomic_set(&dhd->tx_cpu, tx_cpu);
+
+ return;
+}
+
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the tasks it performs is to trigger the CPU candidacy algorithm
+ * for load balancing.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int dhd_cpu_startup_callback(unsigned int cpu)
+{
+ dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): cpu:%d\n", __FUNCTION__, cpu));
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+
+ return 0;
+}
+
+int dhd_cpu_teardown_callback(unsigned int cpu)
+{
+ dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): cpu:%d\n", __FUNCTION__, cpu));
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+
+ return 0;
+}
+#else
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned long int cpu = (unsigned long int)hcpu;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+ DHD_INFO(("%s(): LB data is not initialized yet.\n",
+ __FUNCTION__));
+ return NOTIFY_BAD;
+ }
+
+ switch (action)
+ {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd)
+{
+ int cpuhp_ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
+ dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
+
+ if (cpuhp_ret < 0) {
+ DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
+ __FUNCTION__, cpuhp_ret));
+ }
+#else
+ /*
+	 * If we are able to initialize CPU masks, let's register with the
+	 * CPU hotplug framework so that the CPU for each job can be changed
+	 * dynamically using the candidacy algorithm.
+ */
+ dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+ register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+ return cpuhp_ret;
+}
+
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
+{
+ int ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ /* Don't want to call tear down while unregistering */
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#else
+ if (dhd->cpu_notifier.notifier_call != NULL) {
+ unregister_cpu_notifier(&dhd->cpu_notifier);
+ }
+#endif // endif
+ return ret;
+}
+
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int i, j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
+
+ if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+ DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+ dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_percpu_run_cnt) {
+ DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+
+ DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+ dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->rxc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+
+ DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+ dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+ dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_online_cnt) {
+ DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+
+ dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_offline_cnt) {
+ DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+
+ dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txp_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+ dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->tx_start_percpu_run_cnt) {
+ DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_rx_hist[j]) {
+ DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+ }
+ }
+#ifdef DHD_LB_TXC
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txc_hist[j]) {
+ DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
+ }
+ }
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->rxc_hist[j]) {
+ DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
+ }
+ }
+#endif /* DHD_LB_RXC */
+ return;
+}
+
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
+
+ if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd->napi_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+ dhd->napi_percpu_run_cnt = NULL;
+ }
+ if (dhd->rxc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+ dhd->rxc_percpu_run_cnt = NULL;
+ }
+ if (dhd->txc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+ dhd->txc_percpu_run_cnt = NULL;
+ }
+ if (dhd->cpu_online_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+ dhd->cpu_online_cnt = NULL;
+ }
+ if (dhd->cpu_offline_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+ dhd->cpu_offline_cnt = NULL;
+ }
+
+ if (dhd->txp_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+ dhd->txp_percpu_run_cnt = NULL;
+ }
+ if (dhd->tx_start_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+ dhd->tx_start_percpu_run_cnt = NULL;
+ }
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ if (dhd->napi_rx_hist[j]) {
+ MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+ dhd->napi_rx_hist[j] = NULL;
+ }
+#ifdef DHD_LB_TXC
+ if (dhd->txc_hist[j]) {
+ MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
+ dhd->txc_hist[j] = NULL;
+ }
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ if (dhd->rxc_hist[j]) {
+ MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
+ dhd->rxc_hist[j] = NULL;
+ }
+#endif /* DHD_LB_RXC */
+ }
+
+ return;
+}
+
+void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
+ struct bcmstrbuf *strbuf, uint32 **hist)
+{
+ int i, j;
+ uint32 *per_cpu_total;
+ uint32 total = 0;
+ uint32 num_cpus = num_possible_cpus();
+
+ per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
+ if (!per_cpu_total) {
+		DHD_ERROR(("%s(): per_cpu_total malloc failed \n", __FUNCTION__));
+ return;
+ }
+ bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+ bcm_bprintf(strbuf, "CPU: \t\t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\nBin\n");
+
+ for (i = 0; i < HIST_BIN_SIZE; i++) {
+ bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+ for (j = 0; j < num_cpus; j++) {
+ bcm_bprintf(strbuf, "%d\t", hist[i][j]);
+ }
+ bcm_bprintf(strbuf, "\n");
+ }
+ bcm_bprintf(strbuf, "Per CPU Total \t");
+ total = 0;
+ for (i = 0; i < num_cpus; i++) {
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ per_cpu_total[i] += (hist[j][i] * (1<<j));
+ }
+ bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+ total += per_cpu_total[i];
+ }
+ bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+ if (per_cpu_total) {
+ MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
+ per_cpu_total = NULL;
+ }
+ return;
+}
+
+void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+ int i, num_cpus = num_possible_cpus();
+
+ bcm_bprintf(strbuf, "CPU: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\n");
+
+ bcm_bprintf(strbuf, "Val: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%u\t", *(p+i));
+ bcm_bprintf(strbuf, "\n");
+ return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd;
+
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+ bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+ bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+ dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+ dhd->txc_sched_cnt);
+
+#ifdef DHD_LB_RXP
+ bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_RXC
+ bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
+#endif /* DHD_LB_RXC */
+
+#ifdef DHD_LB_TXC
+ bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
+#endif /* DHD_LB_TXC */
+
+#ifdef DHD_LB_TXP
+ bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
+
+ bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
+#endif /* DHD_LB_TXP */
+}
+
+/* Given a number 'n', returns the smallest power of 2 that is >= 'n' */
+static inline uint32 next_larger_power2(uint32 num)
+{
+ num--;
+ num |= (num >> 1);
+ num |= (num >> 2);
+ num |= (num >> 4);
+ num |= (num >> 8);
+ num |= (num >> 16);
+
+ return (num + 1);
+}
+
+void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+{
+ uint32 bin_power;
+ uint32 *p;
+ bin_power = next_larger_power2(count);
+
+ switch (bin_power) {
+ case 1: p = bin[0] + cpu; break;
+ case 2: p = bin[1] + cpu; break;
+ case 4: p = bin[2] + cpu; break;
+ case 8: p = bin[3] + cpu; break;
+ case 16: p = bin[4] + cpu; break;
+ case 32: p = bin[5] + cpu; break;
+ case 64: p = bin[6] + cpu; break;
+ case 128: p = bin[7] + cpu; break;
+ default : p = bin[8] + cpu; break;
+ }
+
+ *p = *p + 1;
+ return;
+}
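+
+/*
+ * Example (for illustration): a NAPI poll that processed count = 20 packets
+ * rounds up via next_larger_power2(20) == 32 and therefore increments
+ * bin[5][cpu]; counts above 128 all land in the overflow bin bin[8][cpu].
+ */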
+
+void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
+
+void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+#endif /* DHD_LB_STATS */
+
+#endif /* DHD_LB */
+#if defined(DHD_LB)
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+/**
+ * dhd_tasklet_schedule_on - Schedules the passed tasklet on a given CPU
+ * @tasklet: tasklet to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, an IPI is sent to it via
+ * smp_call_function_single() (with no wait), and tasklet_schedule() is then
+ * invoked there to schedule the specified tasklet on the requested CPU.
+ */
+INLINE void
+dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
+{
+ const int wait = 0;
+ smp_call_function_single(on_cpu,
+ dhd_tasklet_schedule, (void *)tasklet, wait);
+}
+
+/**
+ * dhd_work_schedule_on - Schedules the passed work on a given CPU
+ * @work: work to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, the work is queued on it via
+ * schedule_work_on() and the work function will then be executed
+ * on that CPU.
+ */
+
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+ schedule_work_on(on_cpu, work);
+}
+
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
+ * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq
+ */
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+ /*
+	 * If the destination CPU is NOT online or is the same as the current
+	 * CPU, there is no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->tx_compl_dispatcher_work);
+ }
+}
+
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ else
+ dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+ put_online_cpus();
+}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
+ /*
+	 * If the destination CPU is NOT online or is the same as the current
+	 * CPU, there is no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+ on_cpu = atomic_read(&dhd->rx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->rx_compl_dispatcher_work);
+ }
+}
+
+void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->rx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ else {
+ dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+ }
+ put_online_cpus();
+}
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_TXP)
+void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
+{
+ int cpu;
+ int net_tx_cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ preempt_disable();
+ cpu = atomic_read(&dhd->tx_cpu);
+ net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+
+ /*
+	 * Now if the NET_TX has pushed the packet on the same
+	 * CPU that is chosen for Tx processing, separate them out,
+	 * i.e. run the TX processing tasklet on compl_cpu
+ */
+ if (net_tx_cpu == cpu)
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if (!cpu_online(cpu)) {
+ /*
+		 * The chosen CPU is not online, so do the job
+		 * on the current CPU itself.
+ */
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ } else {
+ /*
+		 * Schedule tx_dispatcher_work on the chosen cpu, which
+		 * in turn will schedule tx_tasklet.
+ */
+ dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
+ }
+ preempt_enable();
+}
+
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into appropriate flow ring and ringing H2D interrupt
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ /* Record the CPU in which the TX request from Network stack came */
+ atomic_set(&dhd->net_tx_cpu, curr_cpu);
+
+ /* Schedule the work to dispatch ... */
+ dhd_tx_dispatcher_fn(dhdp);
+}
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
+ * packet tag and sendup.
+ */
+int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+ int ifid;
+ const int pkt_count = 1;
+ const int chan = 0;
+ struct sk_buff * skb;
+ unsigned long flags;
+ struct dhd_info *dhd;
+ int processed = 0;
+ struct sk_buff_head rx_process_queue;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+ __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+ __skb_queue_head_init(&rx_process_queue);
+
+ /* extract the entire rx_napi_queue into local rx_process_queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+ OSL_PREFETCH(skb->data);
+
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ __FUNCTION__, skb, ifid));
+
+ dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+ processed++;
+ }
+
+ DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+ DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+ napi_complete(napi);
+
+ return budget - 1;
+}
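
The queue handling in dhd_napi_poll() follows a common splice-and-drain pattern: take the producer lock only long enough to move the whole shared list into a local list, then process the local list without the lock. Below is a stand-alone kernel-context sketch of just that pattern, with a placeholder consumer instead of the real dhd_rx_frame() path.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int drain_queue(struct sk_buff_head *shared)
{
	struct sk_buff_head local;
	struct sk_buff *skb;
	unsigned long flags;
	int n = 0;

	__skb_queue_head_init(&local);

	spin_lock_irqsave(&shared->lock, flags);
	skb_queue_splice_tail_init(shared, &local);   /* O(1); 'shared' becomes empty */
	spin_unlock_irqrestore(&shared->lock, flags);

	while ((skb = __skb_dequeue(&local)) != NULL) {
		/* hand the packet to the stack here, e.g. netif_receive_skb(skb) */
		dev_kfree_skb(skb);   /* placeholder consumer for the sketch */
		n++;
	}
	return n;
}
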
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current CPU's softnet napi
+ * poll list. This function may be invoked via smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)info;
+
+ DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+ /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+ if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+ __napi_schedule(&dhd->rx_napi_struct);
+#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
+ raise_softirq(NET_RX_SOFTIRQ);
+#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
+ }
+
+ /*
+ * If the rx_napi_struct was already running, then we let it complete
+ * processing all its packets. The rx_napi_struct may only run on one
+ * core at a time, to avoid out-of-order handling.
+ */
+}
+
+/**
+ * dhd_napi_schedule_on - schedule a NET_RX_SOFTIRQ action on a desired CPU core
+ * after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+ int wait = 0; /* asynchronous IPI */
+ DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+ __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+ if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+ DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+ __FUNCTION__, on_cpu));
+ }
+
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+ return 0;
+}
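
dhd_napi_schedule_on() above is essentially a thin wrapper over smp_call_function_single(). A minimal sketch of that call with hypothetical names: wait is zero, so the IPI is fired asynchronously and the callback runs in interrupt context on the target CPU, where it should only do something short such as napi_schedule().

#include <linux/smp.h>
#include <linux/printk.h>

static void remote_kick(void *info)
{
	/* Runs in IPI context on the target CPU with interrupts disabled:
	 * keep it short, e.g. napi_schedule()/tasklet_schedule(), then return. */
}

static int kick_cpu(int cpu, void *ctx)
{
	int err = smp_call_function_single(cpu, remote_kick, ctx, 0 /* don't wait */);

	if (err)
		pr_err("remote kick on cpu%d failed: %d\n", cpu, err);
	return err;
}
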
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
+ * Why is this needed?
+ * The candidacy algorithm runs from the callback function registered
+ * with the CPU hotplug notifier, and that callback runs in worker
+ * context. dhd_napi_schedule_on also runs in worker context.
+ * Both can therefore run on two different CPUs at the same time, so
+ * there is a window in which CPUn is being brought down from CPUm
+ * while we try to run a function on CPUn.
+ * To prevent this, it is better to execute the SMP function with
+ * get_online_cpus held.
+ * This call ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running, that is fine: the
+ * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
+ *
+ * The below mentioned code structure is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu in progress
+ *
+ * According to the documentation, calling get_online_cpus is NOT required if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
+ * run from work queue context, we have to call these functions here.
+ */
+void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->rx_napi_cpu);
+
+ if (!cpu_online(cpu))
+ dhd_napi_schedule(dhd);
+ else
+ dhd_napi_schedule_on(dhd, cpu);
+
+ put_online_cpus();
+}
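
The comment block before dhd_rx_napi_dispatcher_fn() explains why the cpu_online() check and the cross-CPU call must sit inside a get_online_cpus()/put_online_cpus() pair. A condensed sketch of that hotplug-safe shape, with hypothetical callbacks standing in for the local and remote dispatch paths (on v4.19 these helpers are wrappers around cpus_read_lock()/cpus_read_unlock()):

#include <linux/cpu.h>
#include <linux/cpumask.h>

static void hotplug_safe_dispatch(int cpu,
                                  void (*run_local)(void),
                                  void (*run_remote)(int cpu))
{
	get_online_cpus();           /* 'cpu' cannot be unplugged until we drop this */
	if (cpu_online(cpu))
		run_remote(cpu);     /* e.g. IPI or per-CPU work on the target */
	else
		run_local();         /* target went away: do it on this CPU */
	put_online_cpus();
}
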
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and sendup.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int on_cpu;
+#ifdef DHD_LB_IRQSET
+ cpumask_t cpus;
+#endif /* DHD_LB_IRQSET */
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+ /* append the producer's queue of packets to the napi's rx process queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+
+ /* if LB RXP is disabled, schedule NAPI directly */
+ if (atomic_read(&dhd->lb_rxp_active) == 0) {
+ dhd_napi_schedule(dhd);
+ return;
+ }
+
+ /*
+ * If the destination CPU is not online or is the same as the
+ * current CPU, there is no need to schedule the work.
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ preempt_disable();
+ on_cpu = atomic_read(&dhd->rx_napi_cpu);
+#ifdef DHD_LB_IRQSET
+ if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
+ (!cpu_online(on_cpu))) {
+#else
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+#endif /* DHD_LB_IRQSET */
+ DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
+ curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
+ dhd_napi_schedule(dhd);
+ } else {
+ DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
+ __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
+ schedule_work(&dhd->rx_napi_dispatcher_work);
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+ }
+ preempt_enable();
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+ __skb_queue_tail(&dhd->rx_pend_queue, pkt);
+}
+#endif /* DHD_LB_RXP */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void
+dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask)
+{
+ unsigned int irq = (unsigned int)-1;
+ int err = BCME_OK;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhdp->bus) {
+ DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+
+ dhdpcie_get_pcieirq(dhdp->bus, &irq);
+ err = irq_set_affinity(irq, cpumask);
+ if (err)
+ DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+}
+#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
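
dhd_irq_set_affinity() above hands a caller-supplied cpumask straight to irq_set_affinity(). For reference, a small sketch of building such a mask and applying it; pin_irq and the two CPU parameters are hypothetical, and on CONFIG_CPUMASK_OFFSTACK kernels the mask must be allocated and freed as shown.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int pin_irq(unsigned int irq, int cpu_a, int cpu_b)
{
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu_a, mask);
	cpumask_set_cpu(cpu_b, mask);

	err = irq_set_affinity(irq, mask);  /* fails if no CPU in the mask is online */
	free_cpumask_var(mask);
	return err;
}
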
+
+#if defined(DHD_LB_TXP)
+
+int BCMFASTPATH
+dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
+ int ifidx, void *skb)
+{
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
+
+ /* If the feature is disabled at run-time, do TX from here */
+ if (atomic_read(&dhd->lb_txp_active) == 0) {
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+ return __dhd_sendpkt(&dhd->pub, ifidx, skb);
+ }
+
+ /* Store the address of net device and interface index in the Packet tag */
+ DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
+ DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
+
+ /* Enqueue the skb into tx_pend_queue */
+ skb_queue_tail(&dhd->tx_pend_queue, skb);
+
+ DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
+
+ /* Dispatch the Tx job to be processed by the tx_tasklet */
+ dhd_lb_tx_dispatch(&dhd->pub);
+
+ return NETDEV_TX_OK;
+}
+#endif /* DHD_LB_TXP */
+
+#ifdef DHD_LB_TXP
+#define DHD_LB_TXBOUND 64
+/*
+ * Function that performs the TX processing on a given CPU
+ */
+bool
+dhd_lb_tx_process(dhd_info_t *dhd)
+{
+ struct sk_buff *skb;
+ int cnt = 0;
+ struct net_device *net;
+ int ifidx;
+ bool resched = FALSE;
+
+ DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
+ if (dhd == NULL) {
+ DHD_ERROR((" Null pointer DHD \r\n"));
+ return resched;
+ }
+
+ BCM_REFERENCE(net);
+
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+
+ /* Base Loop to perform the actual Tx */
+ do {
+ skb = skb_dequeue(&dhd->tx_pend_queue);
+ if (skb == NULL) {
+ DHD_TRACE(("Dequeued a Null Packet \r\n"));
+ break;
+ }
+ cnt++;
+
+ net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+ ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
+ net, ifidx));
+
+ __dhd_sendpkt(&dhd->pub, ifidx, skb);
+
+ if (cnt >= DHD_LB_TXBOUND) {
+ resched = TRUE;
+ break;
+ }
+
+ } while (1);
+
+ DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
+
+ return resched;
+}
+
+void
+dhd_lb_tx_handler(unsigned long data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ if (dhd_lb_tx_process(dhd)) {
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ }
+}
+
+#endif /* DHD_LB_TXP */
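
dhd_lb_tx_process() above caps each tasklet run at DHD_LB_TXBOUND packets and asks to be rescheduled when the queue is not yet empty, so a large backlog cannot hog softirq time on one CPU. A minimal kernel-context sketch of that budgeted-tasklet shape, with hypothetical names and a placeholder in place of the real __dhd_sendpkt() call (tasklet_init() and skb_queue_head_init() are assumed to run at setup):

#include <linux/interrupt.h>
#include <linux/skbuff.h>

#define TX_BOUND 64   /* hypothetical budget, mirrors DHD_LB_TXBOUND */

static struct sk_buff_head txq;            /* skb_queue_head_init(&txq) at setup */
static struct tasklet_struct tx_tasklet;   /* tasklet_init(&tx_tasklet, budgeted_tx, 0) at setup */

static void budgeted_tx(unsigned long data)
{
	struct sk_buff *skb;
	int cnt = 0;

	while (cnt < TX_BOUND && (skb = skb_dequeue(&txq)) != NULL) {
		dev_kfree_skb(skb);   /* placeholder for the real send path */
		cnt++;
	}

	if (!skb_queue_empty(&txq))
		tasklet_schedule(&tx_tasklet);  /* budget hit: finish in a later run */
}
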
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.c
new file mode 100644
index 0000000..50c61f6
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.c
@@ -0,0 +1,1326 @@
+/*
+ * Packet dump helper functions
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmutils.h>
+#include <bcmevent.h>
+#include <bcmendian.h>
+#include <bcmtlv.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <bcmip.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <dhd_linux_pktdump.h>
+
+#define DHD_PKTDUMP(arg) DHD_ERROR(arg)
+#define DHD_PKTDUMP_MEM(arg) DHD_ERROR_MEM(arg)
+#define PACKED_STRUCT __attribute__ ((packed))
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* EAPOL-Key header field size */
+#define AKW_BLOCK_LEN 8
+#define WPA_KEY_REPLAY_LEN 8
+#define WPA_KEY_NONCE_LEN 32
+#define WPA_KEY_IV_LEN 16
+#define WPA_KEY_RSC_LEN 8
+#define WPA_KEY_ID_LEN 8
+#define WPA_KEY_MIC_LEN 16
+#define WPA_MAX_KEY_SIZE 32
+#define WPA_KEY_DATA_LEN (WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+
+/* Key information bit */
+#define KEYINFO_TYPE_MASK (1 << 3)
+#define KEYINFO_INSTALL_MASK (1 << 6)
+#define KEYINFO_KEYACK_MASK (1 << 7)
+#define KEYINFO_KEYMIC_MASK (1 << 8)
+#define KEYINFO_SECURE_MASK (1 << 9)
+#define KEYINFO_ERROR_MASK (1 << 10)
+#define KEYINFO_REQ_MASK (1 << 11)
+
+/* EAP Code */
+#define EAP_CODE_REQUEST 1 /* Request */
+#define EAP_CODE_RESPONSE 2 /* Response */
+#define EAP_CODE_SUCCESS 3 /* Success */
+#define EAP_CODE_FAILURE 4 /* Failure */
+
+/* EAP Type */
+#define EAP_TYPE_RSVD 0 /* Reserved */
+#define EAP_TYPE_IDENT 1 /* Identity */
+#define EAP_TYPE_NOTI 2 /* Notification */
+#define EAP_TYPE_TLS 13 /* EAP-TLS */
+#define EAP_TYPE_LEAP 17 /* Cisco-LEAP */
+#define EAP_TYPE_TTLS 21 /* EAP-TTLS */
+#define EAP_TYPE_AKA 23 /* EAP-AKA */
+#define EAP_TYPE_PEAP 25 /* EAP-PEAP */
+#define EAP_TYPE_FAST 43 /* EAP-FAST */
+#define EAP_TYPE_PSK 47 /* EAP-PSK */
+#define EAP_TYPE_AKAP 50 /* EAP-AKA' */
+#define EAP_TYPE_EXP 254 /* Reserved for Expanded Type */
+
+/* WSC */
+#define EAP_HDR_LEN 5
+#define EAP_WSC_NONCE_OFFSET 10
+#define EAP_WSC_DATA_OFFSET (OFFSETOF(eap_wsc_fmt_t, data))
+#define EAP_WSC_MIN_DATA_LEN ((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET))
+#define WFA_VID "\x00\x37\x2A" /* WFA SMI code */
+#define WFA_VID_LEN 3 /* WFA VID length */
+#define WFA_VTYPE 1u /* WFA Vendor type */
+
+/* WSC opcode */
+#define WSC_OPCODE_UPNP 0
+#define WSC_OPCODE_START 1
+#define WSC_OPCODE_ACK 2
+#define WSC_OPCODE_NACK 3
+#define WSC_OPCODE_MSG 4
+#define WSC_OPCODE_DONE 5
+#define WSC_OPCODE_FRAG_ACK 6
+
+/* WSC flag */
+#define WSC_FLAG_MF 1 /* more fragments */
+#define WSC_FLAG_LF 2 /* length field */
+
+/* WSC message code */
+#define WSC_ATTR_MSG 0x1022
+#define WSC_MSG_M1 0x04
+#define WSC_MSG_M2 0x05
+#define WSC_MSG_M3 0x07
+#define WSC_MSG_M4 0x08
+#define WSC_MSG_M5 0x09
+#define WSC_MSG_M6 0x0A
+#define WSC_MSG_M7 0x0B
+#define WSC_MSG_M8 0x0C
+
+/* Debug prints */
+typedef enum pkt_cnt_type {
+ PKT_CNT_TYPE_INVALID = 0,
+ PKT_CNT_TYPE_ARP = 1,
+ PKT_CNT_TYPE_DNS = 2,
+ PKT_CNT_TYPE_MAX = 3
+} pkt_cnt_type_t;
+
+typedef struct pkt_cnt {
+ uint32 tx_cnt;
+ uint32 tx_err_cnt;
+ uint32 rx_cnt;
+} pkt_cnt_t;
+
+typedef struct pkt_cnt_log {
+ bool enabled;
+ uint16 reason;
+ timer_list_compat_t pktcnt_timer;
+ pkt_cnt_t arp_cnt;
+ pkt_cnt_t dns_cnt;
+} pkt_cnts_log_t;
+
+#define PKT_CNT_TIMER_INTERNVAL_MS 5000 /* packet count timeout(ms) */
+#define PKT_CNT_RSN_VALID(rsn) \
+ (((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX)))
+
+#ifdef DHD_PKTDUMP_ROAM
+static const char pkt_cnt_msg[][20] = {
+ "INVALID",
+ "ROAM_SUCCESS",
+ "GROUP_KEY_UPDATE",
+ "INVALID"
+};
+#endif /* DHD_PKTDUMP_ROAM */
+
+static const char tx_pktfate[][30] = {
+ "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */
+ "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */
+ "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */
+ "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */
+ "TX_PKT_FATE_SENT", /* 4: WLFC_CTL_PKTFLAG_DISCARD_NOACK */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */
+ "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */
+ "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */
+};
+
+#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x"
+#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \
+ ((const eapol_key_hdr_t *)(key))->replay[1], \
+ ((const eapol_key_hdr_t *)(key))->replay[2], \
+ ((const eapol_key_hdr_t *)(key))->replay[3], \
+ ((const eapol_key_hdr_t *)(key))->replay[4], \
+ ((const eapol_key_hdr_t *)(key))->replay[5], \
+ ((const eapol_key_hdr_t *)(key))->replay[6], \
+ ((const eapol_key_hdr_t *)(key))->replay[7]
+#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s"
+#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0))
+#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \
+ (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER")
+#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A")
+#define TX_FATE_ACKED(fate) ((fate) ? ((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0))
+
+#define EAP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [TX]: " \
+ str TXFATE_FMT "\n", ifname, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [RX]: " \
+ str "\n", ifname)); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_REPLAY(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [TX]: " \
+ str DBGREPLAY TXFATE_FMT "\n", ifname, \
+ REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [RX]: " \
+ str DBGREPLAY "\n", ifname, \
+ REPLAY_FMT(eap_key))); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [TX]: " \
+ str "ver %d, type %d" TXFATE_FMT "\n", ifname, \
+ eapol_hdr->version, eapol_hdr->type, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [RX]: " \
+ str "ver %d, type %d\n", ifname, \
+ eapol_hdr->version, eapol_hdr->type)); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER_4WAY(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [TX]: " str \
+ "ver %d type %d keytype %d keyinfo 0x%02X" \
+ TXFATE_FMT "\n", ifname, eapol_hdr->version, \
+ eapol_hdr->type, eap_key->type, \
+ (uint32)hton16(eap_key->key_info), \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("ETHER_TYPE_802_1X[%s] [RX]: " str \
+ "ver %d type %d keytype %d keyinfo 0x%02X\n", \
+ ifname, eapol_hdr->version, eapol_hdr->type, \
+ eap_key->type, (uint32)hton16(eap_key->key_info))); \
+ } \
+ } while (0)
+
+/* EAPOL header */
+typedef struct eapol_header {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ uint8 version; /* EAPOL protocol version */
+ uint8 type; /* EAPOL type */
+ uint16 length; /* Length of body */
+ uint8 body[1]; /* Body (optional) */
+} PACKED_STRUCT eapol_header_t;
+
+/* EAP header */
+typedef struct eap_header_fmt {
+ uint8 code;
+ uint8 id;
+ uint16 len;
+ uint8 type;
+ uint8 data[1];
+} PACKED_STRUCT eap_header_fmt_t;
+
+/* WSC EAP format */
+typedef struct eap_wsc_fmt {
+ uint8 oui[3];
+ uint32 ouitype;
+ uint8 opcode;
+ uint8 flags;
+ uint8 data[1];
+} PACKED_STRUCT eap_wsc_fmt_t;
+
+/* EAPOL-Key */
+typedef struct eapol_key_hdr {
+ uint8 type; /* Key Descriptor Type */
+ uint16 key_info; /* Key Information (unaligned) */
+ uint16 key_len; /* Key Length (unaligned) */
+ uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */
+ uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */
+ uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */
+ uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */
+ uint16 data_len; /* Key Data Length */
+ uint8 data[WPA_KEY_DATA_LEN]; /* Key data */
+} PACKED_STRUCT eapol_key_hdr_t;
+
+msg_eapol_t
+dhd_is_4way_msg(uint8 *pktdata)
+{
+ eapol_header_t *eapol_hdr;
+ eapol_key_hdr_t *eap_key;
+ msg_eapol_t type = EAPOL_OTHER;
+ bool pair, ack, mic, kerr, req, sec, install;
+ uint16 key_info;
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return type;
+ }
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+ if (eap_key->type != EAPOL_WPA2_KEY) {
+ return type;
+ }
+
+ key_info = hton16(eap_key->key_info);
+ pair = !!(key_info & KEYINFO_TYPE_MASK);
+ ack = !!(key_info & KEYINFO_KEYACK_MASK);
+ mic = !!(key_info & KEYINFO_KEYMIC_MASK);
+ kerr = !!(key_info & KEYINFO_ERROR_MASK);
+ req = !!(key_info & KEYINFO_REQ_MASK);
+ sec = !!(key_info & KEYINFO_SECURE_MASK);
+ install = !!(key_info & KEYINFO_INSTALL_MASK);
+
+ if (pair && !install && ack && !mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M1;
+ } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M2;
+ } else if (pair && ack && mic && sec && !kerr && !req) {
+ type = EAPOL_4WAY_M3;
+ } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+ type = EAPOL_4WAY_M4;
+ } else if (!pair && !install && ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M1;
+ } else if (!pair && !install && !ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M2;
+ } else {
+ type = EAPOL_OTHER;
+ }
+
+ return type;
+}
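
dhd_is_4way_msg() above classifies an EAPOL-Key frame purely from the Key Information bits. The same decision table can be exercised in a stand-alone user-space program; the sketch below uses hypothetical names and the well-known key_info values 0x008a (M1) and 0x010a (M2) as test inputs.

#include <stdint.h>
#include <stdio.h>

#define KI_TYPE    (1u << 3)   /* 1 = pairwise, 0 = group */
#define KI_INSTALL (1u << 6)
#define KI_ACK     (1u << 7)
#define KI_MIC     (1u << 8)
#define KI_SECURE  (1u << 9)
#define KI_ERROR   (1u << 10)
#define KI_REQUEST (1u << 11)

static const char *classify_eapol_key(uint16_t ki)
{
	int pair = !!(ki & KI_TYPE), inst = !!(ki & KI_INSTALL);
	int ack = !!(ki & KI_ACK), mic = !!(ki & KI_MIC);
	int sec = !!(ki & KI_SECURE), err = !!(ki & KI_ERROR);
	int req = !!(ki & KI_REQUEST);

	if (pair && !inst && ack && !mic && !sec && !err && !req)
		return "4-way M1";
	if (pair && !inst && !ack && mic && !sec && !err && !req)
		return "4-way M2";
	if (pair && ack && mic && sec && !err && !req)
		return "4-way M3";
	if (pair && !inst && !ack && mic && sec && !err && !req)
		return "4-way M4";
	if (!pair && !inst && ack && mic && sec && !err && !req)
		return "group M1";
	if (!pair && !inst && !ack && mic && sec && !err && !req)
		return "group M2";
	return "other";
}

int main(void)
{
	printf("0x008a -> %s\n", classify_eapol_key(0x008a));  /* expect 4-way M1 */
	printf("0x010a -> %s\n", classify_eapol_key(0x010a));  /* expect 4-way M2 */
	return 0;
}
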
+
+void
+dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen,
+ bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ struct ether_header *eh;
+ uint16 ether_type;
+
+ if (!pktdata || pktlen < ETHER_HDR_LEN) {
+ return;
+ }
+
+#if defined(BCMPCIE) && defined(DHD_PKT_LOGGING)
+ if (tx && !pkthash && !pktfate) {
+ return;
+ }
+#endif /* BCMPCIE && DHD_PKT_LOGGING */
+
+ eh = (struct ether_header *)pktdata;
+ ether_type = ntoh16(eh->ether_type);
+ if (ether_type == ETHER_TYPE_802_1X) {
+ dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen,
+ tx, pkthash, pktfate);
+ }
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ }
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_ARP) {
+ dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ }
+}
+
+#ifdef DHD_PKTDUMP_ROAM
+static void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype)
+{
+ pkt_cnts_log_t *pktcnts;
+ pkt_cnt_t *cnt;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ if (!pktcnts) {
+ DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!pktcnts->enabled || (tx && !pktfate)) {
+ return;
+ }
+
+ if (pkttype == PKT_CNT_TYPE_ARP) {
+ cnt = (pkt_cnt_t *)&pktcnts->arp_cnt;
+ } else if (pkttype == PKT_CNT_TYPE_DNS) {
+ cnt = (pkt_cnt_t *)&pktcnts->dns_cnt;
+ } else {
+ /* invalid packet type */
+ return;
+ }
+
+ if (tx) {
+ TX_FATE_ACKED(pktfate) ? cnt->tx_cnt++ : cnt->tx_err_cnt++;
+ } else {
+ cnt->rx_cnt++;
+ }
+}
+
+static void
+dhd_dump_pkt_timer(unsigned long data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)data;
+ pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+ pktcnts->enabled = FALSE;
+
+ /* print out the packet counter value */
+ DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n"));
+ DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason]));
+ DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS));
+ DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+ pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt,
+ pktcnts->arp_cnt.rx_cnt));
+ DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+ pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt,
+ pktcnts->dns_cnt.rx_cnt));
+ DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n"));
+}
+
+void
+dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (!PKT_CNT_RSN_VALID(rsn)) {
+ DHD_ERROR(("%s: invalid reason code %d\n",
+ __FUNCTION__, rsn));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ if (timer_pending(&pktcnts->pktcnt_timer)) {
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ }
+
+ bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+ bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+ pktcnts->reason = rsn;
+ pktcnts->enabled = TRUE;
+ mod_timer(&pktcnts->pktcnt_timer,
+ jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERNVAL_MS));
+ DHD_PKTDUMP(("%s: Arm the pktcnt timer. reason=%d\n",
+ __FUNCTION__, rsn));
+}
+
+void
+dhd_dump_pkt_init(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t));
+ if (!pktcnts) {
+ DHD_ERROR(("%s: failed to allocate memory for pktcnts\n",
+ __FUNCTION__));
+ return;
+ }
+
+ /* init timers */
+ init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp);
+ dhdp->pktcnts = pktcnts;
+}
+
+void
+dhd_dump_pkt_deinit(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ pktcnts->enabled = FALSE;
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t));
+ dhdp->pktcnts = NULL;
+}
+
+void
+dhd_dump_pkt_clear(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ pktcnts->enabled = FALSE;
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ pktcnts->reason = 0;
+ bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+ bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+}
+
+bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ return FALSE;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+ return pktcnts->enabled;
+}
+#else
+static INLINE void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { }
+static INLINE bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; }
+#endif /* DHD_PKTDUMP_ROAM */
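
The counter code above opens a fixed measurement window: mod_timer() arms a one-shot timer, packets are counted while the enabled flag is set, and the timer callback prints the summary and clears the flag. Below is a condensed kernel-context sketch of that shape using the plain v4.19 timer_setup()/from_timer() API (the driver goes through its init_timer_compat() wrapper instead); cnt_window and its helpers are hypothetical.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct cnt_window {
	bool enabled;
	unsigned long hits;
	struct timer_list timer;
};

static void cnt_window_expire(struct timer_list *t)
{
	struct cnt_window *w = from_timer(w, t, timer);

	w->enabled = false;
	pr_info("window closed: %lu hits\n", w->hits);
}

static void cnt_window_init(struct cnt_window *w)
{
	w->enabled = false;
	w->hits = 0;
	timer_setup(&w->timer, cnt_window_expire, 0);
}

static void cnt_window_start(struct cnt_window *w, unsigned int ms)
{
	w->hits = 0;
	w->enabled = true;
	mod_timer(&w->timer, jiffies + msecs_to_jiffies(ms));  /* re-arms if already pending */
}
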
+
+#ifdef DHD_8021X_DUMP
+static void
+dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eap_header_fmt_t *eap_hdr;
+ eap_wsc_fmt_t *eap_wsc;
+ char *ifname;
+ uint16 eap_len;
+ bool cond;
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
+ DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__));
+ return;
+ }
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+ if (eap_hdr->type != EAP_TYPE_EXP) {
+ return;
+ }
+
+ eap_len = ntoh16(eap_hdr->len);
+ if (eap_len < EAP_WSC_MIN_DATA_LEN) {
+ return;
+ }
+
+ eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data);
+ if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) ||
+ (ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) {
+ return;
+ }
+
+ if (eap_wsc->flags) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_wsc->opcode == WSC_OPCODE_MSG) {
+ const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data);
+ const uint8 *msg;
+ uint16 msglen;
+ uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET);
+ bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE;
+
+ msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len,
+ WSC_ATTR_MSG, &msglen, opt);
+ if (msg && msglen) {
+ switch (*msg) {
+ case WSC_MSG_M1:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M1");
+ break;
+ case WSC_MSG_M2:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M2");
+ break;
+ case WSC_MSG_M3:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M3");
+ break;
+ case WSC_MSG_M4:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M4");
+ break;
+ case WSC_MSG_M5:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M5");
+ break;
+ case WSC_MSG_M6:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M6");
+ break;
+ case WSC_MSG_M7:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M7");
+ break;
+ case WSC_MSG_M8:
+ DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M8");
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (eap_wsc->opcode == WSC_OPCODE_START) {
+ DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WSC Start");
+ } else if (eap_wsc->opcode == WSC_OPCODE_DONE) {
+ DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WSC Done");
+ }
+}
+
+static void
+dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eap_header_fmt_t *eap_hdr;
+ char *ifname;
+ bool cond;
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_hdr->code == EAP_CODE_REQUEST ||
+ eap_hdr->code == EAP_CODE_RESPONSE) {
+ bool isreq = (eap_hdr->code == EAP_CODE_REQUEST);
+ switch (eap_hdr->type) {
+ case EAP_TYPE_IDENT:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, Identity");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, Identity");
+ }
+ break;
+ case EAP_TYPE_TLS:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, TLS");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, TLS");
+ }
+ break;
+ case EAP_TYPE_LEAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, LEAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, LEAP");
+ }
+ break;
+ case EAP_TYPE_TTLS:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, TTLS");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, TTLS");
+ }
+ break;
+ case EAP_TYPE_AKA:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, AKA");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, AKA");
+ }
+ break;
+ case EAP_TYPE_PEAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, PEAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, PEAP");
+ }
+ break;
+ case EAP_TYPE_FAST:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, FAST");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, FAST");
+ }
+ break;
+ case EAP_TYPE_PSK:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, PSK");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, PSK");
+ }
+ break;
+ case EAP_TYPE_AKAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, AKAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, AKAP");
+ }
+ break;
+ case EAP_TYPE_EXP:
+ dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx,
+ pkthash, pktfate);
+ break;
+ default:
+ break;
+ }
+ } else if (eap_hdr->code == EAP_CODE_SUCCESS) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Success");
+ } else if (eap_hdr->code == EAP_CODE_FAILURE) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Failure");
+ }
+}
+
+static void
+dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eapol_key_hdr_t *eap_key;
+ msg_eapol_t type;
+ char *ifname;
+ bool cond;
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ type = dhd_is_4way_msg(pktdata);
+ ifname = dhd_ifname(dhd, ifidx);
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_key->type != EAPOL_WPA2_KEY) {
+ EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY");
+ return;
+ }
+
+ switch (type) {
+ case EAPOL_4WAY_M1:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
+ break;
+ case EAPOL_4WAY_M2:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
+ break;
+ case EAPOL_4WAY_M3:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
+ break;
+ case EAPOL_4WAY_M4:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
+ break;
+ case EAPOL_GROUPKEY_M1:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond);
+ EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1");
+ break;
+ case EAPOL_GROUPKEY_M2:
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond);
+ EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2");
+ if (ifidx == 0 && tx && pktfate) {
+ dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP);
+ }
+ break;
+ default:
+ DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+ EAP_PRINT_OTHER("OTHER 4WAY");
+ break;
+ }
+}
+
+void
+dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ char *ifname;
+ eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata;
+ bool cond;
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eapol_hdr->type == EAP_PACKET) {
+ dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx,
+ pkthash, pktfate);
+ } else if (eapol_hdr->type == EAPOL_START) {
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, EAPOL-Start");
+ } else if (eapol_hdr->type == EAPOL_KEY) {
+ dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx,
+ pkthash, pktfate);
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+ EAP_PRINT_OTHER("OTHER 8021X");
+ }
+}
+#endif /* DHD_8021X_DUMP */
+
+#ifdef DHD_DHCP_DUMP
+#define BOOTP_CHADDR_LEN 16
+#define BOOTP_SNAME_LEN 64
+#define BOOTP_FILE_LEN 128
+#define BOOTP_MIN_DHCP_OPT_LEN 312
+#define BOOTP_MAGIC_COOKIE_LEN 4
+
+#define DHCP_MSGTYPE_DISCOVER 1
+#define DHCP_MSGTYPE_OFFER 2
+#define DHCP_MSGTYPE_REQUEST 3
+#define DHCP_MSGTYPE_DECLINE 4
+#define DHCP_MSGTYPE_ACK 5
+#define DHCP_MSGTYPE_NAK 6
+#define DHCP_MSGTYPE_RELEASE 7
+#define DHCP_MSGTYPE_INFORM 8
+
+#define DHCP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP((str " %s[%s][%s] [TX] -" TXFATE_FMT "\n", \
+ typestr, opstr, ifname, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP((str " %s[%s][%s] [RX]\n", \
+ typestr, opstr, ifname)); \
+ } \
+ } while (0)
+
+typedef struct bootp_fmt {
+ struct ipv4_hdr iph;
+ struct bcmudp_hdr udph;
+ uint8 op;
+ uint8 htype;
+ uint8 hlen;
+ uint8 hops;
+ uint32 transaction_id;
+ uint16 secs;
+ uint16 flags;
+ uint32 client_ip;
+ uint32 assigned_ip;
+ uint32 server_ip;
+ uint32 relay_ip;
+ uint8 hw_address[BOOTP_CHADDR_LEN];
+ uint8 server_name[BOOTP_SNAME_LEN];
+ uint8 file_name[BOOTP_FILE_LEN];
+ uint8 options[BOOTP_MIN_DHCP_OPT_LEN];
+} PACKED_STRUCT bootp_fmt_t;
+
+static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
+static char dhcp_ops[][10] = {
+ "NA", "REQUEST", "REPLY"
+};
+static char dhcp_types[][10] = {
+ "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
+};
+
+static const int dhcp_types_stat[9] = {
+ ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST),
+ ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE),
+ ST(DHCP_INFORM)
+};
+
+void
+dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &b->iph;
+ uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len);
+ int dhcp_type = 0, len, opt_len;
+ char *ifname = NULL, *typestr = NULL, *opstr = NULL;
+ bool cond;
+
+ /* check IP header */
+ if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+ IP_VER(iph) != IP_VER_4 ||
+ IPV4_PROT(iph) != IP_PROT_UDP) {
+ return;
+ }
+
+ /* check UDP port for bootp (67, 68) */
+ if (b->udph.src_port != htons(DHCP_PORT_SERVER) &&
+ b->udph.src_port != htons(DHCP_PORT_CLIENT) &&
+ b->udph.dst_port != htons(DHCP_PORT_SERVER) &&
+ b->udph.dst_port != htons(DHCP_PORT_CLIENT)) {
+ return;
+ }
+
+ /* check header length */
+ if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr);
+ opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) -
+ sizeof(struct bcmudp_hdr) - sizeof(b->options));
+
+ /* parse bootp options */
+ if (opt_len >= BOOTP_MAGIC_COOKIE_LEN &&
+ !memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) {
+ ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN];
+ while (ptr < end && *ptr != 0xff) {
+ opt = ptr++;
+ if (*opt == 0) {
+ continue;
+ }
+ ptr += *ptr + 1;
+ if (ptr >= end) {
+ break;
+ }
+ if (*opt == DHCP_OPT_MSGTYPE) {
+ if (opt[1]) {
+ dhcp_type = opt[2];
+ typestr = dhcp_types[dhcp_type];
+ opstr = dhcp_ops[b->op];
+ DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type],
+ ifidx, tx, cond);
+ DHCP_PRINT("DHCP");
+ break;
+ }
+ }
+ }
+ }
+}
+#endif /* DHD_DHCP_DUMP */
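
The option walk in dhd_dhcp_dump() above is a small TLV scan: verify the 4-byte magic cookie, then step through code/length/value triplets until option 53 (the DHCP message type) or the end marker is found. A stand-alone user-space sketch of that scan with hypothetical names; the bounds checks are analogous to the driver's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DHCP_OPT_PAD     0
#define DHCP_OPT_END     0xff
#define DHCP_OPT_MSGTYPE 53

static int dhcp_msg_type(const uint8_t *opt, size_t len)
{
	const uint8_t cookie[4] = { 99, 130, 83, 99 };
	const uint8_t *p, *end;

	if (len < 4 || memcmp(opt, cookie, 4) != 0)
		return -1;

	p = opt + 4;
	end = opt + len;
	while (p < end && *p != DHCP_OPT_END) {
		uint8_t code = *p++;

		if (code == DHCP_OPT_PAD)
			continue;
		if (p >= end || p + 1 + p[0] > end)
			break;                      /* truncated option */
		if (code == DHCP_OPT_MSGTYPE && p[0] >= 1)
			return p[1];                /* 1=DISCOVER ... 8=INFORM */
		p += 1 + p[0];                      /* skip length byte + value */
	}
	return -1;
}

int main(void)
{
	/* cookie + option 53 (len 1, value 3 = REQUEST) + END */
	const uint8_t opts[] = { 99, 130, 83, 99, 53, 1, 3, 0xff };

	printf("message type: %d\n", dhcp_msg_type(opts, sizeof(opts)));
	return 0;
}
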
+
+#ifdef DHD_ICMP_DUMP
+#define ICMP_TYPE_DEST_UNREACH 3
+#define ICMP_ECHO_SEQ_OFFSET 6
+#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET)))
+#define ICMP_PING_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP_MEM((str "[%s][TX] : SEQNUM=%d" \
+ TXFATE_FMT "\n", ifname, seqnum, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s][RX] : SEQNUM=%d\n", \
+ ifname, seqnum)); \
+ } \
+ } while (0)
+
+#define ICMP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP_MEM((str "[%s][TX] : TYPE=%d, CODE=%d" \
+ TXFATE_FMT "\n", ifname, type, code, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s][RX] : TYPE=%d," \
+ " CODE=%d\n", ifname, type, code)); \
+ } \
+ } while (0)
+
+void
+dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt;
+ struct bcmicmp_hdr *icmph;
+ char *ifname;
+ bool cond;
+ uint16 seqnum, type, code;
+
+ /* check IP header */
+ if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+ IP_VER(iph) != IP_VER_4 ||
+ IPV4_PROT(iph) != IP_PROT_ICMP) {
+ return;
+ }
+
+ /* check header length */
+ if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr));
+ seqnum = 0;
+ type = icmph->type;
+ code = icmph->code;
+ if (type == ICMP_TYPE_ECHO_REQUEST) {
+ seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond);
+ ICMP_PING_PRINT("PING REQUEST");
+ } else if (type == ICMP_TYPE_ECHO_REPLY) {
+ seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond);
+ ICMP_PING_PRINT("PING REPLY");
+ } else if (type == ICMP_TYPE_DEST_UNREACH) {
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond);
+ ICMP_PRINT("ICMP DEST UNREACH");
+ } else {
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond);
+ ICMP_PRINT("ICMP OTHER");
+ }
+}
+#endif /* DHD_ICMP_DUMP */
+
+#ifdef DHD_ARP_DUMP
+#define ARP_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP((str "[%s] [TX]" TXFATE_FMT "\n", \
+ ifname, TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [TX]" TXFATE_FMT "\n", \
+ ifname, TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [RX]\n", ifname)); \
+ } \
+ } while (0)
+
+#define ARP_PRINT_OTHER(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP((str "[%s] [TX] op_code=%d" \
+ TXFATE_FMT "\n", ifname, opcode, \
+ TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [TX] op_code=%d" \
+ TXFATE_FMT "\n", ifname, opcode, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [RX] op_code=%d\n", \
+ ifname, opcode)); \
+ } \
+ } while (0)
+
+void
+dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct bcmarp *arph = (struct bcmarp *)pkt;
+ char *ifname;
+ uint16 opcode;
+ bool cond, dump_enabled;
+
+ /* validation check */
+ if (arph->htype != hton16(HTYPE_ETHERNET) ||
+ arph->hlen != ETHER_ADDR_LEN ||
+ arph->plen != 4) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ opcode = ntoh16(arph->oper);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ dump_enabled = dhd_dump_pkt_enabled(dhdp);
+ if (opcode == ARP_OPC_REQUEST) {
+ DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond);
+ ARP_PRINT("ARP REQUEST");
+ } else if (opcode == ARP_OPC_REPLY) {
+ DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond);
+ ARP_PRINT("ARP RESPONSE");
+ } else {
+ ARP_PRINT_OTHER("ARP OTHER");
+ }
+
+ if (ifidx == 0) {
+ dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP);
+ }
+}
+#endif /* DHD_ARP_DUMP */
+
+#ifdef DHD_DNS_DUMP
+typedef struct dns_fmt {
+ struct ipv4_hdr iph;
+ struct bcmudp_hdr udph;
+ uint16 id;
+ uint16 flags;
+ uint16 qdcount;
+ uint16 ancount;
+ uint16 nscount;
+ uint16 arcount;
+} PACKED_STRUCT dns_fmt_t;
+
+#define UDP_PORT_DNS 53
+#define DNS_QR_LOC 15
+#define DNS_OPCODE_LOC 11
+#define DNS_RCODE_LOC 0
+#define DNS_QR_MASK ((0x1) << (DNS_QR_LOC))
+#define DNS_OPCODE_MASK ((0xF) << (DNS_OPCODE_LOC))
+#define DNS_RCODE_MASK ((0xF) << (DNS_RCODE_LOC))
+#define GET_DNS_QR(flags) (((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC))
+#define GET_DNS_OPCODE(flags) (((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC))
+#define GET_DNS_RCODE(flags) (((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC))
+#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6)))
+
+static const char dns_opcode_types[][11] = {
+ "QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE"
+};
+
+#define DNSOPCODE(op) \
+ (DNS_UNASSIGNED_OPCODE(flags) ? "UNASSIGNED" : dns_opcode_types[op])
+
+#define DNS_REQ_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP((str "[%s] [TX] ID:0x%04X OPCODE:%s" \
+ TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [TX] ID:0x%04X OPCODE:%s" \
+ TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [RX] ID:0x%04X OPCODE:%s\n", \
+ ifname, id, DNSOPCODE(opcode))); \
+ } \
+ } while (0)
+
+#define DNS_RESP_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP((str "[%s] [TX] ID:0x%04X OPCODE:%s RCODE:%d" \
+ TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+ GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [TX] ID:0x%04X OPCODE:%s RCODE:%d" \
+ TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+ GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM((str "[%s] [RX] ID:0x%04X OPCODE:%s RCODE:%d\n", \
+ ifname, id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \
+ } \
+ } while (0)
+
+void
+dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &dnsh->iph;
+ uint16 flags, opcode, id;
+ char *ifname;
+ bool cond, dump_enabled;
+
+ /* check IP header */
+ if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+ IP_VER(iph) != IP_VER_4 ||
+ IPV4_PROT(iph) != IP_PROT_UDP) {
+ return;
+ }
+
+ /* check UDP port for DNS */
+ if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) &&
+ dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) {
+ return;
+ }
+
+ /* check header length */
+ if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) +
+ sizeof(struct bcmudp_hdr))) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ dump_enabled = dhd_dump_pkt_enabled(dhdp);
+ flags = hton16(dnsh->flags);
+ opcode = GET_DNS_OPCODE(flags);
+ id = hton16(dnsh->id);
+ if (GET_DNS_QR(flags)) {
+ /* Response */
+ DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond);
+ DNS_RESP_PRINT("DNS RESPONSE");
+ } else {
+ /* Request */
+ DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond);
+ DNS_REQ_PRINT("DNS REQUEST");
+ }
+
+ if (ifidx == 0) {
+ dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS);
+ }
+}
+#endif /* DHD_DNS_DUMP */
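
The macros above slice the 16-bit DNS flags word: QR lives in bit 15, OPCODE in bits 11-14 and RCODE in bits 0-3, all in host byte order after the hton16()/ntohs() step. A tiny user-space sketch that decodes two representative flag values (a standard query with RD set, and an NXDOMAIN response):

#include <stdint.h>
#include <stdio.h>

#define DNS_QR(f)     (((f) >> 15) & 0x1)
#define DNS_OPCODE(f) (((f) >> 11) & 0xF)
#define DNS_RCODE(f)  ((f) & 0xF)

int main(void)
{
	uint16_t query = 0x0100;    /* QR=0, OPCODE=0 (QUERY), RD set */
	uint16_t resp  = 0x8183;    /* QR=1, OPCODE=0, RCODE=3 (NXDOMAIN) */

	printf("0x%04x: %s opcode=%u rcode=%u\n", query,
	       DNS_QR(query) ? "response" : "query",
	       DNS_OPCODE(query), DNS_RCODE(query));
	printf("0x%04x: %s opcode=%u rcode=%u\n", resp,
	       DNS_QR(resp) ? "response" : "query",
	       DNS_OPCODE(resp), DNS_RCODE(resp));
	return 0;
}
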
+
+#ifdef DHD_RX_DUMP
+void
+dhd_rx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen)
+{
+ struct ether_header *eh;
+ uint16 protocol;
+ char *pkttype = "UNKNOWN";
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ eh = (struct ether_header *)pktdata;
+ protocol = hton16(eh->ether_type);
+ BCM_REFERENCE(pktlen);
+
+ switch (protocol) {
+ case ETHER_TYPE_IP:
+ pkttype = "IP";
+ break;
+ case ETHER_TYPE_ARP:
+ pkttype = "ARP";
+ break;
+ case ETHER_TYPE_BRCM:
+ pkttype = "BRCM";
+ break;
+ case ETHER_TYPE_802_1X:
+ pkttype = "802.1X";
+ break;
+ case ETHER_TYPE_WAI:
+ pkttype = "WAPI";
+ break;
+ default:
+ break;
+ }
+
+ DHD_PKTDUMP(("RX DUMP[%s] - %s\n", dhd_ifname(dhdp, ifidx), pkttype));
+ if (protocol != ETHER_TYPE_BRCM) {
+ if (pktdata[0] == 0xFF) {
+ DHD_PKTDUMP(("%s: BROADCAST\n", __FUNCTION__));
+ } else if (pktdata[0] & 1) {
+ DHD_PKTDUMP(("%s: MULTICAST: " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(pktdata)));
+ }
+#ifdef DHD_RX_FULL_DUMP
+ {
+ int k;
+ for (k = 0; k < pktlen; k++) {
+ DHD_PKTDUMP(("%02X ", pktdata[k]));
+ if ((k & 15) == 15)
+ DHD_PKTDUMP(("\n"));
+ }
+ DHD_PKTDUMP(("\n"));
+ }
+#endif /* DHD_RX_FULL_DUMP */
+ }
+}
+#endif /* DHD_RX_DUMP */
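
The DHD_RX_FULL_DUMP branch above prints the frame as hex, sixteen bytes per row. The same layout in a stand-alone user-space sketch (hex_dump is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static void hex_dump(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		printf("%02X ", buf[i]);
		if ((i & 15) == 15)
			printf("\n");    /* break the row after 16 bytes */
	}
	if (len & 15)
		printf("\n");            /* terminate a partial last row */
}

int main(void)
{
	uint8_t frame[20];
	size_t i;

	for (i = 0; i < sizeof(frame); i++)
		frame[i] = (uint8_t)i;
	hex_dump(frame, sizeof(frame));
	return 0;
}
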
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.h
new file mode 100644
index 0000000..e06487f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_pktdump.h
@@ -0,0 +1,125 @@
+/*
+ * Header file for the Packet dump helper functions
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_LINUX_PKTDUMP_H_
+#define __DHD_LINUX_PKTDUMP_H_
+
+#include <typedefs.h>
+#include <dhd.h>
+
+typedef enum {
+ EAPOL_OTHER = 0,
+ EAPOL_4WAY_M1,
+ EAPOL_4WAY_M2,
+ EAPOL_4WAY_M3,
+ EAPOL_4WAY_M4,
+ EAPOL_GROUPKEY_M1,
+ EAPOL_GROUPKEY_M2
+} msg_eapol_t;
+
+typedef enum pkt_cnt_rsn {
+ PKT_CNT_RSN_INVALID = 0,
+ PKT_CNT_RSN_ROAM = 1,
+ PKT_CNT_RSN_GRPKEY_UP = 2,
+ PKT_CNT_RSN_MAX = 3
+} pkt_cnt_rsn_t;
+
+extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata);
+extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+
+#ifdef DHD_PKTDUMP_ROAM
+extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn);
+extern void dhd_dump_pkt_init(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { }
+static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { }
+#endif /* DHD_PKTDUMP_ROAM */
+
+/* Rx packet dump */
+#ifdef DHD_RX_DUMP
+extern void dhd_rx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen);
+#else
+static INLINE void dhd_rx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen) { }
+#endif /* DHD_RX_DUMP */
+
+/* DHCP packet dump */
+#ifdef DHD_DHCP_DUMP
+extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DHCP_DUMP */
+
+/* DNS packet dump */
+#ifdef DHD_DNS_DUMP
+extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DNS_DUMP */
+
+/* ICMP packet dump */
+#ifdef DHD_ICMP_DUMP
+extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ICMP_DUMP */
+
+/* ARP packet dump */
+#ifdef DHD_ARP_DUMP
+extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ARP_DUMP */
+
+/* 802.1X packet dump */
+#ifdef DHD_8021X_DUMP
+extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_8021X_DUMP */
+
+#endif /* __DHD_LINUX_PKTDUMP_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
index 191f9d2..ee49f34 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
@@ -1,7 +1,9 @@
/*
* Linux platform device for DHD WLAN adapter
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -52,6 +54,14 @@
#define WIFI_PLAT_NAME2 "bcm4329_wlan"
#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
+#ifdef USE_CUSTOM_MSM_PCIE
+#include <linux/msm_pcie.h>
+#endif /* USE_CUSTOM_MSM_PCIE */
+
+#ifdef DHD_WIFI_SHUTDOWN
+extern void wifi_plat_dev_drv_shutdown(struct platform_device *pdev);
+#endif // endif
+
#ifdef CONFIG_DTS
struct regulator *wifi_regulator = NULL;
#endif /* CONFIG_DTS */
@@ -83,10 +93,11 @@
extern void* wl_cfg80211_get_dhdp(struct net_device *dev);
-#ifdef BOARD_HIKEY
+#if defined(BOARD_HIKEY) || defined(USE_CUSTOM_MSM_PCIE)
extern int dhd_wlan_init(void);
extern int dhd_wlan_deinit(void);
-#endif /* BOARD_HIKEY */
+extern void post_power_operation(int on);
+#endif /* defined(BOARD_HIKEY) || defined(USE_CUSTOM_MSM_PCIE) */
#ifdef ENABLE_4335BT_WAR
extern int bcm_bt_lock(int cookie);
@@ -176,7 +187,7 @@
plat_data = adapter->wifi_plat_data;
- DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec));
if (plat_data->set_power) {
#ifdef ENABLE_4335BT_WAR
if (on) {
@@ -197,10 +208,14 @@
if (msec && !err)
OSL_SLEEP(msec);
- if (on && !err)
+ if (on && !err) {
is_power_on = TRUE;
- else
+#ifdef USE_CUSTOM_POST_PWR_OPER
+ post_power_operation(on);
+#endif /* USE_CUSTOM_POST_PWR_OPER */
+ } else {
is_power_on = FALSE;
+ }
#endif /* CONFIG_DTS */
@@ -237,6 +252,11 @@
int err = 0;
struct wifi_platform_data *plat_data;
+#ifdef CONFIG_DTS
+ if (wifi_regulator)
+ return err;
+#endif /* CONFIG_DTS */
+
if (!adapter || !adapter->wifi_plat_data)
return -EINVAL;
plat_data = adapter->wifi_plat_data;
@@ -328,6 +348,7 @@
return -1;
}
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
/* This is to get the irq for the OOB */
gpio = of_get_gpio(pdev->dev.of_node, 0);
@@ -347,6 +368,7 @@
adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
IORESOURCE_IRQ_SHAREABLE;
end:
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) */
#endif /* CONFIG_DTS */
wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
@@ -411,6 +433,9 @@
.remove = wifi_plat_dev_drv_remove,
.suspend = wifi_plat_dev_drv_suspend,
.resume = wifi_plat_dev_drv_resume,
+#ifdef DHD_WIFI_SHUTDOWN
+ .shutdown = wifi_plat_dev_drv_shutdown,
+#endif // endif
.driver = {
.name = WIFI_PLAT_NAME,
#ifdef CONFIG_DTS
@@ -424,6 +449,9 @@
.remove = wifi_plat_dev_drv_remove,
.suspend = wifi_plat_dev_drv_suspend,
.resume = wifi_plat_dev_drv_resume,
+#ifdef DHD_WIFI_SHUTDOWN
+ .shutdown = wifi_plat_dev_drv_shutdown,
+#endif // endif
.driver = {
.name = WIFI_PLAT_NAME2,
}
@@ -434,8 +462,7 @@
static int wifi_platdev_match(struct device *dev, void *data)
#endif /* LINUX_VERSION_CODE */
{
- const char *name = data;
-
+ const char *name = (const char*)data;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
@@ -462,10 +489,9 @@
dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
-#if defined(BOARD_HIKEY) && !defined(CONFIG_BCMDHD)
- /* For modular based dhd for Hikey CONFIG_BCMDHD will not be defined */
+#if defined(BOARD_HIKEY_MODULAR) || defined(USE_CUSTOM_MSM_PCIE)
dhd_wlan_init();
-#endif /* BOARD_HIKEY && CONFIG_BCMDHD */
+#endif /* defined(BOARD_HIKEY_MODULAR) || defined(USE_CUSTOM_MSM_PCIE) */
#if !defined(CONFIG_DTS)
if (!dts_enabled) {
@@ -549,11 +575,6 @@
DHD_ERROR(("unregister wifi platform drivers\n"));
-#if defined(BOARD_HIKEY) && !defined(CONFIG_BCMDHD)
- /* For modular based dhd for Hikey CONFIG_BCMDHD will not be defined */
- dhd_wlan_deinit();
-#endif /* BOARD_HIKEY && CONFIG_BCMDHD */
-
if (dev1)
platform_driver_unregister(&wifi_platform_dev_driver);
if (dev2)
@@ -566,6 +587,9 @@
wifi_platform_bus_enumerate(adapter, FALSE);
}
}
+#ifdef BOARD_HIKEY_MODULAR
+ dhd_wlan_deinit();
+#endif /* BOARD_HIKEY_MODULAR */
#endif /* !defined(CONFIG_DTS) */
kfree(dhd_wifi_platdata->adapters);
@@ -750,6 +774,7 @@
BCM_REFERENCE(i);
BCM_REFERENCE(adapter);
+
/* Sanity check on the module parameters
* - Both watchdog and DPC as tasklets are ok
* - If both watchdog and DPC are threads, TX must be deferred
@@ -764,7 +789,7 @@
if (dhd_wifi_platdata == NULL) {
DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
- DHD_ERROR(("DHD registeing bus directly\n"));
+ DHD_ERROR(("DHD registering bus directly\n"));
/* x86 bring-up PC needs no power-up operations */
err = dhd_bus_register();
return err;
@@ -795,6 +820,7 @@
}
err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
if (err) {
+ DHD_ERROR(("%s: wifi pwr on error ! \n", __FUNCTION__));
dhd_bus_unreg_sdio_notify();
/* WL_REG_ON state unknown, Power off forcely */
wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_priv.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_priv.h
new file mode 100644
index 0000000..53dd103
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_priv.h
@@ -0,0 +1,427 @@
+/*
+ * DHD Linux header file - contains private structure definition of the Linux specific layer
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_LINUX_PRIV_H__
+#define __DHD_LINUX_PRIV_H__
+
+#include <osl.h>
+
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_linux.h>
+#include <dhd_bus.h>
+
+#ifdef PCIE_FULL_DONGLE
+#include <bcmmsgbuf.h>
+#include <dhd_flowring.h>
+#endif /* PCIE_FULL_DONGLE */
+
+/*
+ * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
+ * Local private structure (extension of pub)
+ */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+ dhd_pub_t pub;
+ /* for supporting multiple interfaces.
+ * static_ifs hold the net ifaces without valid FW IF
+ */
+ dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
+
+ void *adapter; /* adapter information, interrupt, fw path etc. */
+ char fw_path[PATH_MAX]; /* path to firmware image */
+ char nv_path[PATH_MAX]; /* path to nvram vars file */
+#ifdef DHD_UCODE_DOWNLOAD
+ char uc_path[PATH_MAX]; /* path to ucode image */
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* serialize dhd iovars */
+ struct mutex dhd_iovar_mutex;
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+
+#endif /* PROP_TXSTATUS */
+ wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t d3ack_wait;
+ wait_queue_head_t dhd_bus_busy_state_wait;
+ wait_queue_head_t dmaxfer_wait;
+ uint32 default_wd_interval;
+
+ timer_list_compat_t timer;
+ bool wd_timer_valid;
+#ifdef DHD_PCIE_RUNTIMEPM
+ timer_list_compat_t rpm_timer;
+ bool rpm_timer_valid;
+ tsk_ctl_t thr_rpm_ctl;
+#endif /* DHD_PCIE_RUNTIMEPM */
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+
+ struct semaphore sdsem;
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+
+ tsk_ctl_t thr_rxf_ctl;
+ spinlock_t rxf_lock;
+ bool rxthread_enabled;
+
+ /* Wakelocks */
+#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+ struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+ struct wake_lock wl_evtwake; /* Wifi event wakelock */
+ struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
+ struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ struct wake_lock wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ struct wake_lock wl_scanwake; /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
+
+#if defined(OEM_ANDROID)
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+ struct mutex dhd_suspend_mutex;
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ struct mutex dhd_apf_mutex;
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif /* OEM_ANDROID */
+ spinlock_t wakelock_spinlock;
+ spinlock_t wakelock_evt_spinlock;
+ uint32 wakelock_counter;
+ int wakelock_wd_counter;
+ int wakelock_rx_timeout_enable;
+ int wakelock_ctrl_timeout_enable;
+ bool waive_wakelock;
+ uint32 wakelock_before_waive;
+
+ /* Thread to issue ioctl for multicast */
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+ dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef DHDTCPACK_SUPPRESS
+ spinlock_t tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+ bool cpufreq_fix_status;
+ struct mutex cpufreq_fix;
+ struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+ struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
+ void *dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+ struct notifier_block freq_trans;
+ int __percpu *new_freq;
+#endif // endif
+ unsigned int unit;
+ struct notifier_block pm_notifier;
+#ifdef DHD_PSTA
+ uint32 psta_mode; /* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ uint32 wet_mode;
+#endif /* DHD_WET */
+#ifdef DHD_DEBUG
+ dhd_dump_t *dump;
+ struct timer_list join_timer;
+ u32 join_timeout_val;
+ bool join_timer_active;
+ uint scan_time_count;
+ struct timer_list scan_timer;
+ bool scan_timer_active;
+#endif // endif
+#if defined(DHD_LB)
+ /* CPU Load Balance dynamic CPU selection */
+
+ /* Variable that tracks the current CPUs available for candidacy */
+ cpumask_var_t cpumask_curr_avail;
+
+ /* Primary and secondary CPU mask */
+ cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+ cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+ struct notifier_block cpu_notifier;
+
+ /* Tasklet to handle Tx Completion packet freeing */
+ struct tasklet_struct tx_compl_tasklet;
+ atomic_t tx_compl_cpu;
+
+ /* Tasklet to handle RxBuf Post during Rx completion */
+ struct tasklet_struct rx_compl_tasklet;
+ atomic_t rx_compl_cpu;
+
+ /* Napi struct for handling rx packet sendup. Packets are removed from
+ * the D2H RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+ * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+ * to run on rx_napi_cpu.
+ */
+ struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+ struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+ struct napi_struct rx_napi_struct ____cacheline_aligned;
+ atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+ struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+ struct work_struct rx_napi_dispatcher_work;
+ struct work_struct tx_compl_dispatcher_work;
+ struct work_struct tx_dispatcher_work;
+ struct work_struct rx_compl_dispatcher_work;
+
+ /* Number of times DPC Tasklet ran */
+ uint32 dhd_dpc_cnt;
+ /* Number of times NAPI processing got scheduled */
+ uint32 napi_sched_cnt;
+ /* Number of times NAPI processing ran on each available core */
+ uint32 *napi_percpu_run_cnt;
+ /* Number of times RX Completions got scheduled */
+ uint32 rxc_sched_cnt;
+ /* Number of times RX Completion ran on each available core */
+ uint32 *rxc_percpu_run_cnt;
+ /* Number of times TX Completions got scheduled */
+ uint32 txc_sched_cnt;
+ /* Number of times TX Completions ran on each available core */
+ uint32 *txc_percpu_run_cnt;
+ /* CPU status */
+ /* Number of times each CPU came online */
+ uint32 *cpu_online_cnt;
+ /* Number of times each CPU went offline */
+ uint32 *cpu_offline_cnt;
+
+ /* Number of times TX processing run on each core */
+ uint32 *txp_percpu_run_cnt;
+ /* Number of times TX start run on each core */
+ uint32 *tx_start_percpu_run_cnt;
+
+ /* Tx load balancing */
+
+ /* TODO: Need to see if batch processing is really required in case of TX
+ * processing. In case of RX the Dongle can send a bunch of rx completions,
+ * hence we took a 3 queue approach
+ * enque - adds the skbs to rx_pend_queue
+ * dispatch - uses a lock and adds the list of skbs from pend queue to
+ * napi queue
+ * napi processing - copies the pend_queue into a local queue and works
+ * on it.
+ * But for TX it's going to be 1 skb at a time, so we are just thinking
+ * of using only one queue and using the lock-supported skb queue functions
+ * to add and process it. If it's inefficient we'll revisit the queue
+ * design.
+ */
+
+ /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
+ /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
+ /*
+ * From the tasklet that actually sends out data,
+ * copy the list tx_pend_queue into tx_active_queue. Thereby we need
+ * the spinlock only to perform the copy; the rest of the code, i.e. the
+ * code to construct tx_pend_queue and the code to process tx_active_queue,
+ * can be lockless. The concept is borrowed as-is from RX processing.
+ */
+ /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
+
+ /* Control TXP in runtime, enable by default */
+ atomic_t lb_txp_active;
+
+ /* Control RXP in runtime, enable by default */
+ atomic_t lb_rxp_active;
+
+ /*
+ * When NET_TX tries to send a TX packet, put it into tx_pend_queue.
+ * For now, the processing tasklet will also directly operate on this
+ * queue.
+ */
+ struct sk_buff_head tx_pend_queue ____cacheline_aligned;
+
+ /* CPU on which the DHD Tx is happening */
+ atomic_t tx_cpu;
+
+ /* CPU on which the Network stack is calling the DHD's xmit function */
+ atomic_t net_tx_cpu;
+
+ /* Tasklet context from which the DHD's TX processing happens */
+ struct tasklet_struct tx_tasklet;
+
+ /*
+ * Consumer Histogram - NAPI RX Packet processing
+ * -----------------------------------------------
+ * On each CPU, when the NAPI RX packet processing callback is invoked,
+ * the number of packets processed is captured in this data structure.
+ * It is difficult to capture the "exact" number of packets processed,
+ * so, considering the packet counter to be a 32 bit one, we keep a
+ * bucket with 8 bins (2^1, 2^2 ... 2^8). The number of packets
+ * processed is rounded up to the next power of 2 and the counter in
+ * the appropriate "bin" is incremented.
+ * For example, assume that on CPU 1 NAPI Rx runs 3 times
+ * and the packet counts processed are as follows (assume the bin counters are 0):
+ * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+ * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+ * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+ */
+ uint32 *napi_rx_hist[HIST_BIN_SIZE];
+ uint32 *txc_hist[HIST_BIN_SIZE];
+ uint32 *rxc_hist[HIST_BIN_SIZE];
+#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ struct work_struct axi_error_dispatcher_work;
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#ifdef SHOW_LOGTRACE
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ tsk_ctl_t thr_logtrace_ctl;
+#else
+ struct delayed_work event_log_dispatcher_work;
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+ struct kobject dhd_kobj;
+ struct kobject dhd_conf_file_kobj;
+ struct timer_list timesync_timer;
+#if defined(BT_OVER_SDIO)
+ char btfw_path[PATH_MAX];
+#endif /* defined (BT_OVER_SDIO) */
+#ifdef WL_MONITOR
+ struct net_device *monitor_dev; /* monitor pseudo device */
+ struct sk_buff *monitor_skb;
+ uint monitor_len;
+ uint monitor_type; /* monitor pseudo device */
+#endif /* WL_MONITOR */
+#if defined(BT_OVER_SDIO)
+ struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
+ int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
+#endif /* BT_OVER_SDIO */
+#ifdef SHOW_LOGTRACE
+ struct sk_buff_head evt_trace_queue ____cacheline_aligned;
+#endif // endif
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ struct workqueue_struct *tx_wq;
+ struct workqueue_struct *rx_wq;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_DEBUG_UART
+ bool duart_execute;
+#endif /* DHD_DEBUG_UART */
+ struct mutex logdump_lock;
+ /* indicates mem_dump was scheduled as work queue or called directly */
+ bool scheduled_memdump;
+ struct work_struct dhd_hang_process_work;
+#ifdef DHD_HP2P
+ spinlock_t hp2p_lock;
+#endif /* DHD_HP2P */
+} dhd_info_t;
+
+extern int dhd_sysfs_init(dhd_info_t *dhd);
+extern void dhd_sysfs_exit(dhd_info_t *dhd);
+extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
+extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);
+
+int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_TXP)
+int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
+void dhd_tx_dispatcher_work(struct work_struct * work);
+void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
+void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_tx_handler(unsigned long data);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+int dhd_napi_poll(struct napi_struct *napi, int budget);
+void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
+void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+#endif /* DHD_LB_RXP */
+
+void dhd_lb_set_default_cpus(dhd_info_t *dhd);
+void dhd_cpumasks_deinit(dhd_info_t *dhd);
+int dhd_cpumasks_init(dhd_info_t *dhd);
+
+void dhd_select_cpu_candidacy(dhd_info_t *dhd);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int dhd_cpu_startup_callback(unsigned int cpu);
+int dhd_cpu_teardown_callback(unsigned int cpu);
+#else
+int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd);
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
+
+#if defined(DHD_LB_TXC)
+void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
+void dhd_rx_compl_dispatcher_fn(struct work_struct * work);
+#endif /* DHD_LB_RXC */
+
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
+#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#endif /* __DHD_LINUX_PRIV_H__ */
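The consumer-histogram comment inside the dhd_info structure above describes binning each NAPI run's packet count by the next power of two (bins 2^1 .. 2^8) and incrementing the matching per-CPU counter. A minimal sketch of how such a bin index can be derived is shown here; it is illustrative only, since the actual accounting lives in the DHD_LB code in dhd_linux.c, not in this header, and the helper name is hypothetical.

/* Sketch only: map a per-invocation packet count to a power-of-two bin.
 * A count of 10 lands in the 2^4 bin, 30 in 2^5, 15 in 2^4, matching the
 * example in the comment above.
 */
static inline unsigned int napi_hist_bin(unsigned int npkts, unsigned int nbins)
{
	unsigned int bin = 1;

	while ((1U << bin) < npkts && bin < nbins)
		bin++;
	return bin;	/* 1-based exponent; caller indexes its bin array accordingly */
}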
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
index 5784387..e0bab98 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -1,7 +1,9 @@
/*
* Expose some of the kernel scheduler routines
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -35,17 +37,13 @@
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
int rc = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = sched_setscheduler(p, policy, param);
-#endif /* LinuxVer */
return rc;
}
int get_scheduler_policy(struct task_struct *p)
{
int rc = SCHED_NORMAL;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = p->policy;
-#endif /* LinuxVer */
return rc;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
index 15bb6e1..54de6a7 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
@@ -2,7 +2,9 @@
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -85,15 +87,11 @@
struct kfifo *fifo;
gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
- fifo = kfifo_init(buf, size, flags, lock);
-#else
fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
if (!fifo) {
return NULL;
}
kfifo_init(fifo, buf, size);
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
return fifo;
}
@@ -101,10 +99,7 @@
dhd_kfifo_free(struct kfifo *fifo)
{
kfifo_free(fifo);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
- /* FC11 releases the fifo memory */
kfree(fifo);
-#endif // endif
}
/* deferred work functions */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
index 4590977..ddbdd42 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
@@ -2,7 +2,9 @@
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -39,7 +41,6 @@
DHD_WQ_WORK_SET_MCAST_LIST,
DHD_WQ_WORK_IPV6_NDO,
DHD_WQ_WORK_HANG_MSG,
- DHD_WQ_WORK_SSSR_DUMP,
DHD_WQ_WORK_DHD_LOG_DUMP,
DHD_WQ_WORK_PKTLOG_DUMP,
DHD_WQ_WORK_INFORM_DHD_MON,
@@ -54,6 +55,12 @@
#ifdef DHD_ERPOM
DHD_WQ_WORK_ERROR_RECOVERY,
#endif /* DHD_ERPOM */
+ DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
+ DHD_WQ_WORK_AXI_ERROR_DUMP,
+ DHD_WQ_WORK_CTO_RECOVERY,
+#ifdef DHD_BANDSTEER
+ DHD_WQ_WORK_BANDSTEER_STEP_MOVE,
+#endif /* DHD_BANDSTEER */
DHD_MAX_WQ_EVENTS
};
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.c
index 1f9a448..71f77a4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.c
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -651,7 +653,8 @@
while (len >= (int)WL_MSCH_EVENT_LOG_HEAD_SIZE) {
msch_event_log_profiler_event_data_t *p =
(msch_event_log_profiler_event_data_t *)data;
- event_log_hdr_t hdr;
+ /* TODO: How to parse MSCH if extended event tag is present ??? */
+ prcd_event_log_hdr_t hdr;
int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32);
if (len < size || size > sizeof(msch_event_log_profiler_event_data_t)) {
break;
@@ -661,9 +664,15 @@
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
hdr.count = p->hdr.count + 1;
- hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+ /* exclude LSB 2 bits which indicate binary/non-binary data */
+ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
}
lastMessages = TRUE;
@@ -730,21 +739,22 @@
}
void
-wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, event_log_hdr_t *log_hdr,
+wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_log_hdr_t *plog_hdr,
uint32 *log_ptr)
{
uint32 log_pyld_len;
head_log = "CONSOLE";
- if (log_hdr->count == 0) {
+ if (plog_hdr->count == 0) {
return;
}
- log_pyld_len = (log_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT;
+ log_pyld_len = (plog_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT;
- if (log_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
msch_event_log_profiler_event_data_t *p =
(msch_event_log_profiler_event_data_t *)log_ptr;
- event_log_hdr_t hdr;
+ /* TODO: How to parse MSCH if extended event tag is present ??? */
+ prcd_event_log_hdr_t hdr;
uint32 s, ss;
if (log_pyld_len < OFFSETOF(msch_event_log_profiler_event_data_t, data) ||
@@ -755,9 +765,15 @@
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
hdr.count = p->hdr.count + 1;
- hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+ /* exclude LSB 2 bits which indicate binary/non-binary data */
+ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
} else {
msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr;
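For context on the fmt_num handling in the two hunks above: the on-wire fmt_num now carries two flag bits in its least-significant bits (binary vs. non-binary payload), so the format index passed on to dhd_dbg_verboselog_printf() is the raw value shifted right by two, while the raw value is kept in fmt_num_raw and compared against DHD_OW_BI_RAW_EVENT_LOG_FMT to mark a binary payload. A self-contained sketch of that split follows; the helper names are illustrative, not part of the patch.

#include <stdint.h>

/* Sketch only: split the on-wire fmt_num as the MSCH hunks above do. */
static inline uint16_t msch_fmt_index(uint16_t fmt_num_raw)
{
	return (uint16_t)(fmt_num_raw >> 2);   /* drop the 2 LSB flag bits */
}

static inline uint16_t msch_fmt_flag_bits(uint16_t fmt_num_raw)
{
	return (uint16_t)(fmt_num_raw & 0x3);  /* binary/non-binary indication */
}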
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.h
index 9ace1d9..76e32f5 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_mschdbg.h
@@ -3,7 +3,9 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -33,7 +35,7 @@
extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
void *data, int len);
extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr,
- event_log_hdr_t *log_hdr, uint32 *log_ptr);
+ prcd_event_log_hdr_t *plog_hdr, uint32 *log_ptr);
#endif /* SHOW_LOGTRACE */
#endif /* _dhd_mschdbg_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
index ec1f5f6..9c43430 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
@@ -3,7 +3,9 @@
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -35,6 +37,7 @@
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
+#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
@@ -68,7 +71,11 @@
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
+#include <dhd_linux_pktdump.h>
#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_EWPR_VER2
+#include <dhd_bitpack.h>
+#endif /* DHD_EWPR_VER2 */
extern char dhd_version[];
extern char fw_version[];
@@ -169,10 +176,19 @@
* Dongle advertizes host side sync mechanism requirements.
*/
-#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
-#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL)
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
+#define HWA_DB_TYPE_RXPOST (0x0050)
+#define HWA_DB_TYPE_TXCPLT (0x0060)
+#define HWA_DB_TYPE_RXCPLT (0x0170)
+#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
+
+#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
+#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
+#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
+
/**
* Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
*
@@ -182,6 +198,18 @@
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ * For EDL messages.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+#ifdef EWP_EDL
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg);
+#endif /* EWP_EDL */
+
/*
* +----------------------------------------------------------------------------
*
@@ -289,6 +317,64 @@
#define DHD_DMA_PAD (128)
#endif // endif
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+ (ring)++, (flowid)++)
+
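The FOREACH_RING_IN_FLOWRINGS_POOL macro above is the access path into the pool; a minimal usage sketch, assuming a caller that already holds `dhd`, `prot` and the dongle-advertised `max_flowrings` (as the pool attach/detach paths in this file do), might look like:

/* Sketch only: walk every preallocated flowring, e.g. on pool detach. */
msgbuf_ring_t *ring;
uint16 flowid;

FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, max_flowrings) {
	/* return each ring's DMA-able buffer; ring bookkeeping is freed separately */
	dhd_dma_buf_free(dhd, &ring->dma_buf);
}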
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
dhd_dma_buf_t srcmem;
@@ -298,6 +384,7 @@
uint32 len;
bool in_progress;
uint64 start_usec;
+ uint64 time_taken;
uint32 d11_lpbk;
int status;
} dhd_dmaxfer_t;
@@ -331,6 +418,7 @@
#endif /* TXP_FLUSH_NITEMS */
uint8 ring_type;
+ uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
uint8 n_completion_ids;
bool create_pending;
uint16 create_req_id;
@@ -354,6 +442,7 @@
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
+ uint16 rxbufpost_sz;
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
@@ -366,7 +455,7 @@
uint16 cur_ts_bufs_posted;
/* Flow control mechanism based on active transmits pending */
- uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
+ osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
uint16 h2d_max_txpost;
uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
@@ -378,6 +467,7 @@
msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
+ msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
@@ -413,6 +503,9 @@
uint32 flowring_num;
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+#ifdef EWP_EDL
+ d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
+#endif /* EWP_EDL */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
@@ -426,6 +519,18 @@
void *pktid_tx_map; /* pktid map for tx path */
bool metadata_dbg;
void *pktid_map_handle_ioctl;
+#ifdef DHD_MAP_PKTID_LOGGING
+ void *pktid_dma_map; /* pktid map for DMA MAP */
+ void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
+#endif /* DHD_MAP_PKTID_LOGGING */
+ uint32 pktid_depleted_cnt; /* pktid depleted count */
+ /* netif tx queue stop count */
+ uint8 pktid_txq_stop_cnt;
+ /* netif tx queue start count */
+ uint8 pktid_txq_start_cnt;
+ uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
+ uint64 ioctl_ack_time; /* timestamp for ioctl ack */
+ uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
@@ -460,8 +565,29 @@
bool no_retry;
bool no_aggr;
bool fixed_rate;
+ dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
+#ifdef DHD_HP2P
+ msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
+ msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
+#endif /* DHD_HP2P */
+ bool no_tx_resource;
} dhd_prot_t;
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BASE64_BUFFER_SIZE 640
+#endif // endif
+
+#ifdef DHD_DUMP_PCIE_RINGS
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn);
+#ifdef EWP_EDL
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn);
+#endif /* EWP_EDL */
+#endif /* DHD_DUMP_PCIE_RINGS */
+
extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
/* Convert a dmaaddr_t to a base_addr with htol operations */
@@ -469,9 +595,7 @@
/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
-static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
-static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
@@ -578,9 +702,21 @@
static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
+#ifdef DHD_HP2P
+static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+#ifdef EWP_EDL
+static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
+#endif // endif
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
+#ifdef DHD_HP2P
+static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
+static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
+static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
+static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
+#endif // endif
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
@@ -657,14 +793,6 @@
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
-#ifdef D2H_MINIDUMP
-dhd_dma_buf_t *
-dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
-{
- return &dhd->prot->fw_trap_buf;
-}
-#endif /* D2H_MINIDUMP */
-
bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
@@ -725,7 +853,6 @@
uint16 ring_type, uint32 id);
static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
uint8 type, uint32 id);
-static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
/**
* dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
@@ -742,15 +869,31 @@
{
uint32 ring_seqnum = ring->seqnum;
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
DHD_ERROR((
"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
- " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
- ring->dma_buf.va, msg, ring->curr_rd));
+ ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
+ /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ goto exit;
+ }
+
dhd_bus_dump_console_buffer(dhd->bus);
dhd_prot_debug_info_print(dhd);
@@ -762,6 +905,7 @@
}
#endif /* DHD_FW_COREDUMP */
+exit:
dhd_schedule_reset(dhd);
#ifdef OEM_ANDROID
@@ -773,6 +917,7 @@
dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* OEM_ANDROID */
+ dhd->livelock_occured = TRUE;
}
/**
@@ -820,10 +965,16 @@
msg_seqnum = *marker;
if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
}
- total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+ total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
if (total_tries > prot->d2h_sync_wait_max)
prot->d2h_sync_wait_max = total_tries;
@@ -899,7 +1050,14 @@
num_words);
if (prot_checksum == 0U) { /* checksum is OK */
ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a
+ * LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
}
}
@@ -937,9 +1095,188 @@
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
- return msg->msg_type;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return MSG_TYPE_INVALID;
+ } else {
+ return msg->msg_type;
+ }
}
+#ifdef EWP_EDL
+/**
+ * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
+ * header values at both the beginning and end of the payload.
+ * The cmn_msg_hdr_t is placed at the start and end of the payload
+ * in each work item in the EDL ring.
+ * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
+ * and the length of the payload in the 'request_id' field.
+ * Structure of each work item in the EDL ring:
+ * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
+ * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
+ * too costly on the dongle side and might take up too many ARM cycles,
+ * hence the xorcsum sync method is not being used for EDL ring.
+ */
+static int
+BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ uint32 tries;
+ int msglen = 0, len = 0;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+ volatile cmn_msg_hdr_t *trailer = NULL;
+ volatile uint8 *buf = NULL;
+ bool valid_msg = FALSE;
+
+ BCM_REFERENCE(delay);
+ /*
+ * For retries we have to make some sort of stepper algorithm.
+ * We see that every time when the Dongle comes out of the D3
+ * Cold state, the first D2H mem2mem DMA takes more time to
+ * complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the Host CPU some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIe does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec breathing time given by the Host CPU is
+ * not sufficient for the PCIe RC to start its work.
+ * Solution: Increase the delay in a stepper fashion.
+ * This is done to ensure that no unwanted extra delay is
+ * introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ /* First verify if the seqnumber has been updated,
+ * if yes, only then validate the header and trailer.
+ * Once seqnum, header and trailer have been validated, it means
+ * that the complete message has arrived.
+ */
+ valid_msg = FALSE;
+ if (msg->epoch == ring_seqnum &&
+ msg->msg_type == MSG_TYPE_INFO_PYLD &&
+ msg->request_id > 0 &&
+ msg->request_id <= ring->item_len) {
+ /* proceed to check trailer only if header is valid */
+ buf = (volatile uint8 *)msg;
+ msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
+ buf += msglen;
+ if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
+ trailer = (volatile cmn_msg_hdr_t *)buf;
+ valid_msg = (trailer->epoch == ring_seqnum) &&
+ (trailer->msg_type == msg->msg_type) &&
+ (trailer->request_id == msg->request_id);
+ if (!valid_msg) {
+ DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
+ " expected, seqnum=%u; reqid=%u. Retrying... \n",
+ __FUNCTION__, trailer->epoch, trailer->request_id,
+ msg->epoch, msg->request_id));
+ }
+ } else {
+ DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
+ __FUNCTION__, msg->request_id));
+ }
+
+ if (valid_msg) {
+ /* data is OK */
+ ring->seqnum++; /* next expected sequence number */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
+ } else {
+ DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
+ " msg_type=0x%x, request_id=%u."
+ " Retrying...\n",
+ __FUNCTION__, ring_seqnum, msg->epoch,
+ msg->msg_type, msg->request_id));
+ }
+
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
+ DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-max-length=%u", __FUNCTION__,
+ msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
+ msg->request_id, ring->item_len));
+ dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
+ if (trailer && msglen > 0 &&
+ (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
+ DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-length=%u", __FUNCTION__,
+ trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
+ trailer->request_id, msg->request_id));
+ dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
+ sizeof(*trailer), DHD_ERROR_VAL);
+ }
+
+ if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
+ len = msglen + sizeof(cmn_msg_hdr_t);
+ else
+ len = ring->item_len;
+
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+ (volatile uchar *) msg, len);
+
+ ring->seqnum++; /* skip this message */
+ return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+ DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
+ msg->epoch, msg->request_id));
+
+ prot->d2h_sync_wait_tot += tries;
+ return BCME_OK;
+}
+
+/**
+ * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete and the
+ * host need not try to sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
+static int BCMFASTPATH
+dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ if (msg->msg_type == MSG_TYPE_INFO_PYLD)
+ return BCME_OK;
+ else
+ return msg->msg_type;
+ }
+}
+#endif /* EWP_EDL */
+
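To restate the EDL sync contract implemented above: each EDL work item carries a cmn_msg_hdr_t at both ends, the dongle puts the sequence number in 'epoch' and the payload length in 'request_id', and a work item counts as delivered only when header and trailer agree. A condensed sketch of that validation, with the retry/stepper loop stripped out (names follow dhd_prot_d2h_sync_edl() above; the helper itself is illustrative, not part of the patch):

/* Sketch only: header/trailer agreement check for one EDL work item. */
static bool edl_work_item_complete(volatile cmn_msg_hdr_t *msg,
	uint32 ring_seqnum, uint16 item_len)
{
	volatile cmn_msg_hdr_t *trailer;
	int msglen;

	if (msg->epoch != ring_seqnum || msg->msg_type != MSG_TYPE_INFO_PYLD ||
		msg->request_id == 0 || msg->request_id > item_len)
		return FALSE;

	msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
	if ((msglen + sizeof(cmn_msg_hdr_t)) > item_len)
		return FALSE;

	trailer = (volatile cmn_msg_hdr_t *)((volatile uint8 *)msg + msglen);
	return (trailer->epoch == ring_seqnum) &&
		(trailer->msg_type == msg->msg_type) &&
		(trailer->request_id == msg->request_id);
}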
INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
@@ -971,14 +1308,33 @@
prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ if (HWA_ACTIVE(dhd)) {
+ prot->d2hring_tx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
+ prot->d2hring_rx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
+ DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
+ __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
+ prot->d2hring_rx_cpln.hwa_db_type));
+ }
+
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
}
}
@@ -991,6 +1347,14 @@
{
dhd_prot_t *prot = dhd->prot;
prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+
+ if (HWA_ACTIVE(dhd)) {
+ prot->h2dring_rxp_subn.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
+ DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
+ __FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
+ }
+
prot->h2dring_rxp_subn.current_phase = 0;
prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
@@ -1044,21 +1408,22 @@
* returns BCME_OK=0 on success
* returns non-zero negative error value on failure.
*/
-static int
+int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
uint32 dma_pad = 0;
osl_t *osh = dhd->osh;
uint16 dma_align = DMA_ALIGN_LEN;
+ uint32 rem = 0;
ASSERT(dma_buf != NULL);
ASSERT(dma_buf->va == NULL);
ASSERT(dma_buf->len == 0);
- /* Pad the buffer length by one extra cacheline size.
- * Required for D2H direction.
- */
- dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+ /* Pad the buffer length to align to cacheline size. */
+ rem = (buf_len % DHD_DMA_PAD);
+ dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
+
dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
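The padding change above also changes the allocation size: the old code added a full DHD_DMA_PAD (128) bytes whenever buf_len was not already a multiple of it, while the new code rounds buf_len up to the next multiple. For buf_len = 130 that means 130 + 128 = 258 bytes before versus 130 + 126 = 256 bytes now. A standalone sketch of the new computation (the helper name is illustrative):

/* Sketch only: round an allocation length up to the next DHD_DMA_PAD multiple. */
static uint32 dhd_dma_padded_len(uint32 buf_len)
{
	uint32 rem = buf_len % DHD_DMA_PAD;	/* DHD_DMA_PAD is 128 in this file */

	return buf_len + (rem ? (DHD_DMA_PAD - rem) : 0);
}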
@@ -1100,7 +1465,7 @@
* dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
* dhd_dma_buf_alloc().
*/
-static void
+void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
osl_t *osh = dhd->osh;
@@ -1145,6 +1510,133 @@
/*
* +---------------------------------------------------------------------------+
+ * DHD_MAP_PKTID_LOGGING
+ * Logs the PKTID and DMA map/unmap information used to debug SMMU fault
+ * issues on customer platforms.
+ * +---------------------------------------------------------------------------+
+ */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+typedef struct dhd_pktid_log_item {
+ dmaaddr_t pa; /* DMA bus address */
+ uint64 ts_nsec; /* Timestamp: nsec */
+ uint32 size; /* DMA map/unmap size */
+ uint32 pktid; /* Packet ID */
+ uint8 pkttype; /* Packet Type */
+ uint8 rsvd[7]; /* Reserved for future use */
+} dhd_pktid_log_item_t;
+
+typedef struct dhd_pktid_log {
+ uint32 items; /* number of total items */
+ uint32 index; /* index of pktid_log_item */
+ dhd_pktid_log_item_t map[0]; /* metadata storage */
+} dhd_pktid_log_t;
+
+typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
+
+#define MAX_PKTID_LOG (2048)
+#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
+#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
+ ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
+
+#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
+#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
+#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
+ dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
+#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
+
+static dhd_pktid_log_handle_t *
+dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ log_size = DHD_PKTID_LOG_SZ(num_items);
+ log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
+ if (log == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, log_size));
+ return (dhd_pktid_log_handle_t *)NULL;
+ }
+
+ log->items = num_items;
+ log->index = 0;
+
+ return (dhd_pktid_log_handle_t *)log; /* opaque handle */
+}
+
+static void
+dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ log_size = DHD_PKTID_LOG_SZ(log->items);
+ MFREE(dhd->osh, handle, log_size);
+}
+
+static void
+dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
+ uint32 pktid, uint32 len, uint8 pkttype)
+{
+ dhd_pktid_log_t *log;
+ uint32 idx;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ idx = log->index;
+ log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
+ log->map[idx].pa = pa;
+ log->map[idx].pktid = pktid;
+ log->map[idx].size = len;
+ log->map[idx].pkttype = pkttype;
+ log->index = (idx + 1) % (log->items); /* update index */
+}
+
+void
+dhd_pktid_logging_dump(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ dhd_pktid_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ if (prot == NULL) {
+ DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
+ unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
+ OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+ if (map_log && unmap_log) {
+ DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->index, unmap_log->index,
+ (unsigned long)ts_sec, (unsigned long)ts_usec));
+ DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
+ "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
+ }
+}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
* Main purpose is to save memory on the dongle, has other purposes as well.
* The packet id map, also includes storage for some packet parameters that
@@ -1156,7 +1648,7 @@
#define DHD_PCIE_PKTID
#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
#define MAX_RX_PKTID (1024)
-#define MAX_TX_PKTID (3072 * 2)
+#define MAX_TX_PKTID (3072 * 12)
/* On Router, the pktptr serves as a pktid. */
@@ -1176,6 +1668,8 @@
PKTTYPE_TSBUF_RX
} dhd_pkttype_t;
+#define DHD_PKTID_MIN_AVAIL_COUNT 512U
+#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
@@ -1185,6 +1679,10 @@
#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
+#ifdef DHD_HP2P
+#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
+#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
+#endif /* DHD_HP2P */
#define IS_FLOWRING(ring) \
((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
@@ -1226,7 +1724,6 @@
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
-
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
@@ -1490,6 +1987,11 @@
out:
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+
+ if (error != BCME_OK) {
+ dhd->pktid_audit_failed = TRUE;
+ }
+
return error;
}
@@ -1516,6 +2018,11 @@
const int test_for, void *msg, uint32 msg_len, const char *func)
{
int ret = BCME_OK;
+
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_ERROR;
+ }
+
ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
if (ret == BCME_ERROR) {
DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
@@ -1679,12 +2186,17 @@
locker->state = LOCKER_IS_FREE;
data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
if (data_tx) {
- dhd->prot->active_tx_count--;
+ OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
}
#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_RING */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
+ locker->pa, nkey, locker->len,
+ locker->pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
{
if (SECURE_DMA_ENAB(dhd->osh))
@@ -1947,6 +2459,9 @@
locker->pkttype = pkttype;
locker->pkt = pkt;
locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
@@ -2074,6 +2589,10 @@
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
+ (uint32)locker->len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
*pa = locker->pa; /* return contents of locker */
*len = (uint32)locker->len;
@@ -2320,11 +2839,7 @@
/* FW going to DMA extended trap data,
* allocate buffer for the maximum extended trap data.
*/
-#ifdef D2H_MINIDUMP
- uint32 trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
-#else
uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
-#endif /* D2H_MINIDUMP */
/* Allocate prot structure */
if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
@@ -2342,6 +2857,7 @@
dhd->dma_h2d_ring_upd_support = FALSE;
dhd->dma_ring_upd_overwrite = FALSE;
+ dhd->hwa_inited = 0;
dhd->idma_inited = 0;
dhd->ifrm_inited = 0;
dhd->dar_inited = 0;
@@ -2458,6 +2974,20 @@
}
#endif /* IOCTLRESP_USE_CONSTMEM */
+#ifdef DHD_MAP_PKTID_LOGGING
+ prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_map == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
+ __FUNCTION__));
+ }
+
+ prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_unmap == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
+ __FUNCTION__));
+ }
+#endif /* DHD_MAP_PKTID_LOGGING */
+
/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
{
@@ -2509,14 +3039,43 @@
return BCME_NOMEM;
} /* dhd_prot_attach */
+static int
+dhd_alloc_host_scbs(dhd_pub_t *dhd)
+{
+ int ret = BCME_OK;
+ sh_addr_t base_addr;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 host_scb_size = 0;
+
+ if (dhd->hscb_enable) {
+ /* read number of bytes to allocate from F/W */
+ dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
+ if (host_scb_size) {
+ /* alloc array of host scbs */
+ ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
+ /* write host scb address to F/W */
+ if (ret == BCME_OK) {
+ dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ HOST_SCB_ADDR, 0);
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
+ }
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
+ }
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
+ }
+
+ return ret;
+}
+
void
dhd_set_host_cap(dhd_pub_t *dhd)
{
uint32 data = 0;
dhd_prot_t *prot = dhd->prot;
-#ifdef D2H_MINIDUMP
- uint16 host_trap_addr_len;
-#endif /* D2H_MINIDUMP */
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
if (dhd->h2d_phase_supported) {
@@ -2553,6 +3112,12 @@
data |= HOSTCAP_FAST_DELETE_RING;
}
+ if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
+ DHD_ERROR(("HWA inited\n"));
+ /* TODO: Is hostcap needed? */
+ dhd->hwa_inited = TRUE;
+ }
+
if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
DHD_ERROR(("IDMA inited\n"));
data |= HOSTCAP_H2D_IDMA;
@@ -2575,12 +3140,45 @@
data |= HOSTCAP_UR_FW_NO_TRAP;
-#ifdef D2H_MINIDUMP
- if (dhd_bus_is_minidump_enabled(dhd)) {
- data |= HOSTCAP_EXT_TRAP_DBGBUF;
- DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
+ if (dhd->hscb_enable) {
+ data |= HOSTCAP_HSCB;
}
-#endif /* D2H_MINIDUMP */
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ data |= HOSTCAP_EDL_RING;
+ DHD_ERROR(("Enable EDL host cap\n"));
+ } else {
+ DHD_ERROR(("DO NOT SET EDL host cap\n"));
+ }
+#endif /* EWP_EDL */
+
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable) {
+ data |= HOSTCAP_PKT_TIMESTAMP;
+ data |= HOSTCAP_PKT_HP2P;
+ DHD_ERROR(("Enable HP2P in host cap\n"));
+ } else {
+ DHD_ERROR(("HP2P not enabled in host cap\n"));
+ }
+#endif // endif
+
+#ifdef DHD_DB0TS
+ if (dhd->db0ts_capable) {
+ data |= HOSTCAP_DB0_TIMESTAMP;
+ DHD_ERROR(("Enable DB0 TS in host cap\n"));
+ } else {
+ DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+ }
+#endif /* DHD_DB0TS */
+ if (dhd->extdtxs_in_txcpl) {
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ data |= HOSTCAP_PKT_TXSTATUS;
+ }
+ else {
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ }
+
DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
__FUNCTION__,
prot->active_ipc_version, prot->host_ipc_version,
@@ -2589,15 +3187,6 @@
dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
-#ifdef D2H_MINIDUMP
- if (dhd_bus_is_minidump_enabled(dhd)) {
- /* Dongle expects the host_trap_addr_len in terms of words */
- host_trap_addr_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN/ 4;
- dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
- sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
- }
-#endif /* D2H_MINIDUMP */
-
}
}
@@ -2618,6 +3207,10 @@
uint32 idmacontrol;
uint32 waitcount = 0;
+#ifdef WL_MONITOR
+ dhd->monitor_enable = FALSE;
+#endif /* WL_MONITOR */
+
/**
* A user defined value can be assigned to global variable h2d_max_txpost via
* 1. DHD IOVAR h2d_max_txpost, before firmware download
@@ -2645,7 +3238,7 @@
prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
prot->cur_ioctlresp_bufs_posted = 0;
- prot->active_tx_count = 0;
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->rxbufpost = 0;
@@ -2675,10 +3268,16 @@
prot->device_ipc_version = dhd->bus->api.fw_rev;
prot->host_ipc_version = PCIE_SHARED_VERSION;
+ prot->no_tx_resource = FALSE;
/* Init the host API version */
dhd_set_host_cap(dhd);
+ /* alloc and configure scb host address for dongle */
+ if ((ret = dhd_alloc_host_scbs(dhd))) {
+ return ret;
+ }
+
/* Register the interrupt function upfront */
/* remove corerev checks in data path */
/* do this after host/fw negotiation for DAR */
@@ -2732,9 +3331,9 @@
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_RD_BUF, 0);
}
-
/* Signal to the dongle that common ring init is complete */
- dhd_bus_hostready(dhd->bus);
+ if (dhd->hostrdy_after_init)
+ dhd_bus_hostready(dhd->bus);
/*
* If the DMA-able buffers for flowring needs to come from a specific
@@ -2758,7 +3357,11 @@
/* If IDMA is enabled and initied, wait for FW to setup the IDMA descriptors
* Waiting just before configuring doorbell
*/
+#ifdef BCMQT
+#define IDMA_ENABLE_WAIT 100
+#else
#define IDMA_ENABLE_WAIT 10
+#endif // endif
if (IDMA_ACTIVE(dhd)) {
/* wait for idma_en bit in IDMAcontrol register to be set */
/* Loop till idma_en is not set */
@@ -2770,7 +3373,11 @@
DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
waitcount, idmacontrol));
+#ifdef BCMQT
+ OSL_DELAY(200000); /* 200msec for BCMQT */
+#else
OSL_DELAY(1000); /* 1ms as its onetime only */
+#endif // endif
idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
IDMAControl(buscorerev), 0, 0);
}
@@ -2789,8 +3396,6 @@
/* Post to dongle host configured soft doorbells */
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
- /* Post buffers for packet reception and ioctl/event responses */
- dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
@@ -2810,8 +3415,15 @@
* done before that.
*/
- /* See if info rings could be created */
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+ /* See if info rings could be created; info rings should be created
+ * only if the dongle does not support EDL
+ */
+#ifdef EWP_EDL
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
/* For now log and proceed, further clean up action maybe necessary
* when we have more clarity.
@@ -2821,6 +3433,29 @@
}
}
+#ifdef EWP_EDL
+ /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
+ if (dhd->dongle_edl_support) {
+ if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* EWP_EDL */
+
+#ifdef DHD_HP2P
+ /* create HPP txcmpl/rxcmpl rings */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
+ if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed, further clean up action maybe necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_HP2P */
+
return BCME_OK;
} /* dhd_prot_init */
@@ -2843,6 +3478,7 @@
dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_free(dhd, &prot->host_scb_buf);
/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
@@ -2865,6 +3501,14 @@
/* detach info rings */
dhd_prot_detach_info_rings(dhd);
+#ifdef EWP_EDL
+ dhd_prot_detach_edl_rings(dhd);
+#endif // endif
+#ifdef DHD_HP2P
+ /* detach HPP rings */
+ dhd_prot_detach_hp2p_rings(dhd);
+#endif /* DHD_HP2P */
+
/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
* handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
* they will be part of pktid_ctrl_map handler and PKT memory is allocated using
@@ -2880,6 +3524,10 @@
#ifdef IOCTLRESP_USE_CONSTMEM
DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif // endif
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
+#endif /* DHD_MAP_PKTID_LOGGING */
#if defined(DHD_LB_TXC)
if (prot->tx_compl_prod.buffer)
@@ -2934,6 +3582,11 @@
if (prot->d2hring_info_cpln) {
dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
}
+#ifdef EWP_EDL
+ if (prot->d2hring_edl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_edl);
+ }
+#endif /* EWP_EDL */
/* Reset all DMA-able buffers allocated during prot attach */
dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
@@ -2942,6 +3595,7 @@
dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
@@ -2958,7 +3612,7 @@
prot->cur_event_bufs_posted = 0;
prot->cur_ioctlresp_bufs_posted = 0;
- prot->active_tx_count = 0;
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->ioctl_state = 0;
@@ -2974,6 +3628,15 @@
dhd_flow_rings_deinit(dhd);
}
+#ifdef DHD_HP2P
+ if (prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
+ }
+ if (prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
+ }
+#endif /* DHD_HP2P */
+
/* Reset PKTID map */
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
@@ -3037,7 +3700,7 @@
* to other CPU cores
*/
static INLINE void
-dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp)
+dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
{
bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
@@ -3062,7 +3725,6 @@
int curr_cpu;
curr_cpu = get_cpu();
- BCM_REFERENCE(curr_cpu);
put_cpu();
DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
@@ -3088,6 +3750,9 @@
DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#endif // endif
PKTFREE(dhd->osh, pkt, TRUE);
@@ -3155,6 +3820,11 @@
BCMPCIE_H2D_COMMON_MSGRINGS;
}
+ if (prot->d2hring_info_cpln) {
+ /* for d2hring re-entry case, clear inited flag */
+ prot->d2hring_info_cpln->inited = FALSE;
+ }
+
if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
}
@@ -3240,7 +3910,10 @@
if (ret != BCME_OK)
return ret;
+ prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_info_subn->current_phase = 0;
prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
prot->h2dring_info_subn->n_completion_ids = 1;
@@ -3270,6 +3943,235 @@
}
}
+#ifdef DHD_HP2P
+static int
+dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ /* Last 2 dynamic ring indices are used by hp2p rings */
+ ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
+ __FUNCTION__));
+ goto err2;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_txcpl->inited = FALSE;
+ }
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* create the hp2p rx completion ring next to hp2p tx compl ring
+ * ringid = id next to hp2p tx compl ring
+ */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
+ __FUNCTION__));
+ goto err1;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_rxcpl->inited = FALSE;
+ }
+
+ return ret;
+err1:
+ MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_rxcpl = NULL;
+
+err2:
+ MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_txcpl = NULL;
+ return ret;
+} /* dhd_check_create_hp2p_rings */
+
+int
+dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ dhd->hp2p_ring_active = FALSE;
+
+ if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: hp2p rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
+ DHD_INFO(("hp2p tx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
+ prot->d2hring_hp2p_txcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
+ DHD_INFO(("hp2p rx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
+ prot->d2hring_hp2p_rxcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ /* Note that there is no way to delete a d2h or h2d ring once it is created,
+ * so we cannot clean up if one ring was created while the other failed
+ */
+ return BCME_OK;
+} /* dhd_prot_init_hp2p_rings */
+
+static void
+dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_txcpl = NULL;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_rxcpl = NULL;
+ }
+}
+#endif /* DHD_HP2P */
+
+#ifdef EWP_EDL
+static int
+dhd_check_create_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ {
+ /* dongle may increase max_submission_rings so keep
+ * ringid at end of dynamic rings (re-use info ring cpl ring id)
+ */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS + 1;
+ }
+
+ if (prot->d2hring_edl) {
+ prot->d2hring_edl->inited = FALSE;
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->d2hring_edl == NULL) {
+ prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_edl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
+ ringid));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
+ D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ prot->d2hring_edl = NULL;
+
+ return ret;
+} /* dhd_check_create_edl_rings */
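The ringid arithmetic in dhd_check_create_edl_rings() is written term-by-term but reduces algebraically; a small worked example with assumed counts (not real chip parameters):

/* Illustration only: the counts below are assumptions for the example,
 * not values read from any dongle shared structure.
 */
static unsigned edl_ringid_example(void)
{
    unsigned max_tx_flowrings = 40;      /* assumed */
    unsigned max_submission_rings = 42;  /* assumed */
    unsigned common_h2d_msgrings = 2;    /* stands in for BCMPCIE_H2D_COMMON_MSGRINGS */

    unsigned ringid = max_tx_flowrings +
        (max_submission_rings - max_tx_flowrings) +
        common_h2d_msgrings + 1;         /* 40 + 2 + 2 + 1 == 45 */

    /* The flowring terms cancel, so this equals
     * max_submission_rings + common_h2d_msgrings + 1, i.e. the slot the
     * comment above says is re-used from the info completion ring.
     */
    return ringid;
}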
+
+int
+dhd_prot_init_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+
+ if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: EDL rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
+ DHD_INFO(("EDL completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
+ BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ return BCME_OK;
+} /* dhd_prot_init_edl_rings */
+
+static void
+dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_edl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_edl = NULL;
+ }
+}
+#endif /* EWP_EDL */
+
/**
* Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
@@ -3278,10 +4180,8 @@
{
int ret = 0;
wlc_rev_info_t revinfo;
-
-#ifndef OEM_ANDROID
char buf[128];
-#endif /* OEM_ANDROID */
+ dhd_prot_t *prot = dhd->prot;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
@@ -3323,12 +4223,53 @@
DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+ /* Get the RxBuf post size */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
+ __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
+ if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
+ DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
+ __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
+ }
+ }
+
+ /* Post buffers for packet reception */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+
DHD_SSSR_DUMP_INIT(dhd);
dhd_process_cid_mac(dhd, TRUE);
ret = dhd_preinit_ioctls(dhd);
dhd_process_cid_mac(dhd, FALSE);
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+#ifdef DHD_HP2P
+ if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
+ if (dhd->hp2p_enable) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
+ } else {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+ }
+#else
+ if (FW_SUPPORTED(dhd, h2dlogts)) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+#endif // endif
+ dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
+ /* This is during initialization. */
+ dhd_h2d_log_time_sync(dhd);
+ } else {
+ dhd->dhd_rte_time_sync_ms = 0;
+ }
+#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
/* Always assumes wl for now */
dhd->iswl = TRUE;
done:
@@ -3564,7 +4505,6 @@
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
void *p, **pktbuf;
- uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
uint8 *rxbuf_post_tmp;
host_rxbuf_post_t *rxbuf_post;
void *msg_start;
@@ -3577,12 +4517,8 @@
msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
void *lcl_buf;
uint16 lcl_buf_size;
+ uint16 pktsz = prot->rxbufpost_sz;
-#ifdef WL_MONITOR
- if (dhd->monitor_enable) {
- pktsz = DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ;
- }
-#endif /* WL_MONITOR */
/* allocate a local buffer to store pkt buffer va, pa and length */
lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
RX_BUF_BURST;
@@ -4297,11 +5233,21 @@
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the message from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
@@ -4332,15 +5278,289 @@
return more;
}
-/** called when DHD needs to check for 'receive complete' messages from the dongle */
+#ifdef EWP_EDL
+bool
+dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = prot->d2hring_edl;
+ unsigned long flags = 0;
+ uint32 items = 0;
+ uint16 rd = 0;
+ uint16 depth = 0;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
+ if (ring->item_len == 0) {
+ DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
+ __FUNCTION__, ring->idx, ring->item_len));
+ return FALSE;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ return FALSE;
+ }
+
+ if (dhd->hang_was_sent) {
+ return FALSE;
+ }
+
+ /* in this DPC context just check if wr index has moved
+ * and schedule deferred context to actually process the
+ * work items.
+ */
+ /* update the write index */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (dhd->dma_d2h_ring_upd_support) {
+ /* DMAing write/read indices supported */
+ ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
+ }
+ rd = ring->rd;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items */
+ items = READ_AVAIL_SPACE(ring->wr, rd, depth);
+ if (items == 0) {
+ /* no work items in edl ring */
+ return FALSE;
+ }
+ if (items > ring->max_items) {
+ DHD_ERROR(("\r\n======================= \r\n"));
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
+ ring->wr, ring->rd, depth));
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (ring->wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+ dhd_bus_mem_dump(dhd);
+
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_schedule_reset(dhd);
+
+ return FALSE;
+ }
+
+ if (items > D2HRING_EDL_WATERMARK) {
+ DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
+ " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
+ ring->rd, ring->wr, depth));
+ }
+
+ dhd_schedule_logtrace(dhd->info);
+
+ return FALSE;
+}
+
+/* This is called either from work queue context of 'event_log_dispatcher_work' or
+* from the kthread context of dhd_logtrace_thread
+*/
+int
+dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
+{
+ dhd_prot_t *prot = NULL;
+ msgbuf_ring_t *ring = NULL;
+ int err = 0;
+ unsigned long flags = 0;
+ cmn_msg_hdr_t *msg = NULL;
+ uint8 *msg_addr = NULL;
+ uint32 max_items_to_process = 0, n = 0;
+ uint32 num_items = 0, new_items = 0;
+ uint16 depth = 0;
+ volatile uint16 wr = 0;
+
+ if (!dhd || !dhd->prot)
+ return 0;
+
+ prot = dhd->prot;
+ ring = prot->d2hring_edl;
+ if (!ring || !evt_decode_data) {
+ return 0;
+ }
+
+ if (dhd->hang_was_sent) {
+ return FALSE;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ ring->curr_rd = ring->rd;
+ wr = ring->wr;
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items
+ * Note, that this will only give the # of items
+ * from rd to wr if wr>=rd, or from rd to ring end
+ * if wr < rd. So in the latter case strictly speaking
+ * not all the items are read. But this is OK, because
+ * these will be processed in the next doorbell as rd
+ * would have wrapped around. Processing in the next
+ * doorbell is acceptable since EDL only contains debug data
+ */
+ num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+
+ if (num_items == 0) {
+ /* no work items in edl ring */
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ return 0;
+ }
+
+ DHD_INFO(("%s: EDL work items [%u] available \n",
+ __FUNCTION__, num_items));
+
+ /* if space is available, calculate address to be read */
+ msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
+
+ max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ n = max_items_to_process;
+ while (n > 0) {
+ msg = (cmn_msg_hdr_t *)msg_addr;
+ /* wait for DMA of work item to complete */
+ if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
+ DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
+ "ring; err = %d\n", __FUNCTION__, err));
+ }
+
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
+ if (err != BCME_OK) {
+ return 0;
+ }
+
+ /* process the edl work item, i.e, the event log */
+ err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
+
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr + ring->item_len);
+
+ msg_addr += ring->item_len;
+ --n;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* update host ring read pointer */
+ if ((ring->rd + max_items_to_process) >= ring->max_items)
+ ring->rd = 0;
+ else
+ ring->rd += max_items_to_process;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Now after processing max_items_to_process update dongle rd index.
+ * The TCM rd index is updated only if bus is not
+ * in D3. Else, the rd index is updated from resume
+ * context in - 'dhdpcie_bus_suspend'
+ */
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_EDL_RING_TCM_RD_UPDATE(dhd);
+ }
+
+ /* if num_items > bound, then anyway we will reschedule and
+ * this function runs again, so that if in between the DPC has
+ * updated the wr index, then the updated wr is read. But if
+ * num_items <= bound, and if DPC executes and updates the wr index
+ * when the above while loop is running, then the updated 'wr' index
+ * needs to be re-read from here, If we don't do so, then till
+ * the next time this function is scheduled
+ * the event logs will not be processed.
+ */
+ if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
+ /* read the updated wr index if reqd. and update num_items */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (wr != (volatile uint16)ring->wr) {
+ wr = (volatile uint16)ring->wr;
+ new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+ DHD_INFO(("%s: new items [%u] avail in edl\n",
+ __FUNCTION__, new_items));
+ num_items += new_items;
+ }
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ }
+
+ /* if # of items processed is less than num_items, need to re-schedule
+ * the deferred ctx
+ */
+ if (max_items_to_process < num_items) {
+ DHD_INFO(("%s: EDL bound hit / new items found, "
+ "items processed=%u; remaining=%u, "
+ "resched deferred ctx...\n",
+ __FUNCTION__, max_items_to_process,
+ num_items - max_items_to_process));
+ return (num_items - max_items_to_process);
+ }
+
+ return 0;
+
+}
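The long comment in dhd_prot_process_edl_complete() about reading only up to the ring end can be made concrete with a standalone model of the read-available computation; the helper below mirrors how READ_AVAIL_SPACE is used here and is a sketch, not the macro from the DHD headers:

#include <stdint.h>

/* Standalone model of the read-available computation; the real
 * READ_AVAIL_SPACE macro lives in the DHD headers and is assumed,
 * not copied, here.
 */
static uint16_t read_avail_space(uint16_t wr, uint16_t rd, uint16_t depth)
{
    /* items readable without wrapping past the end of the ring */
    return (wr >= rd) ? (uint16_t)(wr - rd) : (uint16_t)(depth - rd);
}

/* Example: depth = 8, rd = 6, wr = 2 (writer has wrapped).
 * read_avail_space() returns 8 - 6 = 2: only the items up to the ring end
 * are reported now; the two items at indices 0 and 1 are picked up after
 * rd wraps to 0, on the next doorbell -- the behaviour the comment in
 * dhd_prot_process_edl_complete() describes.
 */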
+
+void
+dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = NULL;
+ unsigned long flags = 0;
+ msgbuf_ring_t *ring = NULL;
+
+ if (!dhd)
+ return;
+
+ prot = dhd->prot;
+ if (!prot || !prot->d2hring_edl)
+ return;
+
+ ring = prot->d2hring_edl;
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ dhd_prot_upd_read_idx(dhd, ring);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+}
+#endif /* EWP_EDL */
+
+/* called when DHD needs to check for 'receive complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
{
bool more = FALSE;
uint n = 0;
dhd_prot_t *prot = dhd->prot;
- msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
- uint16 item_len = ring->item_len;
+ msgbuf_ring_t *ring;
+ uint16 item_len;
host_rxbuf_cmpl_t *msg = NULL;
uint8 *msg_addr;
uint32 msg_len;
@@ -4355,14 +5575,30 @@
uint32 pktid;
int i;
uint8 sync;
+ ts_timestamp_t *ts;
+ BCM_REFERENCE(ts);
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
+ ring = prot->d2hring_hp2p_rxcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &prot->d2hring_rx_cpln;
+ item_len = ring->item_len;
while (1) {
if (dhd_is_device_removed(dhd))
break;
+ if (dhd_query_bus_erros(dhd))
+ break;
+
if (dhd->hang_was_sent)
break;
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
pkt_cnt = 0;
pktqhead = pkt_newidx = NULL;
pkt_cnt_newidx = 0;
@@ -4458,13 +5694,18 @@
}
/* Actual length of the packet */
PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
+
#if defined(WL_MONITOR)
- if (dhd_monitor_enabled(dhd, ifidx) &&
- (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
- dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
- continue;
+ if (dhd_monitor_enabled(dhd, ifidx)) {
+ if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+ continue;
+ } else {
+ DHD_ERROR(("Received non 802.11 packet, "
+ "when monitor mode is enabled\n"));
+ }
}
-#endif // endif
+#endif /* WL_MONITOR */
if (msg->flags & BCMPCIE_PKT_FLAGS_NO_FORWARD) {
DHD_PKT_FLAGS_SET_NO_FWD(pkt);
@@ -4486,6 +5727,15 @@
}
}
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_rxstats(dhd, msg);
+ }
+#endif /* DHD_HP2P */
+
#ifdef DHD_LBUF_AUDIT
PKTAUDIT(dhd->osh, pkt);
#endif // endif
@@ -4545,12 +5795,17 @@
}
/* Call lb_dispatch only if packets are queued */
- if (n) {
+ if (n &&
+#ifdef WL_MONITOR
+ !(dhd_monitor_enabled(dhd, ifidx)) &&
+#endif /* WL_MONITOR */
+ TRUE) {
DHD_LB_DISPATCH_RX_COMPL(dhd);
DHD_LB_DISPATCH_RX_PROCESS(dhd);
}
return more;
+
}
/**
@@ -4579,23 +5834,40 @@
/** called when DHD needs to check for 'transmit complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
{
bool more = TRUE;
uint n = 0;
- msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+ msgbuf_ring_t *ring;
unsigned long flags;
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
+ ring = dhd->prot->d2hring_hp2p_txcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &dhd->prot->d2hring_tx_cpln;
+
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
@@ -4677,10 +5949,18 @@
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ break;
+ }
+
if (dhd->hang_was_sent) {
break;
}
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
@@ -4732,6 +6012,11 @@
goto done;
}
+ if (dhd->smmu_fault_occurred) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+
msg = (cmn_msg_hdr_t *)buf;
/* Wait until DMA completes, then fetch msg_type */
@@ -4852,6 +6137,32 @@
DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
}
}
+#ifdef DHD_HP2P
+ else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for a ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#endif /* DHD_HP2P */
else {
DHD_ERROR(("don;t know how to pair with original request\n"));
}
@@ -4898,6 +6209,8 @@
}
#endif // endif
+ dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
+
DHD_GENERAL_LOCK(dhd, flags);
if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
@@ -4929,6 +6242,15 @@
unsigned long flags;
dhd_dma_buf_t retbuf;
+ /* Check for ioctl timeout induce flag, which is set by firing
+ * dhd iovar to induce IOCTL timeout. If flag is set,
+ * return from here, which results in to IOCTL timeout.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
+ DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
+ return;
+ }
+
memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
@@ -4954,6 +6276,8 @@
return;
}
+ dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
+
/* Clear Response pending bit */
prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
DHD_GENERAL_UNLOCK(dhd, flags);
@@ -5023,6 +6347,24 @@
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
}
+int
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
+{
+ return dhd->prot->no_tx_resource;
+}
+
+void
+dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_stop_cnt++;
+}
+
+void
+dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_start_cnt++;
+}
+
/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
@@ -5038,14 +6380,22 @@
void *secdma;
bool pkt_fate;
msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
flow_info_t *flow_info;
uint64 tx_status_latency;
+#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
+#if defined(TX_STATUS_LATENCY_STATS)
flow_ring_node_t *flow_ring_node;
uint16 flowid;
+#endif // endif
+ ts_timestamp_t *ts;
+ BCM_REFERENCE(ts);
txstatus = (host_txbuf_cmpl_t *)msg;
+#if defined(TX_STATUS_LATENCY_STATS)
flowid = txstatus->compl_hdr.flow_ring_id;
flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+#endif // endif
/* locks required to protect circular buffer accesses */
DHD_RING_LOCK(ring->ring_lock, flags);
@@ -5058,16 +6408,9 @@
#endif // endif
DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
- if (prot->active_tx_count) {
- prot->active_tx_count--;
-
- /* Release the Lock when no more tx packets are pending */
- if (prot->active_tx_count == 0)
- DHD_TXFL_WAKE_UNLOCK(dhd);
- } else {
+ if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
DHD_ERROR(("Extra packets are freed\n"));
}
-
ASSERT(pktid != 0);
pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
@@ -5088,6 +6431,11 @@
return;
}
+ if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
+ dhd->prot->no_tx_resource = FALSE;
+ dhd_bus_start_queue(dhd->bus);
+ }
+
if (SECURE_DMA_ENAB(dhd->osh)) {
int offset = 0;
BCM_REFERENCE(offset);
@@ -5101,11 +6449,13 @@
DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
}
+#ifdef TX_STATUS_LATENCY_STATS
/* update the tx status latency for flowid */
flow_info = &flow_ring_node->flow_info;
tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
flow_info->cum_tx_status_latency += tx_status_latency;
flow_info->num_tx_status++;
+#endif /* TX_STATUS_LATENCY_STATS */
#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
{
int elem_ix;
@@ -5160,12 +6510,21 @@
ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
#ifdef DHD_PKT_LOGGING
if (dhd->d11_tx_status) {
- DHD_PKTLOG_TXS(dhd, pkt, pktid,
- ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+ uint16 status = ltoh16(txstatus->compl_hdr.status) &
+ WLFC_CTL_PKTFLAG_MASK;
+ uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
+ dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
+ (uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
+ &pkthash, &status);
}
#endif /* DHD_PKT_LOGGING */
+
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, pkt_fate);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#endif // endif
#if DHD_DBG_SHOW_METADATA
@@ -5182,17 +6541,23 @@
}
#endif /* DHD_DBG_SHOW_METADATA */
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_txstats(dhd, txstatus);
+ }
+#endif /* DHD_HP2P */
+
#ifdef DHD_LBUF_AUDIT
PKTAUDIT(dhd->osh, pkt);
#endif // endif
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- PKTFREE(dhd->osh, pkt, TRUE);
- DHD_RING_LOCK(ring->ring_lock, flags);
DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
txstatus->tx_status);
DHD_RING_UNLOCK(ring->ring_lock, flags);
-
+ PKTFREE(dhd->osh, pkt, TRUE);
return;
} /* dhd_prot_txstatus_process */
@@ -5265,6 +6630,12 @@
pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
dhd->prot->rx_dataoffset));
+ if (dhd->debug_buf_dest_support) {
+ if (resp->dest < DEBUG_BUF_DEST_MAX) {
+ dhd->debug_buf_dest_stat[resp->dest]++;
+ }
+ }
+
pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
if (!pkt)
return;
@@ -5315,6 +6686,8 @@
return 0;
}
+#define MAX_MTU_SZ (1600u)
+
#define PKTBUF pktbuf
/**
@@ -5338,10 +6711,26 @@
msgbuf_ring_t *ring;
flow_ring_table_t *flow_ring_table;
flow_ring_node_t *flow_ring_node;
+#ifdef DHD_PKT_LOGGING
+ uint32 pkthash;
+#endif /* DHD_PKT_LOGGING */
if (dhd->flow_ring_table == NULL) {
+ DHD_ERROR(("dhd flow_ring_table is NULL\n"));
return BCME_NORESOURCE;
}
+#ifdef DHD_PCIE_PKTID
+ if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
+ if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
+ dhd_bus_stop_queue(dhd->bus);
+ dhd->prot->no_tx_resource = TRUE;
+ }
+ dhd->prot->pktid_depleted_cnt++;
+ goto err_no_res;
+ } else {
+ dhd->prot->pktid_depleted_cnt = 0;
+ }
+#endif /* DHD_PCIE_PKTID */
flowid = DHD_PKT_GET_FLOWID(PKTBUF);
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
@@ -5372,19 +6761,22 @@
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (txdesc == NULL) {
DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
- __FUNCTION__, __LINE__, prot->active_tx_count));
+ __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
goto err_free_pktid;
}
- DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
-#ifdef DHD_PKT_LOGGING
- DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
-#endif /* DHD_PKT_LOGGING */
-
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
pktlen = PKTLEN(dhd->osh, PKTBUF);
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+#ifdef DHD_PKT_LOGGING
+ DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
+ /* Dump TX packet */
+ pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
+ dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
+#endif /* DHD_PKT_LOGGING */
+
/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
@@ -5508,9 +6900,17 @@
txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
} else {
- txdesc->metadata_buf_len = htol16(0);
- txdesc->metadata_buf_addr.high_addr = 0;
- txdesc->metadata_buf_addr.low_addr = 0;
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_update_hp2p_txdesc(dhd, txdesc);
+ } else
+#endif /* DHD_HP2P */
+ if (1)
+ {
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
+ }
}
#ifdef DHD_PKTID_AUDIT_RING
@@ -5526,8 +6926,24 @@
PKTAUDIT(dhd->osh, PKTBUF);
#endif // endif
+ if (pktlen > MAX_MTU_SZ) {
+ DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
+ __FUNCTION__, pktlen, MAX_MTU_SZ));
+ dhd_prhex("txringitem", (volatile uchar*)txdesc,
+ sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
+ }
+
/* Update the write pointer in TCM & ring bell */
-#ifdef TXP_FLUSH_NITEMS
+#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_calc_hp2p_burst(dhd, ring, flowid);
+ } else {
+ if ((ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+ }
+ }
+#elif defined(TXP_FLUSH_NITEMS)
/* Flush if we have either hit the txp_threshold or if this msg is */
/* occupying the last slot in the flow_ring - before wrap around. */
if ((ring->pend_items_count == prot->txp_threshold) ||
@@ -5537,20 +6953,25 @@
#else
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
-#endif // endif
+#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
+
+#if defined(TX_STATUS_LATENCY_STATS)
/* set the time when pkt is queued to flowring */
DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
+#endif // endif
- prot->active_tx_count++;
+ OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
/*
* Take a wake lock, do not sleep if we have atleast one packet
* to finish.
*/
- if (prot->active_tx_count >= 1)
- DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
+ DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef TX_STATUS_LATENCY_STATS
+ flow_ring_node->flow_info.num_tx_pkts++;
+#endif /* TX_STATUS_LATENCY_STATS */
return BCME_OK;
err_rollback_idx:
@@ -5580,7 +7001,7 @@
#endif /* DHD_PCIE_PKTID */
DHD_RING_UNLOCK(ring->ring_lock, flags);
-
+err_no_res:
return BCME_NORESOURCE;
} /* dhd_prot_txdata */
@@ -5721,12 +7142,17 @@
uint8 action;
if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
goto done;
}
if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
- DHD_ERROR(("%s : bus is down. we have nothing to do -"
+ DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
" bus state: %d, sent hang: %d\n", __FUNCTION__,
dhd->busstate, dhd->hang_was_sent));
goto done;
@@ -5884,6 +7310,10 @@
dhd_dma_buf_free(dhdp, dmmap->dstmem);
MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
+
+ dhdp->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhdp);
+
dmmap = NULL;
} /* dmaxfer_free_prev_dmaaddr */
@@ -5942,6 +7372,7 @@
dhd_prot_t *prot = dhd->prot;
uint64 end_usec;
pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+ int buf_free_scheduled;
BCM_REFERENCE(cmplt);
end_usec = OSL_SYSUPTIME_US();
@@ -5954,11 +7385,16 @@
prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
cmplt->compl_hdr.status != BCME_OK) {
DHD_ERROR(("DMA loopback failed\n"));
+ /* It is observed that sometimes the completion
+ * header status is set as OK even though the memcmp fails;
+ * hence always explicitly set the dmaxfer status
+ * to error when this happens.
+ */
+ prot->dmaxfer.status = BCME_ERROR;
prhex("XFER SRC: ",
prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
prhex("XFER DST: ",
prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
- prot->dmaxfer.status = BCME_ERROR;
}
else {
switch (prot->dmaxfer.d11_lpbk) {
@@ -5995,16 +7431,20 @@
}
}
- dhd_prepare_schedule_dmaxfer_free(dhd);
+ buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
end_usec -= prot->dmaxfer.start_usec;
- if (end_usec)
+ if (end_usec) {
+ prot->dmaxfer.time_taken = end_usec;
DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
prot->dmaxfer.len, (unsigned long)end_usec,
(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
+ }
dhd->prot->dmaxfer.in_progress = FALSE;
- dhd->bus->dmaxfer_complete = TRUE;
- dhd_os_dmaxfer_wake(dhd);
+ if (buf_free_scheduled != BCME_OK) {
+ dhd->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhd);
+ }
}
/** Test functionality.
@@ -6086,17 +7526,30 @@
return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
-dma_xfer_status_t
-dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd)
+int
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
{
dhd_prot_t *prot = dhd->prot;
if (prot->dmaxfer.in_progress)
- return DMA_XFER_IN_PROGRESS;
- else if (prot->dmaxfer.status == BCME_OK)
- return DMA_XFER_SUCCESS;
+ result->status = DMA_XFER_IN_PROGRESS;
+ else if (prot->dmaxfer.status == 0)
+ result->status = DMA_XFER_SUCCESS;
else
- return DMA_XFER_FAILED;
+ result->status = DMA_XFER_FAILED;
+
+ result->type = prot->dmaxfer.d11_lpbk;
+ result->error_code = prot->dmaxfer.status;
+ result->num_bytes = prot->dmaxfer.len;
+ result->time_taken = prot->dmaxfer.time_taken;
+ if (prot->dmaxfer.time_taken) {
+ /* throughput in kBps */
+ result->tput =
+ (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
+ (uint32)prot->dmaxfer.time_taken;
+ }
+
+ return BCME_OK;
}
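The tput field above is computed with integer arithmetic, so (1000 * 1000 / 1024) truncates to 976; a quick worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the integer kBps computation; the numbers are made up. */
int main(void)
{
    uint32_t len_bytes = 65536;  /* loopback transfer size */
    uint32_t time_usec = 2048;   /* measured duration */

    /* (1000 * 1000 / 1024) truncates to 976, i.e. microseconds-per-second
     * scaled to kilobytes; the small truncation error is accepted.
     */
    uint32_t tput_kBps = (len_bytes * (1000 * 1000 / 1024)) / time_usec;

    /* prints 31232; the exact value is 31250 kB/s (32 bytes per usec) */
    printf("%u kB/s\n", (unsigned)tput_kBps);
    return 0;
}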
/** Called in the process of submitting an ioctl to the dongle */
@@ -6169,18 +7622,25 @@
void
dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
{
-
uint32 intstatus;
dhd_prot_t *prot = dhd->prot;
-
dhd->rxcnt_timeout++;
dhd->rx_ctlerrs++;
dhd->iovar_timeout_occured = TRUE;
- DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
- "trans_id %d state %d busstate=%d ioctl_received=%d\n",
- __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
- prot->ioctl_trans_id, prot->ioctl_state,
- dhd->busstate, prot->ioctl_received));
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
+ dhd->is_sched_error ? " due to scheduling problem" : "",
+ dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
+ prot->ioctl_state, dhd->busstate, prot->ioctl_received));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+
if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
prot->curr_ioctl_cmd == WLC_GET_VAR) {
char iovbuf[32];
@@ -6205,7 +7665,7 @@
/* Check the PCIe link status by reading intstatus register */
intstatus = si_corereg(dhd->bus->sih,
- dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
if (intstatus == (uint32)-1) {
DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
dhd->bus->is_linkdown = TRUE;
@@ -6257,6 +7717,11 @@
#endif /* DHD_RECOVER_TIMEOUT */
if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+ /* check if resumed on time out related to scheduling issue */
+ dhd->is_sched_error = FALSE;
+ if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
+ dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+ }
dhd_msgbuf_iovar_timeout_dump(dhd);
@@ -6268,6 +7733,7 @@
dhd_bus_mem_dump(dhd);
}
#endif /* DHD_FW_COREDUMP */
+
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
dhd->bus->no_cfg_restore = 1;
@@ -6361,6 +7827,152 @@
return BCME_UNSUPPORTED;
}
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write)
+{
+ dhd_prot_t *prot;
+ msgbuf_ring_t *ring;
+ int ret = 0;
+ uint16 h2d_flowrings_total;
+ uint16 flowid;
+
+ if (!(dhd) || !(dhd->prot)) {
+ goto exit;
+ }
+ prot = dhd->prot;
+
+ /* Below is the same ring dump sequence followed in parser as well. */
+ ring = &prot->h2dring_ctrl_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->h2dring_rxp_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_ctrl_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_tx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_rx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
+ goto exit;
+ }
+ }
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ ring = prot->d2hring_edl;
+ if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+ else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
+ ring = prot->h2dring_info_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = prot->d2hring_info_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+
+exit :
+ return ret;
+}
+
+/* Write to file */
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn)
+{
+ int ret = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
+ ((unsigned long)(ring->max_items) * (ring->item_len)));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+ } else if (user_buf) {
+ ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
+ ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
+ }
+ return ret;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_EDL
+/* Write to file */
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn)
+{
+ int ret = 0, nitems = 0;
+ char *buf = NULL, *ptr = NULL;
+ uint8 *msg_addr = NULL;
+ uint16 rd = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ if (buf == NULL) {
+ DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ ptr = buf;
+
+ for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
+ msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
+ memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
+ ptr += D2HRING_EDL_HDR_SIZE;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ goto done;
+ }
+ }
+ else {
+ ret = dhd_export_debug_data(buf, NULL, user_buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
+ }
+
+done:
+ if (buf) {
+ MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ }
+ return ret;
+}
+#endif /* EWP_EDL */
+
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
@@ -6380,6 +7992,9 @@
dhd->prot->rw_index_sz);
bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, dhd->prot->h2d_max_txpost);
+ bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
+ bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
+ bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
}
/* Update local copy of dongle statistics */
@@ -6450,6 +8065,9 @@
ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
+ /* HWA TODO: need to get the RD pointer from a different array,
+ * which HWA writes directly into host memory
+ */
/* if alloc failed , invalidate cached read ptr */
if (dhd->dma_d2h_ring_upd_support) {
ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
@@ -6459,7 +8077,6 @@
/* Check if ring->rd is valid */
if (ring->rd >= ring->max_items) {
DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
- dhd_prot_debug_info_print(dhd);
dhd->bus->read_shm_fail = TRUE;
return NULL;
}
@@ -6557,6 +8174,8 @@
/* copy ioct payload */
ioct_buf = (void *) prot->ioctbuf.va;
+ prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
+
if (buf)
memcpy(ioct_buf, buf, len);
@@ -6596,6 +8215,7 @@
uint32 dma_buf_len = max_items * item_len;
dhd_prot_t *prot = dhd->prot;
uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ dhd_dma_buf_t *dma_buf = NULL;
ASSERT(ring);
ASSERT(name);
@@ -6616,8 +8236,8 @@
uint16 flowid;
uint32 base_offset;
- dhd_dma_buf_t *dma_buf = &ring->dma_buf;
dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
+ dma_buf = &ring->dma_buf;
flowid = DHD_RINGID_TO_FLOWID(ringid);
base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
@@ -6637,10 +8257,24 @@
(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
} else {
- /* Allocate a dhd_dma_buf */
- dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
- if (dma_buf_alloced != BCME_OK) {
- return BCME_NOMEM;
+#ifdef EWP_EDL
+ if (ring == dhd->prot->d2hring_edl) {
+ /* For EDL ring, memory is alloced during attach,
+ * so just need to copy the dma_buf to the ring's dma_buf
+ */
+ memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
+ dma_buf = &ring->dma_buf;
+ if (dma_buf->va == NULL) {
+ return BCME_NOMEM;
+ }
+ } else
+#endif /* EWP_EDL */
+ {
+ /* Allocate a dhd_dma_buf */
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+ if (dma_buf_alloced != BCME_OK) {
+ return BCME_NOMEM;
+ }
}
}
@@ -6692,6 +8326,11 @@
ring->wr = 0;
ring->rd = 0;
ring->curr_rd = 0;
+ /* Reset hwa_db_type for all rings,
+ * for data path rings, it will be assigned separately post init
+ * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
+ */
+ ring->hwa_db_type = 0;
/* CAUTION: ring::base_addr already in Little Endian */
dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
@@ -6766,66 +8405,8 @@
} /* dhd_prot_ring_detach */
-/*
- * +----------------------------------------------------------------------------
- * Flowring Pool
- *
- * Unlike common rings, which are attached very early on (dhd_prot_attach),
- * flowrings are dynamically instantiated. Moreover, flowrings may require a
- * larger DMA-able buffer. To avoid issues with fragmented cache coherent
- * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
- * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
- *
- * Each DMA-able buffer may be allocated independently, or may be carved out
- * of a single large contiguous region that is registered with the protocol
- * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
- * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
- *
- * No flowring pool action is performed in dhd_prot_attach(), as the number
- * of h2d rings is not yet known.
- *
- * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
- * determine the number of flowrings required, and a pool of msgbuf_rings are
- * allocated and a DMA-able buffer (carved or allocated) is attached.
- * See: dhd_prot_flowrings_pool_attach()
- *
- * A flowring msgbuf_ring object may be fetched from this pool during flowring
- * creation, using the flowid. Likewise, flowrings may be freed back into the
- * pool on flowring deletion.
- * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
- *
- * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
- * are detached (returned back to the carved region or freed), and the pool of
- * msgbuf_ring and any objects allocated against it are freed.
- * See: dhd_prot_flowrings_pool_detach()
- *
- * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
- * state as-if upon an attach. All DMA-able buffers are retained.
- * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
- * pool attach will notice that the pool persists and continue to use it. This
- * will avoid the case of a fragmented DMA-able region.
- *
- * +----------------------------------------------------------------------------
- */
-
-/* Conversion of a flowid to a flowring pool index */
-#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
- ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
-
-/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
-#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
- (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
- DHD_FLOWRINGS_POOL_OFFSET(flowid)
-
-/* Traverse each flowring in the flowring pool, assigning ring and flowid */
-#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
- for ((flowid) = DHD_FLOWRING_START_FLOWID, \
- (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
- (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
- (ring)++, (flowid)++)
-
/* Fetch number of H2D flowrings given the total number of h2d rings */
-static uint16
+uint16
dhd_get_max_flow_rings(dhd_pub_t *dhd)
{
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
@@ -6897,6 +8478,11 @@
DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
goto attach_fail;
}
+ /*
+	 * TODO - Currently flowring HWA is disabled; it can be enabled like below:
+ * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
+ */
+ ring->hwa_db_type = 0;
}
return BCME_OK;
@@ -7105,6 +8691,15 @@
/* cache flush */
OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ return;
+ }
+
if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
dhd_prot_dma_indx_set(dhd, ring->wr,
H2D_DMA_INDX_WR_UPD, ring->idx);
@@ -7179,6 +8774,15 @@
uint32 db_index;
uint corerev;
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+ return;
+ }
+
/* update read index */
/* If dma'ing h2d indices supported
* update the r -indices in the
@@ -7186,7 +8790,7 @@
*/
if (IDMA_ACTIVE(dhd)) {
dhd_prot_dma_indx_set(dhd, ring->rd,
- D2H_DMA_INDX_RD_UPD, ring->idx);
+ D2H_DMA_INDX_RD_UPD, ring->idx);
db_index = IDMA_IDX1;
if (dhd->bus->sih) {
corerev = dhd->bus->sih->buscorerev;
@@ -7246,9 +8850,12 @@
d2h_ring->msg.flags = ctrl_ring->current_phase;
d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
+ DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
+ ring_to_create->idx, max_h2d_rings));
+
d2h_ring->ring_type = ring_type;
- d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
- d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
+ d2h_ring->max_items = htol16(ring_to_create->max_items);
+ d2h_ring->len_item = htol16(ring_to_create->item_len);
d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
@@ -7256,6 +8863,17 @@
d2h_ring->msg.epoch =
ctrl_ring->seqnum % H2D_EPOCH_MODULO;
ctrl_ring->seqnum++;
+#ifdef EWP_EDL
+ if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
+ DHD_ERROR(("%s: sending d2h EDL ring create: "
+ "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
+ __FUNCTION__, ltoh16(d2h_ring->max_items),
+ ltoh16(d2h_ring->len_item),
+ ltoh16(d2h_ring->ring_id),
+ d2h_ring->ring_ptr.low_addr,
+ d2h_ring->ring_ptr.high_addr));
+ }
+#endif /* EWP_EDL */
/* Update the flow_ring's WRITE index */
dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
@@ -7597,7 +9215,6 @@
DHD_ERROR(("\r\n======================= \r\n"));
#ifdef SUPPORT_LINKDOWN_RECOVERY
if (wr >= ring->max_items) {
- dhd_prot_debug_info_print(dhd);
dhd->bus->read_shm_fail = TRUE;
}
#else
@@ -7754,6 +9371,26 @@
flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+ flow_create_rqst->if_flags = 0;
+
+#ifdef DHD_HP2P
+	/* Create an HPP flow ring only if HP2P is enabled, TID=7, the interface is AWDL,
+	 * and the traffic is not multicast.
+	 * Allow an infra interface only if the user enabled hp2p_infra_enable through the iovar.
+	 * Allow only one HP2P flow to be active at a time.
+	 */
+ if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
+ flow_ring_node->flow_info.tid == HP2P_PRIO &&
+ (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
+ !ETHER_ISMULTI(flow_create_rqst->da)) {
+ flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
+ flow_ring_node->hp2p_ring = TRUE;
+ dhd->hp2p_ring_active = TRUE;
+
+ DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid,
+ flow_ring_node->flowid));
+ }
+#endif /* DHD_HP2P */
/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
* currently it is not used for priority. so uses solely for ifrm mask
@@ -7841,24 +9478,83 @@
ltoh16(resp->cmplt.ring_id),
ltoh32(resp->cmn_hdr.request_id)));
if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
- (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) {
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
+#ifdef DHD_HP2P
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
+#endif /* DHD_HP2P */
+ TRUE) {
DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
return;
}
if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
- if (!dhd->prot->d2hring_info_cpln->create_pending) {
- DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+#ifdef EWP_EDL
+ if (!dhd->dongle_edl_support)
+#endif // endif
+ {
+ if (!dhd->prot->d2hring_info_cpln->create_pending) {
+ DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ dhd->prot->d2hring_info_cpln->inited = TRUE;
+ }
+#ifdef EWP_EDL
+ else {
+ if (!dhd->prot->d2hring_edl->create_pending) {
+ DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("edl cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_edl->create_pending = FALSE;
+ dhd->prot->d2hring_edl->inited = TRUE;
+ }
+#endif /* EWP_EDL */
+ }
+
+#ifdef DHD_HP2P
+ if (dhd->prot->d2hring_hp2p_txcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
+ DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
return;
}
if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("info cpl ring create failed with status %d\n",
+ DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
ltoh16(resp->cmplt.status)));
return;
}
- dhd->prot->d2hring_info_cpln->create_pending = FALSE;
- dhd->prot->d2hring_info_cpln->inited = TRUE;
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
}
+ if (dhd->prot->d2hring_hp2p_rxcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
+ DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
+ }
+#endif /* DHD_HP2P */
}
static void
@@ -7900,6 +9596,12 @@
if (fmt == NULL) {
fmt = default_fmt;
}
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
+ return;
+ }
+
dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
@@ -7955,9 +9657,14 @@
dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
}
+ if (dhd->prot->d2hring_edl != NULL) {
+ bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ }
bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
- dhd->prot->active_tx_count,
+ OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
@@ -8262,6 +9969,20 @@
uint32 offset;
} hang_info_trap_t;
+#ifdef DHD_EWPR_VER2
+static hang_info_trap_t hang_info_trap_tbl[] = {
+ {"reason", 0},
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
+ {"stype", 0},
+ TRAP_T_NAME_OFFSET(type),
+ TRAP_T_NAME_OFFSET(epc),
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"", 0}
+};
+#else
static hang_info_trap_t hang_info_trap_tbl[] = {
{"reason", 0},
{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
@@ -8288,9 +10009,12 @@
TRAP_T_NAME_OFFSET(pc),
{"", 0}
};
+#endif /* DHD_EWPR_VER2 */
#define TAG_TRAP_IS_STATE(tag) \
- ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || (tag == TAG_TRAP_WLC_STATE))
+ ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
+ (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
+ (tag == TAG_TRAP_CODE))
static void
copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
@@ -8359,8 +10083,24 @@
HANG_KEY_DEL);
(*cnt)++;
}
+#ifdef DHD_EWPR_VER2
+ /* put 0 for HG03 ~ HG06 (reserved for future use) */
+ for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
+ HANG_KEY_DEL);
+ }
+#endif /* DHD_EWPR_VER2 */
}
-
+#ifndef DHD_EWPR_VER2
static void
copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
int *bytes_written, int *cnt, char *cookie)
@@ -8387,6 +10127,12 @@
}
}
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+
static void
copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
{
@@ -8453,32 +10199,6 @@
}
static void
-get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
-{
- uint32 i;
- uint32 *ext_data = dhd->extended_trap_data;
- hnd_ext_trap_hdr_t *hdr;
- const bcm_tlv_t *tlv;
-
- /* First word is original trap_data */
- ext_data++;
-
- /* Followed by the extended trap data header */
- hdr = (hnd_ext_trap_hdr_t *)ext_data;
-
- /* Dump a list of all tags found before parsing data */
- for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
- if (tlv) {
- if (!TAG_TRAP_IS_STATE(i)) {
- *subtype = i;
- return;
- }
- }
- }
-}
-
-static void
copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
{
int remain_len;
@@ -8546,6 +10266,90 @@
(*cnt)++;
}
}
+#endif /* DHD_EWPR_VER2 */
+
+static void
+get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
+{
+ uint32 i;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ /* Dump a list of all tags found before parsing data */
+ for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+ if (tlv) {
+ if (!TAG_TRAP_IS_STATE(i)) {
+ *subtype = i;
+ return;
+ }
+ }
+ }
+}
+#ifdef DHD_EWPR_VER2
+static void
+copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ char *base64_out = NULL;
+ int base64_cnt;
+ int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
+ return;
+ }
+
+ if (remain_len < max_base64_len) {
+ DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
+ remain_len));
+ max_base64_len = remain_len;
+ }
+
+ base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
+ if (base64_out == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
+ return;
+ }
+
+ if (hdr->len > 0) {
+ base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
+ if (base64_cnt == 0) {
+ DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
+ }
+ }
+
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
+ base64_out);
+ (*cnt)++;
+ MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
+}
+#endif /* DHD_EWPR_VER2 */
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
void
copy_hang_info_trap(dhd_pub_t *dhd)
@@ -8586,6 +10390,16 @@
clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+#ifdef DHD_EWPR_VER2
+ /* stack info & trap info are included in etd data */
+
+ /* extended trap data dump */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+#else
if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
@@ -8604,7 +10418,7 @@
DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
}
-
+#endif /* DHD_EWPR_VER2 */
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
@@ -8615,6 +10429,9 @@
msgbuf_ring_t *ring;
uint16 rd, wr;
uint32 dma_buf_len;
+ uint64 current_time;
+ ulong ring_tcm_rd_addr; /* dongle address */
+ ulong ring_tcm_wr_addr; /* dongle address */
DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
DHD_ERROR(("DHD: %s\n", dhd_version));
@@ -8640,13 +10457,31 @@
DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, prot->h2d_max_txpost));
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
+ DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
+ " ioctl_ack_time="SEC_USEC_FMT
+ " ioctl_cmplt_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(prot->ioctl_fillup_time),
+ GET_SEC_USEC(prot->ioctl_ack_time),
+ GET_SEC_USEC(prot->ioctl_cmplt_time)));
+
+ /* Check PCIe INT registers */
+ if (!dhd_pcie_dump_int_regs(dhd)) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
ring = &prot->h2dring_ctrl_subn;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
@@ -8660,9 +10495,12 @@
ring = &prot->d2hring_ctrl_cpln;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
@@ -8677,9 +10515,13 @@
ring = prot->h2dring_info_subn;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
@@ -8694,9 +10536,13 @@
ring = prot->d2hring_info_cpln;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
@@ -8711,10 +10557,14 @@
ring = &prot->d2hring_tx_cpln;
if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
@@ -8729,10 +10579,14 @@
ring = &prot->d2hring_rx_cpln;
if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
@@ -8744,9 +10598,41 @@
}
DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
}
+#ifdef EWP_EDL
+ ring = prot->d2hring_edl;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
+ ring->seqnum % D2H_EPOCH_MODULO));
+ }
+#endif /* EWP_EDL */
DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
+ DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
+ DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
dhd_pcie_debug_info_dump(dhd);
@@ -8758,45 +10644,50 @@
{
uint32 *ptr;
uint32 value;
- uint32 i;
- uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
- dhd->prot->d2h_dma_indx_wr_buf.len);
+ if (dhd->prot->d2h_dma_indx_wr_buf.va) {
+ uint32 i;
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+ dhd->prot->d2h_dma_indx_wr_buf.len);
- bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
- bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
- ptr++;
- bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
- for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+ bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
value = ltoh32(*ptr);
- bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+ ptr++;
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ ptr++;
+ }
}
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
- dhd->prot->h2d_dma_indx_rd_buf.len);
+ if (dhd->prot->h2d_dma_indx_rd_buf.va) {
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+ dhd->prot->h2d_dma_indx_rd_buf.len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
- bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+ bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+ }
return 0;
}
@@ -9065,6 +10956,7 @@
case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
+ case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
@@ -9403,6 +11295,17 @@
bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
}
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
+ if (tlv)
+ {
+ const pcie_hmapviolation_t* hmap;
+ hmap = (const pcie_hmapviolation_t *)tlv->data;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
+ bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
+ bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
+ bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
+ }
+
return BCME_OK;
}
@@ -9549,3 +11452,281 @@
DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
}
+
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+ return dhdp->prot->ioctl_trans_id;
+}
+
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
+{
+ if (!dhd->hscb_enable) {
+ if (len) {
+ /* prevent "Operation not supported" dhd message */
+ *len = 0;
+ return BCME_OK;
+ }
+ return BCME_UNSUPPORTED;
+ }
+
+ if (va) {
+ *va = dhd->prot->host_scb_buf.va;
+ }
+ if (len) {
+ *len = dhd->prot->host_scb_buf.len;
+ }
+
+ return BCME_OK;
+}
+
+#ifdef DHD_BUS_MEM_ACCESS
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
+{
+ if (!dhd->hscb_enable) {
+ return BCME_UNSUPPORTED;
+ }
+
+ if (dhd->prot->host_scb_buf.va == NULL ||
+ ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
+ return BCME_BADADDR;
+ }
+
+ memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
+
+ return BCME_OK;
+}
+#endif /* DHD_BUS_MEM_ACCESS */
+
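/* A minimal usage sketch of the two HSCB accessors above, assuming DHD_BUS_MEM_ACCESS
 * is defined; dhd_dump_hscb() and its parameters are hypothetical, not part of this code.
 */
static int dhd_dump_hscb(dhd_pub_t *dhd, void *out, uint32 out_len)
{
	uint32 len = 0;

	/* query the HSCB length first; the virtual address is not needed for the copy */
	if (dhd_get_hscb_info(dhd, NULL, &len) != BCME_OK || len == 0)
		return BCME_UNSUPPORTED;

	/* copy from offset 0; dhd_get_hscb_buff() bounds-checks offset + length */
	return dhd_get_hscb_buff(dhd, 0, MIN(len, out_len), out);
}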
+#ifdef DHD_HP2P
+uint32
+dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_thresh = (uint16)val;
+
+ val = dhd->pkt_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->time_thresh = (uint16)val;
+
+ val = dhd->time_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_expiry = (uint16)val;
+
+ val = dhd->pkt_expiry;
+
+ return val;
+}
+
+uint8
+dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
+{
+ uint8 ret = 0;
+ if (set) {
+ dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
+ dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
+
+ if (enable) {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
+ } else {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
+ }
+ }
+ ret = dhd->hp2p_infra_enable ? 0x1:0x0;
+ ret <<= 4;
+ ret |= dhd->hp2p_enable ? 0x1:0x0;
+
+ return ret;
+}
+
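/* For illustration (value is hypothetical): calling dhd_prot_hp2p_enable() with
 * set=TRUE and enable=0x11 turns on both legs -- the low nibble (0x1) maps to
 * dhd->hp2p_enable and the high nibble (0x1) to dhd->hp2p_infra_enable -- switches
 * the flow priority map to TID, and the function echoes 0x11 back in the same
 * packed form.
 */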
+static void
+dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
+ hp2p_info_t *hp2p_info;
+ uint32 dur1;
+
+ hp2p_info = &dhd->hp2p_info[0];
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
+
+ if (dur1 > (MAX_RX_HIST_BIN - 1)) {
+ dur1 = MAX_RX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n",
+ __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
+ return;
+}
+
+static void
+dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
+ uint16 flowid = txstatus->compl_hdr.flow_ring_id;
+ uint32 hp2p_flowid, dur1, dur2;
+ hp2p_info_t *hp2p_info;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+ ts = (ts_timestamp_t *)&(txstatus->ts);
+
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur1 > (MAX_TX_HIST_BIN - 1)) {
+ dur1 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+ hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
+
+ dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur2 > (MAX_TX_HIST_BIN - 1)) {
+ dur2 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
+ return;
+}
+
+enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
+{
+ hp2p_info_t *hp2p_info;
+ unsigned long flags;
+ dhd_pub_t *dhdp;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+ hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
+#else
+ hp2p_info = container_of(timer, hp2p_info_t, timer);
+#endif // endif
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ dhdp = hp2p_info->dhd_pub;
+ if (!dhdp) {
+ goto done;
+ }
+
+ DHD_INFO(("%s: pend_item = %d flowid = %d\n",
+ __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
+ hp2p_info->flowid));
+
+ flags = dhd_os_hp2plock(dhdp);
+
+ dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->num_timer_limit++;
+
+ dhd_os_hp2punlock(dhdp, flags);
+done:
+ return HRTIMER_NORESTART;
+}
+
+static void
+dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
+{
+ hp2p_info_t *hp2p_info;
+ uint16 hp2p_flowid;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+
+ if (ring->pend_items_count == dhd->pkt_thresh) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->ring = NULL;
+ hp2p_info->num_pkt_limit++;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
+ tasklet_hrtimer_cancel(&hp2p_info->timer);
+#else
+ hrtimer_cancel(&hp2p_info->timer);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
+ DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
+ "hp2p_flowid = %d pkt_thresh = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
+ } else {
+ if (hp2p_info->hrtimer_init == FALSE) {
+ hp2p_info->hrtimer_init = TRUE;
+ hp2p_info->flowid = flowid;
+ hp2p_info->dhd_pub = dhd;
+ hp2p_info->ring = ring;
+ hp2p_info->num_timer_start++;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
+ tasklet_hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
+#else
+ hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
+
+ DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid));
+ }
+ }
+ return;
+}
+
+static void
+dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
+{
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ txdesc->metadata_buf_len = 0;
+ txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
+ txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
+ txdesc->exp_time = dhd->pkt_expiry;
+
+ DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
+ __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
+ txdesc->metadata_buf_addr.low_addr,
+ txdesc->exp_time));
+
+ return;
+}
+#endif /* DHD_HP2P */
+
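/* A minimal sketch of the assumed per-flow timer setup that dhd_hp2p_write() serves
 * and dhd_calc_hp2p_burst() arms/cancels above; the init call site is not part of
 * this code, and an hp2p_info_t *hp2p_info is assumed to be in scope.
 */
#ifdef DHD_HP2P
#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
	tasklet_hrtimer_init(&hp2p_info->timer, dhd_hp2p_write,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL);
#else
	hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	hp2p_info->timer.function = &dhd_hp2p_write;
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
#endif /* DHD_HP2P */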
+#ifdef DHD_MAP_LOGGING
+void
+dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
+{
+ dhd_prot_debug_info_print(dhdp);
+ OSL_DMA_MAP_DUMP(dhdp->osh);
+#ifdef DHD_MAP_PKTID_LOGGING
+ dhd_pktid_logging_dump(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhd_bus_get_mem_dump(dhdp);
+#else
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* DHD_FW_COREDUMP */
+}
+#endif /* DHD_MAP_LOGGING */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.c
index bc5e7da..ca9caf6 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.c
@@ -1,7 +1,9 @@
/*
* DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -32,11 +34,14 @@
#include <bcmutils.h>
#include <bcmdevs.h>
#include <siutils.h>
+#include <sbpcmcia.h>
+#include <hndoobr.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <etd.h>
#include <hnd_debug.h>
#include <sbchipc.h>
+#include <sbhndarm.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
@@ -57,12 +62,16 @@
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
+#include <bcmstdlib_s.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmevent.h>
#include <trxhdr.h>
+extern uint32 hw_module_variant;
+#include <pcie_core.h>
+
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
@@ -71,9 +80,29 @@
#include <debugger.h>
#endif /* DEBUGGER || DHD_DSCOPE */
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <dhd_linux_wq.h>
+#include <dhd_linux.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+#include <dhd_linux_priv.h>
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#include <otpdefs.h>
+#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
+
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
+#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
+#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
+
+#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
+#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
+#define ACC_MPU_SHIFT 25
+#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
+
#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
@@ -103,13 +132,34 @@
extern unsigned int system_rev;
#endif /* SUPPORT_MULTIPLE_BOARD_REV */
+/* DHD module parameter */
+extern uint32 hw_module_variant;
+
+#ifdef EWP_EDL
+extern int host_edl_support;
+#endif // endif
+
+/* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
+uint dma_ring_indices = 0;
+/* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
+bool h2d_phase = 0;
+/* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
+ * defined in dhd_linux.c
+ */
+bool force_trap_bad_h2d_phase = 0;
+
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
+struct dhd_bus *g_dhd_bus = NULL;
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void dhd_log_dump_axi_error(uint8 *axi_err);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
-struct dhd_bus *g_dhd_bus = NULL;
static int dhdpcie_mem_dump(dhd_bus_t *bus);
+static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
@@ -149,6 +199,7 @@
bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
@@ -162,15 +213,13 @@
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static int dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2);
static void dhdpcie_fw_trap(dhd_bus_t *bus);
static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
extern void dhd_dpc_enable(dhd_pub_t *dhdp);
extern void dhd_dpc_kill(dhd_pub_t *dhdp);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#ifdef IDLE_TX_FLOW_MGMT
static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
static void dhd_bus_idle_scan(dhd_bus_t *bus);
@@ -180,6 +229,10 @@
extern void exynos_pcie_register_dump(int ch_num);
#endif /* EXYNOS_PCIE_DEBUG */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
#define PCI_VENDOR_ID_BROADCOM 0x14e4
#define PCI_VENDOR_ID_CYPRESS 0x12be
@@ -189,7 +242,7 @@
#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
-static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
+static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
static int dhdpcie_init_d11status(struct dhd_bus *bus);
@@ -198,11 +251,12 @@
extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
-static int dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus);
-
-#ifdef DHD_SSSR_DUMP
-static int dhdpcie_sssr_dump(dhd_pub_t *dhd);
-#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_HP2P
+extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
+static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
+#endif // endif
+#define NUM_PATTERNS 2
+static bool dhd_bus_tcm_test(struct dhd_bus *bus);
/* IOVar table */
enum {
@@ -259,6 +313,7 @@
IOV_CTO_PREVENTION,
IOV_PCIE_WD_RESET,
IOV_DUMP_DONGLE,
+ IOV_HWA_ENAB_BMAP,
IOV_IDMA_ENABLE,
IOV_IFRM_ENABLE,
IOV_CLEAR_RING,
@@ -269,9 +324,18 @@
#endif /* DEBUGGER || DHD_DSCOPE */
IOV_INB_DW_ENABLE,
IOV_CTO_THRESHOLD,
-#ifdef D2H_MINIDUMP
- IOV_MINIDUMP_OVERRIDE,
-#endif /* D2H_MINIDUMP */
+ IOV_HSCBSIZE, /* get HSCB buffer size */
+#ifdef DHD_BUS_MEM_ACCESS
+ IOV_HSCBBYTES, /* copy HSCB buffer */
+#endif // endif
+ IOV_HP2P_ENABLE,
+ IOV_HP2P_PKT_THRESHOLD,
+ IOV_HP2P_TIME_THRESHOLD,
+ IOV_HP2P_PKT_EXPIRY,
+ IOV_HP2P_TXCPL_MAXITEMS,
+ IOV_HP2P_RXCPL_MAXITEMS,
+ IOV_EXTDTXS_IN_TXCPL,
+ IOV_HOSTRDY_AFTER_INIT,
IOV_PCIE_LAST /**< unused IOVAR */
};
@@ -297,8 +361,8 @@
{"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
{"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(uint8) },
#endif /* DHD_PCIE_REG_ACCESS */
- {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
- {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
+ {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
+ {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
{"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
@@ -333,6 +397,7 @@
{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
{"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
+ {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 },
{"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
@@ -342,13 +407,28 @@
#endif /* DEBUGGER || DHD_DSCOPE */
{"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
-#ifdef D2H_MINIDUMP
- {"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 },
-#endif /* D2H_MINIDUMP */
+ {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+ {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+#endif // endif
+#ifdef DHD_HP2P
+ {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+#endif // endif
+ {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
+ {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
{NULL, 0, 0, 0, 0, 0 }
};
+#ifdef BCMQT
+#define MAX_READ_TIMEOUT 200 * 1000 * 1000
+#else
#define MAX_READ_TIMEOUT 5 * 1000 * 1000
+#endif // endif
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 64
@@ -444,6 +524,24 @@
return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
}
+/*
+ * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
+ */
+static INLINE void
+dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, bool enable)
+{
+ if (enable) {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
+ }
+}
+
static INLINE void
_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
{
@@ -526,6 +624,47 @@
}
static INLINE void
+_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
+{
+ uint mask, val;
+
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
+ val = SRPWR_DMN_ALL_MASK(bus->sih);
+
+ si_srpwr_request(bus->sih, mask, val);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
+{
+ uint mask;
+
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
+
+ si_srpwr_request(bus->sih, mask, 0);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
_dhd_bus_pcie_pwr_req_cmn(bus);
@@ -538,8 +677,9 @@
__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
if (bus->sih->buscorerev <= 14 ||
si_chipid(bus->sih) == BCM4375_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4362_CHIP_ID ||
+ si_chipid(bus->sih) == BCM43751_CHIP_ID ||
si_chipid(bus->sih) == BCM4361_CHIP_ID ||
- si_chipid(bus->sih) == BCM4359_CHIP_ID ||
si_chipid(bus->sih) == CYW55560_CHIP_ID) {
return FALSE;
} else {
@@ -561,7 +701,14 @@
{
dhd_bus_t *bus = NULL;
int ret = BCME_OK;
-
+	/* customvar1 and customvar2 are customer-configurable CIS tuples in OTP.
+	 * In a dual-chip (PCIe) scenario, customvar2 is used as a hint to detect
+	 * the chip variant and load the right firmware and NVRAM.
+	 */
+	/* These vars are initialized to 0x0 because an OTPed value cannot be 0x0 */
+ uint32 customvar1 = 0x0;
+ uint32 customvar2 = 0x0;
+ uint32 otp_hw_module_variant = 0x0;
DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
do {
@@ -597,36 +744,89 @@
break;
}
+ if (!hw_module_variant) {
+ /* For single wifi module */
+ goto enumerate_module;
+ }
+
+		/* Read the OTP customvar tuples into customvar1 and customvar2 */
+ if (dhdpcie_sromotp_customvar(bus, &customvar1, &customvar2)) {
+ DHD_ERROR(("%s: dhdpcie_sromotp_customvar failed\n", __FUNCTION__));
+ break;
+ }
+ if (!customvar2) {
+			DHD_ERROR(("%s: customvar2 is not OTPed, "
+				"hw_module_variant=0x%x\n",
+ __FUNCTION__, hw_module_variant));
+ goto enumerate_module;
+ }
+ /* customvar2=0xNNMMLLKK, LL is module variant */
+ otp_hw_module_variant = (customvar2 >> 8) & 0xFF;
+		DHD_TRACE(("%s: hw_module_variant=0x%x and "
+			"OTPed-module_variant=0x%x\n", __FUNCTION__,
+ hw_module_variant, otp_hw_module_variant));
+ if (hw_module_variant != otp_hw_module_variant) {
+ DHD_ERROR(("%s: Not going to enumerate this module as "
+ "hw_module_variant=0x%x and "
+ "OTPed-module_variant=0x%x didn't match\n",
+ __FUNCTION__, hw_module_variant, otp_hw_module_variant));
+ break;
+ }
+ DHD_TRACE(("%s: Going to enumerate this module as "
+ "hw_module_variant=0x%x and "
+ "OTPed-module_variant=0x%x match\n",
+ __FUNCTION__, hw_module_variant, otp_hw_module_variant));
+enumerate_module:
/* software resources */
if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
break;
}
+
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->dhd->hostrdy_after_init = TRUE;
bus->db1_for_mb = TRUE;
bus->dhd->hang_report = TRUE;
bus->use_mailbox = FALSE;
bus->use_d0_inform = FALSE;
bus->intr_enabled = FALSE;
bus->flr_force_fail = FALSE;
+ /* By default disable HWA and enable it via iovar */
+ bus->hwa_enab_bmap = 0;
+ /* update the dma indices if set through module parameter. */
+ if (dma_ring_indices != 0) {
+ dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
+ }
+ /* update h2d phase support if set through module parameter */
+ bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
+ /* update force trap on bad phase if set through module parameter */
+ bus->dhd->force_dongletrap_on_bad_h2d_phase =
+ force_trap_bad_h2d_phase ? TRUE : FALSE;
#ifdef IDLE_TX_FLOW_MGMT
bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
bus->irq_registered = FALSE;
#ifdef DHD_MSI_SUPPORT
+#ifdef DHD_FORCE_MSI
+ bus->d2h_intr_method = PCIE_MSI;
+#else
bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
PCIE_MSI : PCIE_INTX;
+#endif /* DHD_FORCE_MSI */
#else
bus->d2h_intr_method = PCIE_INTX;
#endif /* DHD_MSI_SUPPORT */
+#ifdef DHD_HP2P
+ bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
+ bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
+#endif /* DHD_HP2P */
+
DHD_TRACE(("%s: EXIT SUCCESS\n",
__FUNCTION__));
-#ifdef DHD_FW_COREDUMP
g_dhd_bus = bus;
-#endif // endif
*bus_ptr = bus;
return ret;
} while (0);
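/* For illustration (hypothetical OTP value): with customvar2 laid out as 0xNNMMLLKK,
 * the module-variant hint is the LL byte, so a value of 0x00332211 decodes as
 * (0x00332211 >> 8) & 0xFF == 0x22, which is then compared against the
 * hw_module_variant module parameter before enumeration continues.
 */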
@@ -709,6 +909,88 @@
return bus->sih->chippkg;
}
+/** Conduct Loopback test */
+int
+dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
+{
+ dma_xfer_info_t dmaxfer_lpbk;
+ int ret = BCME_OK;
+
+#define PCIE_DMAXFER_LPBK_LENGTH 4096
+ memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
+ dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
+ dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
+ dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
+ dmaxfer_lpbk.type = type;
+ dmaxfer_lpbk.should_wait = TRUE;
+
+ ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
+ (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
+ if (ret < 0) {
+ DHD_ERROR(("failed to start PCIe Loopback Test!!! "
+ "Type:%d Reason:%d\n", type, ret));
+ return ret;
+ }
+
+ if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
+		DHD_ERROR(("PCIe Loopback Test failed!!! "
+ "Type:%d Status:%d Error code:%d\n", type,
+ dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
+ ret = BCME_ERROR;
+ } else {
+		DHD_ERROR(("PCIe Loopback Test passed,"
+			" Type:%d\n", type));
+ }
+#undef PCIE_DMAXFER_LPBK_LENGTH
+
+ return ret;
+}
+
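/* A minimal usage sketch, assuming a caller holding dhdp: a bring-up or debug path
 * could run the loopback check above and treat any non-BCME_OK result as a bus health
 * failure. The type value 0 is only a stand-in for the dma_xfer type the caller wants.
 */
	if (dhd_bus_dmaxfer_lpbk(dhdp, 0) != BCME_OK) {
		DHD_ERROR(("PCIe DMA loopback sanity check failed\n"));
	}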
+/* Log the latest DPC schedule time */
+void
+dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
+{
+ dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
+}
+
+/* Check if there is DPC scheduling errors */
+bool
+dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bool sched_err;
+
+ if (bus->dpc_entry_time < bus->isr_exit_time) {
+ /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
+ sched_err = TRUE;
+ } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
+ /* Kernel doesn't schedule the DPC after DHD tries to reschedule
+ * the DPC due to pending work items to be processed.
+ */
+ sched_err = TRUE;
+ } else {
+ sched_err = FALSE;
+ }
+
+ if (sched_err) {
+ /* print out minimum timestamp info */
+ DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT
+ " dpc_entry_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT
+ " dpc_sched_time="SEC_USEC_FMT
+ " resched_dpc_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time),
+ GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->dpc_exit_time),
+ GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->resched_dpc_time)));
+ }
+
+ return sched_err;
+}
+
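/* A minimal sketch of a hypothetical watchdog hook over the query above: a periodic
 * health check could use it to decide when to dump protocol state, since a TRUE
 * return means the DPC never ran after the last ISR or reschedule request.
 */
static void dhd_watchdog_check_dpc_sched(dhd_pub_t *dhdp)
{
	if (dhd_bus_query_dpc_sched_errors(dhdp)) {
		/* timestamps were already printed by the query; add protocol context */
		dhd_prot_debug_info_print(dhdp);
	}
}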
/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
@@ -731,21 +1013,29 @@
/* this is a PCIE core register..not a config register... */
intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
- intstatus &= intmask;
/* Is device removed. intstatus & intmask read 0xffffffff */
- if (intstatus == (uint32)-1) {
+ if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
+ DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
+ __FUNCTION__, intstatus, intmask));
bus->is_linkdown = TRUE;
dhd_pcie_debug_info_dump(bus->dhd);
#ifdef CUSTOMER_HW4_DEBUG
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
- bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+#if defined(OEM_ANDROID)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
dhd_os_send_hang_message(bus->dhd);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+#endif /* OEM_ANDROID */
#endif /* CUSTOMER_HW4_DEBUG */
return intstatus;
}
+ intstatus &= intmask;
+
/*
* The fourth argument to si_corereg is the "mask" fields of the register to update
* and the fifth field is the "value" to update. Now if we are interested in only
@@ -761,6 +1051,56 @@
return intstatus;
}
+void
+dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus = dhd->bus;
+ int ret;
+
+ /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
+ */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+
+	/* Sleep for 1 second so that any AXI timeout,
+	 * even one running on the ALP clock, is also captured
+ */
+ OSL_SLEEP(1000);
+
+	/* Reset the backplane and clear the CTO;
+	 * access through PCIe is then recovered.
+ */
+ ret = dhdpcie_cto_error_recovery(bus);
+ if (!ret) {
+ /* Waiting for backplane reset */
+ OSL_SLEEP(10);
+ /* Dump debug Info */
+ dhd_prot_debug_info_print(bus->dhd);
+ /* Dump console buffer */
+ dhd_bus_dump_console_buffer(bus);
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->is_linkdown = TRUE;
+ bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
+ /* Send HANG event */
+ dhd_os_send_hang_message(bus->dhd);
+#endif /* OEM_ANDROID */
+}
+
/**
* Name: dhdpcie_bus_isr
* Parameters:
@@ -782,15 +1122,17 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* verify argument */
if (!bus) {
- DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
+ DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
break;
}
if (bus->dhd->dongle_reset) {
+ DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
break;
}
if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
break;
}
@@ -805,10 +1147,23 @@
intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
if (intstatus & PCI_CTO_INT_MASK) {
- /* reset backplane and cto,
- * then access through pcie is recovered.
+ DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
+ "intstat=0x%x enab=%d\n", __FUNCTION__,
+ intstatus, bus->cto_enable));
+ bus->cto_triggered = 1;
+ /*
+ * DAR still accessible
*/
- dhdpcie_cto_error_recovery(bus);
+ dhd_bus_dump_dar_registers(bus);
+
+ /* Disable further PCIe interrupts */
+ dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+ /* Stop Tx flow */
+ dhd_bus_stop_queue(bus);
+
+ /* Schedule CTO recovery */
+ dhd_schedule_cto_recovery(bus->dhd);
+
return TRUE;
}
}
@@ -822,6 +1177,12 @@
/* Check if the interrupt is ours or not */
if (intstatus == 0) {
+		/* In EFI, where interrupts are polled, this message would flood
+		 * the logs, so it is disabled for EFI builds.
+		 */
+ DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
+ bus->non_ours_irq_count++;
+ bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
break;
}
@@ -831,8 +1192,10 @@
/* return error for 0xFFFFFFFF */
if (intstatus == (uint32)-1) {
+ DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
+ __FUNCTION__, intstatus));
dhdpcie_disable_irq_nosync(bus);
- return BCME_ERROR;
+ break;
}
skip_intstatus_read:
@@ -996,8 +1359,7 @@
OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
bus->saved_config.bar0_win);
- OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32),
- bus->saved_config.bar1_win);
+ dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
return BCME_OK;
}
@@ -1066,22 +1428,54 @@
}
}
-#ifdef DHD_DISABLE_ASPM
-void
-dhd_bus_aspm_enable(dhd_bus_t *bus, bool enable)
+static void
+dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
{
- uint val;
- val = dhd_pcie_config_read(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
- if (enable) {
- dhd_pcie_config_write(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32),
- (val | PCIE_ASPM_L1_ENAB));
- } else {
- dhd_pcie_config_write(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32),
- (val & (~PCIE_ASPM_L1_ENAB)));
- }
- DHD_ERROR(("%s: %s\n", __FUNCTION__, (enable ? "ENABLE" : "DISABLE")));
+ uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
+ (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
+ pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
}
-#endif /* DHD_DISABLE_ASPM */
+
+void
+dhdpcie_dongle_reset(dhd_bus_t *bus)
+{
+ /* if the pcie link is down, watchdog reset
+ * should not be done, as it may hang
+ */
+ if (bus->is_linkdown) {
+ return;
+ }
+
+ /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
+ if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
+#ifdef DHD_USE_BP_RESET
+ /* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
+ dhd_bus_perform_bp_reset(bus);
+#else
+ /* Legacy chipcommon watchdog reset */
+ dhdpcie_cc_watchdog_reset(bus);
+#endif /* DHD_USE_BP_RESET */
+ }
+}
+
+#ifdef CHIPS_CUSTOMER_HW6
+void
+dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
+{
+ volatile uint32 *cr4_regs;
+ if (BCM4378_CHIP(bus->sih->chip)) {
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+ if (cr4_regs == NULL) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ return;
+ }
+ if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
+ /* bus mpu is supported */
+ W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
+ }
+ }
+}
+#endif /* CHIPS_CUSTOMER_HW6 */
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
@@ -1156,13 +1550,39 @@
goto fail;
}
+ /* Configure CTO Prevention functionality */
+#if defined(BCMFPGA_HW)
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+#else
+#if defined(BCMPCIE_CTO_PREVENTION)
+ if (bus->sih->buscorerev >= 24) {
+ DHD_ERROR(("Enable CTO\n"));
+ bus->cto_enable = TRUE;
+ } else
+#endif /* BCMPCIE_CTO_PREVENTION */
+ {
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+ }
+#endif /* BCMFPGA_HW */
+
+ if (PCIECTO_ENAB(bus)) {
+ dhdpcie_cto_init(bus, TRUE);
+ }
+
/* Storing secureboot capability */
bus->sih->secureboot = isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT);
if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
- DHD_ERROR(("Enable CTO\n"));
- bus->cto_enable = TRUE;
- dhdpcie_cto_init(bus, bus->cto_enable);
+ /*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * Producer Index Feature which is used by F1 gets reset on F0 FLR
+ * fixed in REV68
+ */
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ dhdpcie_ssreset_dis_enum_rst(bus);
+ }
/* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
* dhdpcie_bus_release_dongle() --> si_detach()
@@ -1175,6 +1595,26 @@
dhd_bus_pcie_pwr_req_nolock(bus);
}
+ /* Get info on the ARM and SOCRAM cores... */
+ /* Should really be qualified by device id */
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ bus->coreid = si_coreid(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* CA7 requires coherent bits on */
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ (val | PCIE_BARCOHERENTACCEN_MASK));
+ }
+
/* Olympic EFI requirement - stop driver load if FW is already running
* need to do this here before pcie_watchdog_reset, because
* pcie_watchdog_reset will put the ARM back into halt state
@@ -1186,9 +1626,15 @@
}
BCM_REFERENCE(dongle_isolation);
-#ifndef DONGLE_ENABLE_ISOLATION
- /* Enable CLKREQ# */
- dhdpcie_clkreq(bus->osh, 1, 1);
+
+ /* For inbuilt drivers pcie clk req will be done by RC,
+ * so do not do clkreq from dhd
+ */
+ if (dhd_download_fw_on_driverload)
+ {
+ /* Enable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 1);
+ }
/*
* bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
@@ -1197,19 +1643,40 @@
* be called.
*/
if (bus->dhd == NULL) {
- dongle_isolation = FALSE; /* dhd_attach not yet happened, do watchdog reset */
+ /* dhd_attach not yet happened, do watchdog reset */
+ dongle_isolation = FALSE;
} else {
dongle_isolation = bus->dhd->dongle_isolation;
}
+
+#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
/*
* Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
* This is required to avoid spurious interrupts to the Host and bring back
* dongle to a sane state (on host soft-reboot / watchdog-reboot).
*/
if (dongle_isolation == FALSE) {
- pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, WD_SSRESET_PCIE_F0_EN);
+ dhdpcie_dongle_reset(bus);
}
-#endif /* !DONGLE_ENABLE_ISOLATION */
+#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
+
+ /* need to set the force_bt_quiesce flag here
+ * before calling dhdpcie_dongle_flr_or_pwr_toggle
+ */
+ bus->force_bt_quiesce = TRUE;
+ /*
+ * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
+ * So don't need BT quiesce.
+ */
+ if (bus->sih->buscorerev >= 66) {
+ bus->force_bt_quiesce = FALSE;
+ }
+
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
+#ifdef CHIPS_CUSTOMER_HW6
+ dhdpcie_bus_mpu_disable(bus);
+#endif /* CHIPS_CUSTOMER_HW6 */
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
sbpcieregs = (sbpcieregs_t*)(bus->regs);
@@ -1219,18 +1686,6 @@
val = R_REG(osh, &sbpcieregs->configdata);
W_REG(osh, &sbpcieregs->configdata, val);
- /* Get info on the ARM and SOCRAM cores... */
- /* Should really be qualified by device id */
- if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
- bus->armrev = si_corerev(bus->sih);
- } else {
- DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
- goto fail;
- }
-
if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
* adjusted.
@@ -1241,6 +1696,16 @@
goto fail;
}
switch ((uint16)bus->sih->chip) {
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4368_CHIP_ID:
+ bus->dongle_ram_base = CA7_4368_RAM_BASE;
+ bus->orig_ramsize = 0x1c0000;
+ break;
+ CASE_BCM4367_CHIP:
+ bus->dongle_ram_base = CA7_4367_RAM_BASE;
+ bus->orig_ramsize = 0x1e0000;
+ break;
+#endif /* CHIPS_CUSTOMER_HW6 */
default:
/* also populate base address */
bus->dongle_ram_base = CA7_4365_RAM_BASE;
@@ -1298,6 +1763,24 @@
case BCM4361_CHIP_ID:
bus->dongle_ram_base = CR4_4347_RAM_BASE;
break;
+ case BCM4362_CHIP_ID:
+ bus->dongle_ram_base = CR4_4362_RAM_BASE;
+ break;
+ case BCM43751_CHIP_ID:
+ bus->dongle_ram_base = CR4_43751_RAM_BASE;
+ break;
+
+ case BCM4373_CHIP_ID:
+ bus->dongle_ram_base = CR4_4373_RAM_BASE;
+ break;
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4378_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4378_RAM_BASE;
+ break;
+ case BCM4377_CHIP_ID:
+ bus->dongle_ram_base = CR4_4377_RAM_BASE;
+ break;
+#endif /* CHIPS_CUSTOMER_HW6 */
case BCM4375_CHIP_ID:
case BCM4369_CHIP_ID:
bus->dongle_ram_base = CR4_4369_RAM_BASE;
@@ -1337,9 +1820,6 @@
/* Set the poll and/or interrupt flags */
bus->intr = (bool)dhd_intr;
-#ifdef DHD_DISABLE_ASPM
- dhd_bus_aspm_enable(bus, FALSE);
-#endif /* DHD_DISABLE_ASPM */
bus->idma_enabled = TRUE;
bus->ifrm_enabled = TRUE;
@@ -1347,9 +1827,23 @@
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req_clear_nolock(bus);
- }
- bus->force_bt_quiesce = TRUE;
+ /*
+ * One time clearing of Common Power Domain since HW default is set
+ * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
+ * for 4378B0 (rev 68).
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
+ */
+ si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
+
+ /*
+ * WAR to fix ARM cold boot;
+ * Assert WL domain in DAR helps but not enum
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus, TRUE);
+ }
+ }
return 0;
@@ -1386,19 +1880,27 @@
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
DHD_TRACE(("%s Enter\n", __FUNCTION__));
- if (bus && bus->sih && !bus->is_linkdown) {
- /* Skip after recieving D3 ACK */
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
- return;
+ if (bus) {
+ if (bus->sih && !bus->is_linkdown) {
+		/* Skip after receiving D3 ACK */
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ return;
+ }
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ } else {
+ #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
+ bus->def_intmask, TRUE);
+ #endif
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
+ bus->def_intmask, bus->def_intmask);
+ }
}
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- dhpcie_bus_unmask_interrupt(bus);
- } else {
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
- bus->def_intmask, bus->def_intmask);
- }
+
}
+
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
@@ -1420,6 +1922,7 @@
bus->def_intmask, 0);
}
}
+
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
@@ -1469,12 +1972,18 @@
DHD_TRACE(("%s Enter\n", __FUNCTION__));
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
dhd_os_sdlock(bus->dhd);
if (bus->sih && !bus->dhd->dongle_isolation) {
+ if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev) &&
+ bus->sih->chip != CYW55560_CHIP_ID) {
+ dhd_bus_pcie_pwr_req_reload_war(bus);
+ }
+
/* Has insmod fails after rmmod issue in Brix Android */
#if !defined(OEM_ANDROID) && !defined(ANDROID)
/* HW4347-909 */
@@ -1483,19 +1992,32 @@
pcie_set_trefup_time_100us(bus->sih);
}
- if (PMUREV(bus->sih->pmurev) > 31) { /* start from 4347 */
+ /* disable fast lpo from 4347 */
+	/* For 4378/4387, do not disable fast lpo because it is always enabled;
+	 * disabling it causes insmod/rmmod reload failure.
+	 */
+ if ((PMUREV(bus->sih->pmurev) > 31) &&
+ (bus->sih->buscorerev != 66) &&
+ (bus->sih->buscorerev != 68) &&
+ (bus->sih->buscorerev != 69) &&
+ (bus->sih->buscorerev != 70)) {
si_pmu_fast_lpo_disable(bus->sih);
}
-#endif // endif
+#endif /* !OEM_ANDROID && !ANDROID */
/* if the pcie link is down, watchdog reset
* should not be done, as it may hang
*/
if (!bus->is_linkdown) {
-
- pcie_watchdog_reset(bus->osh, bus->sih,
- WD_ENABLE_MASK, WD_SSRESET_PCIE_F0_EN);
+#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
+	/* For EFI, depending on the BT-over-PCIe mode,
+	 * we either power toggle or do F0 FLR
+	 * from dhdpcie_bus_release_dongle(), so there is no need to
+	 * do a dongle reset from here.
+	 */
+ dhdpcie_dongle_reset(bus);
+#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
}
bus->dhd->is_pcie_watchdog_reset = TRUE;
@@ -1523,11 +2045,31 @@
}
}
+void
+dhd_init_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (!bus->backplane_access_lock) {
+ bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
+ }
+}
+
+void
+dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (bus->backplane_access_lock) {
+ dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
+ bus->backplane_access_lock = NULL;
+ }
+}
+
/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
bool dongle_isolation = FALSE;
+#ifdef BCMQT
+ uint buscorerev = 0;
+#endif /* BCMQT */
osl_t *osh = NULL;
unsigned long flags_bus;
@@ -1554,6 +2096,19 @@
dhdpcie_free_irq(bus);
}
dhd_deinit_bus_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
+#ifdef BCMQT
+ if (IDMA_ACTIVE(bus->dhd)) {
+ /**
+ * On FPGA during exit path force set "IDMA Control Register"
+		 * to default value 0x0. Otherwise host-dongle sync for IDMA fails
+		 * during the next IDMA initialization (without a system reboot)
+ */
+ buscorerev = bus->sih->buscorerev;
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ IDMAControl(buscorerev), ~0, 0);
+ }
+#endif /* BCMQT */
/**
* dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
* access Dongle registers.
@@ -1589,6 +2144,7 @@
/* Finally free bus info */
MFREE(osh, bus, sizeof(dhd_bus_t));
+ g_dhd_bus = NULL;
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
@@ -1605,14 +2161,20 @@
return;
}
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
+ return;
+ }
+
if (bus->sih) {
if (!dongle_isolation &&
- (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
- pcie_watchdog_reset(bus->osh, bus->sih,
- WD_ENABLE_MASK, WD_SSRESET_PCIE_F0_EN);
+ (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
+ dhdpcie_dongle_reset(bus);
}
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
if (bus->ltrsleep_on_unload) {
si_corereg(bus->sih, bus->sih->buscoreidx,
OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
@@ -1622,10 +2184,18 @@
pcie_serdes_iddqdisable(bus->osh, bus->sih,
(sbpcieregs_t *) bus->regs);
-#ifndef DONGLE_ENABLE_ISOLATION
- /* Disable CLKREQ# */
- dhdpcie_clkreq(bus->osh, 1, 0);
-#endif /* !DONGLE_ENABLE_ISOLATION */
+ /* For inbuilt drivers pcie clk req will be done by RC,
+ * so do not do clkreq from dhd
+ */
+ if (dhd_download_fw_on_driverload)
+ {
+ /* Disable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 0);
+ }
+
+#ifdef PCIE_SUSPEND_DURING_DETACH
+ dhdpcie_bus_clock_stop(bus);
+#endif /* PCIE_SUSPEND_DURING_DETACH */
if (bus->sih != NULL) {
si_detach(bus->sih);
@@ -1692,7 +2262,6 @@
/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
- uint32 status;
unsigned long flags, flags_bus;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
@@ -1708,6 +2277,7 @@
DHD_DISABLE_RUNTIME_PM(bus->dhd);
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
@@ -1719,8 +2289,11 @@
dhdpcie_bus_intr_disable(bus);
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ if (!bus->is_linkdown) {
+ uint32 status;
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ }
if (!dhd_download_fw_on_driverload) {
dhd_dpc_kill(bus->dhd);
@@ -1900,7 +2473,7 @@
return 0;
}
-#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
+#if defined(USE_CID_CHECK)
#define MAX_EXTENSION 20
#define MODULE_BCM4361_INDEX 3
@@ -1918,12 +2491,16 @@
#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
#define MAX_VID_LEN 8
-#define MAX_VNAME_LEN 30
-#define CIS_TUPLE_HDR_LEN 2
+#define CIS_TUPLE_HDR_LEN 2
+#if defined(BCM4361_CHIP)
#define CIS_TUPLE_START_ADDRESS 0x18011110
#define CIS_TUPLE_END_ADDRESS 0x18011167
-#define CIS_TUPLE_MAX_COUNT (CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
- + 1) / sizeof(uint32)
+#elif defined(BCM4375_CHIP)
+#define CIS_TUPLE_START_ADDRESS 0x18011120
+#define CIS_TUPLE_END_ADDRESS 0x18011177
+#endif /* defined(BCM4361_CHIP) */
+#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
+ + 1) / sizeof(uint32))
#define CIS_TUPLE_TAG_START 0x80
#define CIS_TUPLE_TAG_VENDOR 0x81
#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
@@ -1982,6 +2559,31 @@
{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
};
+#define MODULE_BCM4375_INDEX 3
+
+naming_info_t bcm4375_naming_table[] = {
+ { {""}, {""}, {""} },
+ { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
+ { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
+ { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
+ { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
+ { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
+ { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
+ { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
+ { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
+ { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
+ { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
+ { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
+ { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
+ { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
+ { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
+ { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
+ { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
+ { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
+ { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
+ { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
+};
+
static naming_info_t *
dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
{
@@ -2029,7 +2631,7 @@
}
static int
-dhd_parse_board_information_bcm4361(dhd_bus_t *bus, int *boardtype,
+dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
unsigned char *vid, int *vid_length)
{
int boardtype_backplane_addr[] = {
@@ -2129,7 +2731,7 @@
}
chip_rev = bus->sih->chiprev;
- if (dhd_parse_board_information_bcm4361(bus, &board_type, vid, &vid_length)
+ if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
!= BCME_OK) {
DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
return NULL;
@@ -2137,6 +2739,7 @@
DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
+#if defined(BCM4361_CHIP)
/* A0 chipset has exception only */
if (chip_rev == CHIP_REV_A0) {
if (board_type == BOARD_TYPE_EPA) {
@@ -2156,10 +2759,19 @@
}
}
}
+#else
+ cid_info = dhd_get_cid_info(vid, vid_length);
+ if (cid_info) {
+ info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
+ if (strstr(cid_info, CID_FEM_MURATA)) {
+ *is_murata_fem = TRUE;
+ }
+ }
+#endif /* BCM4361_CHIP */
return info;
}
-#endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
+#endif /* USE_CID_CHECK */
static int
concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
@@ -2172,7 +2784,7 @@
memset(module_type, 0, sizeof(module_type));
- if (dhd_check_module_bcm4361(module_type,
+ if (dhd_check_module_bcm(module_type,
MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
info = dhd_find_naming_info(bcm4361_naming_table,
ARRAYSIZE(bcm4361_naming_table), module_type);
@@ -2206,6 +2818,44 @@
return ret;
}
+static int
+concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ int ret = BCME_OK;
+#if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
+ char module_type[MAX_VNAME_LEN];
+ naming_info_t *info = NULL;
+ bool is_murata_fem = FALSE;
+
+ memset(module_type, 0, sizeof(module_type));
+
+ if (dhd_check_module_bcm(module_type,
+ MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
+ info = dhd_find_naming_info(bcm4375_naming_table,
+ ARRAYSIZE(bcm4375_naming_table), module_type);
+ } else {
+		/* in case .cid.info doesn't exist */
+ info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
+ ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
+ }
+
+ if (info) {
+ strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
+ strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
+ } else {
+ DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+#else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
+ char chipver_tag[10] = {0, };
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+#endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
+
+ return ret;
+}
+
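Like the 4361 path, the 4375 routine above looks up the module type in a naming table and appends the matching extensions to the firmware and NVRAM paths. A small user-space sketch of that string composition is shown here; append_ext() and the buffer contents are hypothetical. The sketch bounds the append by the remaining destination space, whereas strncat(dst, src, strlen(src)) as used above bounds only by the source length.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: append a naming-table extension (e.g. "_ES01_semco_b0")
 * to a base path, bounded by the destination size rather than the source length.
 */
static int append_ext(char *path, size_t path_sz, const char *ext)
{
	size_t used = strlen(path);

	if (used + strlen(ext) + 1 > path_sz)
		return -1;                              /* would overflow */
	snprintf(path + used, path_sz - used, "%s", ext);
	return 0;
}

int main(void)
{
	char fw_path[64] = "fw_bcm4375.bin";
	char nv_path[64] = "nvram_bcm4375";

	append_ext(fw_path, sizeof(fw_path), "_b0");
	append_ext(nv_path, sizeof(nv_path), "_ES01_semco_b0");
	printf("%s\n%s\n", fw_path, nv_path);
	return 0;
}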
int
concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
@@ -2235,6 +2885,9 @@
case BCM4347_CHIP_ID:
res = concate_revision_bcm4361(bus, fw_path, nv_path);
break;
+ case BCM4375_CHIP_ID:
+ res = concate_revision_bcm4375(bus, fw_path, nv_path);
+ break;
default:
DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
return res;
@@ -2291,6 +2944,7 @@
DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
__FUNCTION__, bus->fw_path, bus->nv_path));
+ dhdpcie_dump_resource(bus);
ret = dhdpcie_download_firmware(bus, osh);
@@ -2368,19 +3022,29 @@
{
int bcmerror = BCME_ERROR;
int offset = 0;
-#if defined(DHD_FW_MEM_CORRUPTION)
- uint8 *p_org_fw = NULL;
- uint32 org_fw_size = 0;
- uint32 fw_write_offset = 0;
-#endif /* DHD_FW_MEM_CORRUPTION */
int len = 0;
bool store_reset;
char *imgbuf = NULL;
- uint8 *memblock = NULL, *memptr;
+ uint8 *memblock = NULL, *memptr = NULL;
int offset_end = bus->ramsize;
+ uint32 file_size = 0, read_len = 0;
struct trx_header *trx_hdr;
bool trx_chk = TRUE;
+#if defined(DHD_FW_MEM_CORRUPTION)
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+ dhd_tcm_test_enable = TRUE;
+ } else {
+ dhd_tcm_test_enable = FALSE;
+ }
+#endif /* DHD_FW_MEM_CORRUPTION */
+ DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
+ /* TCM check */
+ if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
+ DHD_ERROR(("dhd_bus_tcm_test failed\n"));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
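The TCM test that now gates the download is essentially a pattern write followed by a read-back compare over dongle RAM. A local-buffer sketch of that verify idea is below; it is not the real dhd_bus_tcm_test(), which works over PCIe membytes accesses, but it shows the check itself.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Sketch only: write a pattern and read it back, failing on the first mismatch. */
static bool pattern_check(uint8_t *mem, size_t len, uint8_t pattern)
{
	size_t i;

	memset(mem, pattern, len);      /* write phase */
	for (i = 0; i < len; i++) {     /* read-back phase */
		if (mem[i] != pattern)
			return false;
	}
	return true;
}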
DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
/* Should succeed in opening image if it is actually given through registry
@@ -2391,6 +3055,12 @@
goto err;
}
+ file_size = dhd_os_get_image_size(imgbuf);
+ if (!file_size) {
+ DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
+ goto err;
+ }
+
memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
@@ -2401,26 +3071,6 @@
memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
}
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- org_fw_size = dhd_os_get_image_size(imgbuf);
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd,
- DHD_PREALLOC_MEMDUMP_RAM, org_fw_size);
-#else
- p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- if (p_org_fw == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
- __FUNCTION__, org_fw_size));
- bcmerror = BCME_NOMEM;
- goto err;
- } else {
- memset(p_org_fw, 0, org_fw_size);
- }
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
-
/* check if CR4/CA7 */
store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
@@ -2431,6 +3081,14 @@
bcmerror = BCME_ERROR;
goto err;
}
+ read_len += len;
+ if (read_len > file_size) {
+ DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
+ " file_size=%u truncating len to %d \n", __FUNCTION__,
+ len, read_len, file_size, (len - (read_len - file_size))));
+ len -= (read_len - file_size);
+ }
+
/* if address is 0, store the reset instruction to be written in 0 */
if (store_reset) {
ASSERT(offset == 0);
@@ -2458,12 +3116,6 @@
goto err;
}
offset += MEMBLOCK;
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- memcpy((p_org_fw + fw_write_offset), memptr, len);
- fw_write_offset += len;
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
if (offset >= offset_end) {
DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
@@ -2471,71 +3123,12 @@
bcmerror = BCME_ERROR;
goto err;
}
- }
-#ifdef DHD_FW_MEM_CORRUPTION
- /* Read and compare the downloaded code */
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- unsigned char *p_readback_buf = NULL;
- uint32 compared_len;
- uint32 remaining_len = 0;
- compared_len = 0;
- p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK);
- if (p_readback_buf == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
- __FUNCTION__, MEMBLOCK));
- bcmerror = BCME_NOMEM;
- goto compare_err;
- }
- /* Read image to verify downloaded contents. */
- offset = bus->dongle_ram_base;
-
- while (compared_len < org_fw_size) {
- memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK);
- remaining_len = org_fw_size - compared_len;
-
- if (remaining_len >= MEMBLOCK) {
- len = MEMBLOCK;
- } else {
- len = remaining_len;
- }
- bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
- (uint8 *)p_readback_buf, len);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
- __FUNCTION__, bcmerror, MEMBLOCK, offset));
- goto compare_err;
- }
-
- if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) {
- DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
- __FUNCTION__, compared_len));
- bcmerror = BCME_ERROR;
- goto compare_err;
- }
-
- compared_len += len;
- offset += len;
- }
- DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
-
-compare_err:
- if (p_readback_buf) {
- MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK);
+ if (read_len >= file_size) {
+ break;
}
}
-#endif /* DHD_FW_MEM_CORRUPTION */
-
err:
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (p_org_fw) {
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size);
-#else
- VMFREE(bus->dhd->osh, p_org_fw, org_fw_size);
-#endif // endif
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
if (memblock) {
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
}
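The clamp introduced in this hunk keeps the final dhdpcie_bus_membytes() write from running past the end of the image: once read_len exceeds file_size, len is reduced by the overshoot and the loop exits. A small stand-alone example of that arithmetic with made-up sizes:

#include <stdio.h>

/* Illustrative values only: file_size and block do not correspond to any real image. */
int main(void)
{
	unsigned file_size = 1000, block = 256;
	unsigned read_len = 0, len;

	while (read_len < file_size) {
		len = block;
		read_len += len;
		if (read_len > file_size)
			len -= (read_len - file_size);  /* 1024 - 1000 = 24 -> last len = 232 */
		printf("write %u bytes\n", len);
		if (read_len >= file_size)
			break;
	}
	return 0;
}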
@@ -2846,6 +3439,9 @@
uint readlen = 0;
uint i = 0;
+ if (!DHD_FWLOG_ON())
+ return 0;
+
/* Don't do anything until FWREADY updates console address */
if (bus->console_addr == 0)
return -1;
@@ -3058,14 +3654,14 @@
if (mbuffer == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
bcmerror = BCME_NOMEM;
- goto done;
+ goto done2;
}
}
if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
bcmerror = BCME_NOMEM;
- goto done;
+ goto done2;
}
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
@@ -3075,7 +3671,7 @@
dhd_bus_pcie_pwr_req(bus);
}
if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
- goto done;
+ goto done1;
}
bcm_binit(&strbuf, data, size);
@@ -3106,7 +3702,7 @@
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_exp_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done;
+ goto done1;
}
str[maxstrlen - 1] = '\0';
@@ -3118,7 +3714,7 @@
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_file_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done;
+ goto done1;
}
str[maxstrlen - 1] = '\0';
@@ -3134,7 +3730,7 @@
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
bus->dhd->dongle_trap_occured = TRUE;
- goto done;
+ goto done1;
}
dhd_bus_dump_trap_info(bus, &strbuf);
}
@@ -3143,9 +3739,6 @@
if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
- /* wake up IOCTL wait event */
- dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
-
dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
@@ -3153,9 +3746,7 @@
/* save core dump or write to a file */
if (bus->dhd->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
- if (bus->dhd->sssr_inited) {
- dhdpcie_sssr_dump(bus->dhd);
- }
+ bus->dhd->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
dhdpcie_mem_dump(bus);
@@ -3169,6 +3760,9 @@
bus->dhd->dongle_trap_occured = TRUE;
}
+ /* wake up IOCTL wait event */
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
copy_hang_info_trap(bus->dhd);
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
@@ -3176,15 +3770,16 @@
}
+done1:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-
-done:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
+done2:
if (mbuffer)
MFREE(bus->dhd->osh, mbuffer, msize);
if (str)
@@ -3240,51 +3835,47 @@
#if defined(DHD_FW_COREDUMP)
static int
-dhdpcie_mem_dump(dhd_bus_t *bus)
+dhdpcie_get_mem_dump(dhd_bus_t *bus)
{
- int ret = 0;
- int size; /* Full mem size */
- int start = bus->dongle_ram_base; /* Start address */
+ int ret = BCME_OK;
+ int size = 0;
+ int start = 0;
int read_size = 0; /* Read size of each iteration */
- uint8 *buf = NULL, *databuf = NULL;
+ uint8 *p_buf = NULL, *databuf = NULL;
-#ifdef EXYNOS_PCIE_DEBUG
- exynos_pcie_register_dump(1);
-#endif /* EXYNOS_PCIE_DEBUG */
-
-#ifdef SUPPORT_LINKDOWN_RECOVERY
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
return BCME_ERROR;
}
-#endif /* SUPPORT_LINKDOWN_RECOVERY */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+ if (!bus->dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
return BCME_ERROR;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ }
+
+ size = bus->ramsize; /* Full mem size */
+ start = bus->dongle_ram_base; /* Start address */
/* Get full mem size */
- size = bus->ramsize;
- buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
return BCME_ERROR;
}
/* Read mem content */
DHD_TRACE_HW4(("Dump dongle memory\n"));
- databuf = buf;
- while (size)
- {
+ databuf = p_buf;
+ while (size > 0) {
read_size = MIN(MEMBLOCK, size);
- if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
- {
+ ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
+ if (ret) {
DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
#ifdef DHD_DEBUG_UART
bus->dhd->memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
- return BCME_ERROR;
+ break;
}
DHD_TRACE(("."));
@@ -3293,11 +3884,53 @@
start += read_size;
databuf += read_size;
}
-#ifdef DHD_DEBUG_UART
- bus->dhd->memdump_success = TRUE;
-#endif /* DHD_DEBUG_UART */
- dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+ return ret;
+}
+
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret;
+
+#ifdef EXYNOS_PCIE_DEBUG
+ exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
+ /* panic only for DUMP_MEMFILE_BUGON */
+ ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
+ return BCME_ERROR;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+ return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ ret = dhdpcie_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
/* buf, actually soc_ram free handled in dhd_{free,clear} */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
@@ -3309,6 +3942,17 @@
}
int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_get_mem_dump(dhdp->bus);
+}
+
+int
dhd_bus_mem_dump(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
@@ -3319,7 +3963,13 @@
return BCME_ERROR;
}
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
return BCME_ERROR;
@@ -3330,29 +3980,6 @@
DHD_OS_WAKE_UNLOCK(dhdp);
return ret;
}
-
-int
-dhd_dongle_mem_dump(void)
-{
- if (!g_dhd_bus) {
- DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
- return -ENODEV;
- }
-
- dhd_bus_dump_console_buffer(g_dhd_bus);
- dhd_prot_debug_info_print(g_dhd_bus->dhd);
-
- g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
- g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
-
-#ifdef DHD_PCIE_RUNTIMEPM
- dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
-#endif /* DHD_PCIE_RUNTIMEPM */
-
- dhd_bus_mem_dump(g_dhd_bus->dhd);
- return 0;
-}
-EXPORT_SYMBOL(dhd_dongle_mem_dump);
#endif /* DHD_FW_COREDUMP */
int
@@ -3479,6 +4106,11 @@
flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+ if (flow_ring_node->prot_info == NULL) {
+ DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
+ return BCME_NOTREADY;
+ }
+
#ifdef DHD_LOSSLESS_ROAMING
if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
@@ -3550,8 +4182,13 @@
}
}
+#ifdef DHD_HP2P
+ if (!flow_ring_node->hp2p_ring) {
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+ }
+#else
dhd_prot_txdata_write_flush(bus->dhd, flow_id);
-
+#endif // endif
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
@@ -3691,6 +4328,19 @@
void
dhd_bus_start_queue(struct dhd_bus *bus)
{
+ /*
+	 * The Tx queue has been stopped due to a resource shortage, or
+	 * the bus is not in a state to turn on.
+	 *
+	 * Note that we try to re-start the network interface only
+	 * when we have enough resources; one has to first change the
+	 * flag indicating we have all the resources.
+ */
+ if (dhd_prot_check_tx_resource(bus->dhd)) {
+ DHD_ERROR(("%s: Interface NOT started, previously stopped "
+ "due to resource shortage\n", __FUNCTION__));
+ return;
+ }
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
@@ -3742,36 +4392,67 @@
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
+void
+dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+ dhdpcie_os_setbar1win(bus, addr);
+}
+
/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
- W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm8(bus, offset, data);
+ }
}
uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
volatile uint8 data;
- data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint8)-1;
+ } else {
+ data = dhdpcie_os_rtcm8(bus, offset);
+ }
return data;
}
void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
- W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm32(bus, offset, data);
+ }
}
void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
- W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm16(bus, offset, data);
+ }
}
#ifdef DHD_SUPPORT_64BIT
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
- W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm64(bus, offset, data);
+ }
}
#endif /* DHD_SUPPORT_64BIT */
@@ -3779,7 +4460,12 @@
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
volatile uint16 data;
- data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint16)-1;
+ } else {
+ data = dhdpcie_os_rtcm16(bus, offset);
+ }
return data;
}
@@ -3787,7 +4473,12 @@
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
volatile uint32 data;
- data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint32)-1;
+ } else {
+ data = dhdpcie_os_rtcm32(bus, offset);
+ }
return data;
}
@@ -3796,7 +4487,12 @@
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
volatile uint64 data;
- data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint64)-1;
+ } else {
+ data = dhdpcie_os_rtcm64(bus, offset);
+ }
return data;
}
#endif /* DHD_SUPPORT_64BIT */
@@ -3932,12 +4628,16 @@
DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
break;
-#ifdef D2H_MINIDUMP
- case DNGL_TO_HOST_TRAP_ADDR_LEN:
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len);
- dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ case HOST_SCB_ADDR:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
+#ifdef DHD_SUPPORT_64BIT
+ dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
+#else /* !DHD_SUPPORT_64BIT */
+ dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
+#endif /* DHD_SUPPORT_64BIT */
+ DHD_INFO(("Wrote host_scb_addr:0x%x\n",
+ (uint32) HTOL32(*(uint32 *)data)));
break;
-#endif /* D2H_MINIDUMP */
default:
break;
@@ -3987,6 +4687,11 @@
*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
break;
+ case HOST_SCB_ADDR:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ break;
+
default :
break;
}
@@ -4040,15 +4745,19 @@
DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
name, (set ? "set" : "get"), len, plen));
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
/* Look up var locally; if not found pass to host driver */
if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
goto exit;
}
+ if (MULTIBP_ENAB(bus->sih)) {
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
+ DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ }
+
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
*/
@@ -4069,8 +4778,21 @@
bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
+ /* In DEVRESET_QUIESCE/DEVRESET_ON,
+	 * this includes dongle re-attach, which initializes the pwr_req_ref count to 0 and
+	 * causes a pwr_req_ref count mismatch in the pwr req clear function and a hang.
+ * In this case, bypass pwr req clear.
+ */
+ if (bcmerror == BCME_DNGL_DEVRESET) {
+ bcmerror = BCME_OK;
+ } else {
+ if (MULTIBP_ENAB(bus->sih)) {
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
+ DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ }
}
return bcmerror;
} /* dhd_bus_iovar_op */
@@ -4408,53 +5130,116 @@
#define PCIE_FLR_CAPAB_BIT 28
#define PCIE_FUNCTION_LEVEL_RESET_BIT 15
-#define PCIE_SSRESET_STATUS_BIT 13
-#define PCIE_SSRESET_DISABLE_BIT 14
/* Change delays for only QT HW, FPGA and silicon uses same delay */
#ifdef BCMQT_HW
#define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
#define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
#else
-#define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
#define DHD_SSRESET_STATUS_RETRY_DELAY 40u
#endif // endif
-#define DHD_SSRESET_STATUS_RETRIES 50u
+/*
+ * Increase the SSReset de-assert time to 8 ms,
+ * since re-scan takes longer on 4378B0.
+ */
+#define DHD_SSRESET_STATUS_RETRIES 200u
+
+static void
+dhdpcie_enum_reg_init(dhd_bus_t *bus)
+{
+ /* initialize Function control register (clear bit 4) to HW init value */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
+ PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
+
+ /* clear IntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
+ /* clear IntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
+
+ /* clear MSIVector */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
+ /* clear MSIIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
+ /* clear MSIIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
+
+ /* clear PowerIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
+ /* clear PowerIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
+
+ /* clear MailboxIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
+ /* clear MailboxInt */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
+}
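Each *IntStatus register above is cleared by reading its currently latched value (the inner si_corereg() call with mask 0) and writing that value back with a full mask, the usual write-1-to-clear idiom. A minimal sketch of the idiom on a generic memory-mapped status register (the mapping itself is hypothetical):

#include <stdint.h>

/* Write-1-to-clear sketch: write back exactly the bits that were pending so
 * only those events are acknowledged; events latched afterwards are preserved.
 */
static inline void clear_pending(volatile uint32_t *status_reg)
{
	uint32_t pending = *status_reg;   /* read latched bits */
	*status_reg = pending;            /* W1C acknowledges them */
}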
int
dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
{
- bool flr_capab;
+ uint flr_capab;
uint val;
int retry = 0;
DHD_ERROR(("******** Perform FLR ********\n"));
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ if (bus->pcie_mailbox_mask != 0) {
+ dhdpcie_bus_intr_disable(bus);
+ }
+ /* initialize F0 enum registers before FLR for rev66/67 */
+ dhdpcie_enum_reg_init(bus);
+ }
+
/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
- DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
+ DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
if (!flr_capab) {
DHD_ERROR(("Chip does not support FLR\n"));
return BCME_UNSUPPORTED;
}
+	/* WAR: Disable FLR reset for H2/4375 chips so that a legacy reset is performed */
+ else if ((bus->sih->chip == CYW55560_CHIP_ID) || (bus->sih->chip == BCM4375_CHIP_ID)) {
+ DHD_INFO(("H2/4375 CHIP return unsupported\n"));
+ return BCME_UNSUPPORTED;
+ }
/* Save pcie config space */
- DHD_ERROR(("Save Pcie Config Space\n"));
+ DHD_INFO(("Save Pcie Config Space\n"));
DHD_PCIE_CONFIG_SAVE(bus);
/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
- DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
+ DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
if (force_fail) {
@@ -4474,16 +5259,16 @@
}
/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
- DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
+ DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
"is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
do {
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
@@ -4501,11 +5286,11 @@
bus->flr_force_fail = FALSE;
DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
}
- return BCME_ERROR;
+ return BCME_DONGLE_DOWN;
}
/* Restore pcie config space */
- DHD_ERROR(("Restore Pcie Config Space\n"));
+ DHD_INFO(("Restore Pcie Config Space\n"));
DHD_PCIE_CONFIG_RESTORE(bus);
DHD_ERROR(("******** FLR Succedeed ********\n"));
@@ -4513,6 +5298,104 @@
return BCME_OK;
}
+#ifdef DHD_USE_BP_RESET
+#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
+
+#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
+#define DHD_BP_RESET_STATUS_RETRIES 50u
+
+#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
+#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
+int
+dhd_bus_perform_bp_reset(struct dhd_bus *bus)
+{
+ uint val;
+ int retry = 0;
+ uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
+ int ret = BCME_OK;
+ bool cond;
+
+ DHD_ERROR(("******** Perform BP reset ********\n"));
+
+ /* Disable ASPM */
+ DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val & (~PCIE_ASPM_ENAB);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ /* wait for delay usec */
+ DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
+ OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
+
+ /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
+ DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
+
+	/* Wait till backplane reset is ASSERTED, i.e.
+	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
+	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
+	 * otherwise the DAR register will read the previous (stale) value
+ */
+ DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
+ "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
+ do {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (cond) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
+ ret = BCME_ERROR;
+ goto aspm_enab;
+ }
+
+ /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
+ DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
+ "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
+ PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
+ do {
+ val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ dar_clk_ctrl_status_reg, 0, 0);
+ DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
+ dar_clk_ctrl_status_reg, val));
+ cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (cond) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
+ ret = BCME_ERROR;
+ }
+
+aspm_enab:
+ /* Enable ASPM */
+ DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val | (PCIE_ASPM_L1_ENAB);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ DHD_ERROR(("******** BP reset Succedeed ********\n"));
+
+ return ret;
+}
+#endif /* DHD_USE_BP_RESET */
+
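Both the FLR path and the backplane-reset path above poll a status bit with a fixed retry budget and a fixed delay between reads. A compact, self-contained version of that poll-with-timeout pattern follows; read_fn and delay_us are hypothetical stand-ins for the config-space read and OSL_DELAY() used in the driver.

#include <stdbool.h>
#include <stdint.h>

/* Poll-with-retry sketch: wait until 'bit' in the value returned by read_fn()
 * clears, giving up after 'retries' attempts with 'delay' microseconds between reads.
 */
static bool wait_bit_clear(uint32_t (*read_fn)(void), void (*delay_us)(unsigned),
                           unsigned bit, unsigned retries, unsigned delay)
{
	unsigned i;

	for (i = 0; i <= retries; i++) {
		if (!(read_fn() & (1u << bit)))
			return true;              /* bit cleared in time */
		delay_us(delay);
	}
	return false;                             /* timed out; caller reports the error */
}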
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
@@ -4527,7 +5410,7 @@
if (flag == TRUE) { /* Turn off WLAN */
/* Removing Power */
DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
-
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
bus->dhd->up = FALSE;
/* wait for other contexts to finish -- if required a call
@@ -4556,6 +5439,7 @@
dhdpcie_free_irq(bus);
}
dhd_deinit_bus_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
dhd_bus_release_dongle(bus);
dhdpcie_bus_free_resource(bus);
bcmerror = dhdpcie_bus_disable_device(bus);
@@ -4591,6 +5475,7 @@
}
#endif /* CONFIG_ARCH_MSM */
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
@@ -4666,8 +5551,12 @@
__FUNCTION__, bcmerror));
goto done;
}
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
#endif /* CONFIG_ARCH_MSM */
bus->is_linkdown = 0;
+ bus->cto_triggered = 0;
#ifdef SUPPORT_LINKDOWN_RECOVERY
bus->read_shm_fail = FALSE;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -4701,6 +5590,10 @@
bus->dhd->dongle_reset = FALSE;
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
bcmerror = dhd_bus_start(dhdp);
if (bcmerror) {
DHD_ERROR(("%s: dhd_bus_start: %d\n",
@@ -4725,6 +5618,7 @@
done:
if (bcmerror) {
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
}
@@ -4777,6 +5671,67 @@
return -1;
}
#endif /* DHD_PCIE_REG_ACCESS */
+
+/* si_backplane_access() manages a shared resource - the BAR0 mapping - hence its
+ * calls shall be serialized. This wrapper function provides such serialization
+ * and shall be used everywhere instead of calling si_backplane_access() directly.
+ *
+ * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet
+ * (which may call dhdpcie_sssr_dump()), iovar
+ * ("sbreg", "membytes", etc.) and procfs (used by GDB proxy). To avoid race
+ * conditions, calls to si_backplane_access() shall be serialized. The presence of
+ * a tasklet context implies that serialization shall be based on a spinlock. Hence
+ * the Linux implementation of dhd_pcie_backplane_access_[un]lock() is
+ * spinlock-based.
+ *
+ * Other platforms may add their own implementations of
+ * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
+ * needed, the implementation might be empty).
+ */
+static uint
+serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
+{
+ uint ret;
+ unsigned long flags;
+ DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
+ ret = si_backplane_access(bus->sih, addr, size, val, read);
+ DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
+ return ret;
+}
+
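A usage sketch (illustrative, not part of this patch) of the wrapper: every backplane access now funnels through the spinlock, so the tasklet, iovar and procfs paths cannot race on the shared BAR0 window. The helper name read_backplane_reg is hypothetical; driver types and BCME_* codes are assumed.

static int
read_backplane_reg(dhd_bus_t *bus, uint addr, uint *val)
{
	/* serialized read of one backplane word */
	if (serialized_backplane_access(bus, addr, sizeof(*val), val, TRUE) != BCME_OK) {
		DHD_ERROR(("%s: backplane read failed at 0x%x\n", __FUNCTION__, addr));
		return BCME_ERROR;
	}
	return BCME_OK;
}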
+static int
+dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
+{
+ int h2d_support, d2h_support;
+
+ d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
+ h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
+ return (d2h_support | (h2d_support << 1));
+
+}
+int
+dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
+{
+ int bcmerror = 0;
+ /* Can change it only during initialization/FW download */
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ if ((int_val > 3) || (int_val < 0)) {
+ DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+ bcmerror = BCME_BADARG;
+ } else {
+ dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+ dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+ dhd->dma_ring_upd_overwrite = TRUE;
+ }
+ } else {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ }
+
+ return bcmerror;
+
+}
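The DMA ring-index support is packed into a two-bit value: bit 0 for D2H ring-update support, bit 1 for H2D, so the legal settings are 0 through 3 as the set handler checks. A tiny worked example of the encode/decode (stand-alone, values arbitrary):

#include <stdio.h>

int main(void)
{
	int d2h = 1, h2d = 1;
	int encoded = d2h | (h2d << 1);               /* 3: both directions enabled */

	printf("encoded=%d d2h=%d h2d=%d\n",
	       encoded, encoded & 1, (encoded & 2) ? 1 : 0);
	return 0;
}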
/**
* IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
*
@@ -4854,7 +5809,8 @@
addr = sdreg.offset;
size = sdreg.func;
- if (si_backplane_access(bus->sih, addr, size, (uint *)&int_val, TRUE) != BCME_OK) {
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
+ {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
break;
@@ -4872,7 +5828,7 @@
addr = sdreg.offset;
size = sdreg.func;
- if (si_backplane_access(bus->sih, addr, size,
+ if (serialized_backplane_access(bus, addr, size,
(uint *)(&sdreg.value), FALSE) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
@@ -4890,7 +5846,8 @@
addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
size = sdreg.func;
- if (si_backplane_access(bus->sih, addr, size, (uint *)&int_val, TRUE) != BCME_OK) {
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
+ {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
break;
@@ -4908,7 +5865,7 @@
addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
size = sdreg.func;
- if (si_backplane_access(bus->sih, addr, size,
+ if (serialized_backplane_access(bus, addr, size,
(uint *)(&sdreg.value), FALSE) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
@@ -4964,36 +5921,37 @@
break;
case IOV_SVAL(IOV_PCIE_DMAXFER): {
- int int_val4 = 0;
- int wait = 0;
- int core_num = 0;
- if (plen >= (int)sizeof(int_val) * 4) {
- bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
- &int_val4, sizeof(int_val4));
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
+
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
}
- if (plen >= (int)sizeof(int_val) * 5) {
- bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
- &wait, sizeof(wait));
- }
- if (plen >= (int)sizeof(core_num) * 6) {
- bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
- &core_num, sizeof(core_num));
- }
- bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
- int_val4, core_num, wait);
- if (wait && bcmerror >= 0) {
- /* get the status of the dma transfer */
- int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
- bcopy(&int_val4, params, sizeof(int_val));
+
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
+ dmaxfer->src_delay, dmaxfer->dest_delay,
+ dmaxfer->type, dmaxfer->core_num,
+ dmaxfer->should_wait);
+
+ if (dmaxfer->should_wait && bcmerror >= 0) {
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
}
break;
}
case IOV_GVAL(IOV_PCIE_DMAXFER): {
- int dma_status = 0;
- dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
- bcopy(&dma_status, arg, val_size);
- bcmerror = BCME_OK;
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
+ }
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
break;
}
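Caller-side sketch (illustrative) of the reworked dmaxfer iovar: instead of a packed array of ints, the request is a versioned, fixed-length dma_xfer_info_t, and both the version and length fields are validated before use. Only fields visible in the handler above are set; the helper name and values are hypothetical.

static void
fill_dmaxfer_req(dma_xfer_info_t *req, uint32 num_bytes)
{
	memset(req, 0, sizeof(*req));
	req->version     = DHD_DMAXFER_VERSION;
	req->length      = sizeof(dma_xfer_info_t);   /* both checked by the handler */
	req->num_bytes   = num_bytes;
	req->src_delay   = 0;
	req->dest_delay  = 0;
	req->type        = 0;
	req->core_num    = 0;
	req->should_wait = TRUE;  /* handler then polls dhdmsgbuf_dmaxfer_status() */
}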
@@ -5274,30 +6232,13 @@
break;
}
case IOV_GVAL(IOV_DMA_RINGINDICES):
- { int h2d_support, d2h_support;
-
- d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0;
- h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0;
- int_val = d2h_support | (h2d_support << 1);
+ {
+ int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
bcopy(&int_val, arg, sizeof(int_val));
break;
}
case IOV_SVAL(IOV_DMA_RINGINDICES):
- /* Can change it only during initialization/FW download */
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
- if ((int_val > 3) || (int_val < 0)) {
- DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
- bcmerror = BCME_BADARG;
- } else {
- bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
- bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
- bus->dhd->dma_ring_upd_overwrite = TRUE;
- }
- } else {
- DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
- __FUNCTION__));
- bcmerror = BCME_NOTDOWN;
- }
+ bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
break;
case IOV_GVAL(IOV_METADATA_DBG):
@@ -5361,11 +6302,13 @@
case IOV_SVAL(IOV_DEVRESET):
switch (int_val) {
case DHD_BUS_DEVRESET_ON:
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
+ break;
case DHD_BUS_DEVRESET_OFF:
- dhd_bus_devreset(bus->dhd, (uint8)int_val);
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
break;
case DHD_BUS_DEVRESET_FLR:
- dhd_bus_perform_flr(bus, bus->flr_force_fail);
+ bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
break;
case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
bus->flr_force_fail = TRUE;
@@ -5395,7 +6338,11 @@
#ifdef DHD_PCIE_RUNTIMEPM
case IOV_GVAL(IOV_IDLETIME):
- int_val = bus->idletime;
+ if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
+ int_val = bus->idletime;
+ } else {
+ int_val = 0;
+ }
bcopy(&int_val, arg, val_size);
break;
@@ -5547,30 +6494,7 @@
break;
case IOV_SVAL(IOV_CTO_PREVENTION):
- {
- uint32 pcie_lnkst;
-
- if (bus->sih->buscorerev < 19) {
- bcmerror = BCME_UNSUPPORTED;
- break;
- }
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
-
- pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configdata), 0, 0);
-
- if ((bus->sih->buscorerev == 19) &&
- (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
- PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
- bcmerror = BCME_UNSUPPORTED;
- break;
- }
- bus->cto_enable = bool_val;
- dhdpcie_cto_init(bus, bus->cto_enable);
- DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
- __FUNCTION__, bus->cto_enable));
- }
+ bcmerror = dhdpcie_cto_init(bus, bool_val);
break;
case IOV_GVAL(IOV_CTO_PREVENTION):
@@ -5607,11 +6531,18 @@
case IOV_SVAL(IOV_PCIE_WD_RESET):
if (bool_val) {
- pcie_watchdog_reset(bus->osh, bus->sih,
- WD_ENABLE_MASK, WD_SSRESET_PCIE_F0_EN);
+ /* Legacy chipcommon watchdog reset */
+ dhdpcie_cc_watchdog_reset(bus);
}
break;
+ case IOV_GVAL(IOV_HWA_ENAB_BMAP):
+ int_val = bus->hwa_enab_bmap;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HWA_ENAB_BMAP):
+ bus->hwa_enab_bmap = (uint8)int_val;
+ break;
case IOV_GVAL(IOV_IDMA_ENABLE):
int_val = bus->idma_enabled;
bcopy(&int_val, arg, val_size);
@@ -5637,22 +6568,104 @@
case IOV_SVAL(IOV_DAR_ENABLE):
bus->dar_enabled = (bool)int_val;
break;
-#ifdef D2H_MINIDUMP
- case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
- int_val = bus->d2h_minidump_override;
+ case IOV_GVAL(IOV_HSCBSIZE):
+ bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
+ break;
+#ifdef DHD_BUS_MEM_ACCESS
+ case IOV_GVAL(IOV_HSCBBYTES):
+ bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
+ break;
+#endif // endif
+
+#ifdef DHD_HP2P
+ case IOV_SVAL(IOV_HP2P_ENABLE):
+ dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_ENABLE):
+ int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
bcopy(&int_val, arg, val_size);
break;
- case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
- /* Can change it only before FW download */
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
- DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
- __FUNCTION__));
- bcmerror = BCME_NOTDOWN;
- break;
- }
- bus->d2h_minidump_override = (bool)int_val;
+
+ case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
+ dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
break;
-#endif /* D2H_MINIDUMP */
+
+ case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
+ int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
+ dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
+ int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
+ dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
+ int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* DHD_HP2P */
+ case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->extdtxs_in_txcpl = TRUE;
+ else
+ bus->dhd->extdtxs_in_txcpl = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
+ int_val = bus->dhd->extdtxs_in_txcpl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->hostrdy_after_init = TRUE;
+ else
+ bus->dhd->hostrdy_after_init = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
+ int_val = bus->dhd->hostrdy_after_init;
+ bcopy(&int_val, arg, val_size);
+ break;
+
default:
bcmerror = BCME_UNSUPPORTED;
break;
@@ -5682,6 +6695,48 @@
return 0;
}
+void
+dhd_bus_dump_dar_registers(struct dhd_bus *bus)
+{
+ uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
+ dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
+ uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
+ dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
+
+ if (bus->is_linkdown && !bus->cto_triggered) {
+ DHD_ERROR(("%s: link is down\n", __FUNCTION__));
+ return;
+ }
+
+ dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
+ dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
+ dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
+ dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
+ dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
+ dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
+
+ if (bus->sih->buscorerev < 24) {
+ DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
+ __FUNCTION__, bus->sih->buscorerev));
+ return;
+ }
+
+ dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
+ dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
+ dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
+ dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
+ dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
+ dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
+
+ DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
+ __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
+ dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
+
+ DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
+ __FUNCTION__, dar_errlog_reg, dar_errlog_val,
+ dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
+}
+
/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
void
dhd_bus_hostready(struct dhd_bus *bus)
@@ -5695,14 +6750,18 @@
return;
}
- DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+ DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
+
+ dhd_bus_dump_dar_registers(bus);
+
si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
bus->hostready_count ++;
- DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+ DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
}
/* Clear INTSTATUS */
@@ -5805,7 +6864,7 @@
return -EBUSY;
}
- bus->last_suspend_start_time = OSL_SYSUPTIME_US();
+ bus->last_suspend_start_time = OSL_LOCALTIME_NS();
/* stop all interface network queue. */
dhd_bus_stop_queue(bus);
@@ -5832,7 +6891,6 @@
}
#else
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
-
/* Clear wait_for_d3_ack before sending D3_INFORM */
bus->wait_for_d3_ack = 0;
/*
@@ -5860,7 +6918,12 @@
__FUNCTION__, intstatus, host_irq_disabled));
dhd_pcie_intr_count_dump(bus->dhd);
dhd_print_tasklet_status(bus->dhd);
- dhd_prot_process_ctrlbuf(bus->dhd);
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
+ !bus->use_mailbox) {
+ dhd_prot_process_ctrlbuf(bus->dhd);
+ } else {
+ dhdpcie_handle_mb_data(bus);
+ }
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
/* Clear Interrupts */
dhdpcie_bus_clear_intstatus(bus);
@@ -5883,7 +6946,6 @@
if (bus->wait_for_d3_ack) {
DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
-
/* Got D3 Ack. Suspend the bus */
#ifdef OEM_ANDROID
if (active) {
@@ -5960,7 +7022,11 @@
}
#if defined(BCMPCIE_OOB_HOST_WAKE)
- dhdpcie_oob_intr_set(bus, TRUE);
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
+ DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
+ } else {
+ dhdpcie_oob_intr_set(bus, TRUE);
+ }
#endif /* BCMPCIE_OOB_HOST_WAKE */
DHD_GENERAL_LOCK(bus->dhd, flags);
@@ -5976,28 +7042,38 @@
bus->dhd->d3ackcnt_timeout = 0;
bus->dhd->busstate = DHD_BUS_SUSPEND;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
- "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
- PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR1, sizeof(uint32))));
+ dhdpcie_dump_resource(bus);
/* Handle Host Suspend */
rc = dhdpcie_pci_suspend_resume(bus, state);
if (!rc) {
- bus->last_suspend_end_time = OSL_SYSUPTIME_US();
+ bus->last_suspend_end_time = OSL_LOCALTIME_NS();
}
#ifdef OEM_ANDROID
}
#endif /* OEM_ANDROID */
} else if (timeleft == 0) { /* D3 ACK Timeout */
+#ifdef DHD_FW_COREDUMP
+ uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
+#endif /* DHD_FW_COREDUMP */
+
+ /* check if the D3 ACK timeout is due to a scheduling issue */
+ bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
+ bus->isr_entry_time > bus->last_d3_inform_time &&
+ dhd_bus_query_dpc_sched_errors(bus->dhd);
bus->dhd->d3ack_timeout_occured = TRUE;
/* If the D3 Ack has timeout */
bus->dhd->d3ackcnt_timeout++;
- DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
- __FUNCTION__, bus->dhd->d3ackcnt_timeout));
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
+ __FUNCTION__, bus->dhd->is_sched_error ?
+ " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
DHD_BUS_LOCK(bus->bus_lock, flags_bus);
bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
@@ -6006,7 +7082,9 @@
/* resume all interface network queue. */
dhd_bus_start_queue(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- if (!bus->dhd->dongle_trap_occured) {
+ if (!bus->dhd->dongle_trap_occured &&
+ !bus->is_linkdown &&
+ !bus->cto_triggered) {
uint32 intstatus = 0;
/* Check if PCIe bus status is valid */
@@ -6020,7 +7098,7 @@
dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
- if (bus->dhd->memdump_enabled) {
+ if (cur_memdump_mode) {
/* write core dump to file */
bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
dhdpcie_mem_dump(bus);
@@ -6038,12 +7116,15 @@
dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
#endif /* OEM_ANDROID */
}
+#if defined(DHD_ERPOM)
+ dhd_schedule_reset(bus->dhd);
+#endif // endif
rc = -ETIMEDOUT;
}
} else {
/* Resume */
DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
- bus->last_resume_start_time = OSL_SYSUPTIME_US();
+ bus->last_resume_start_time = OSL_LOCALTIME_NS();
/**
* PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
@@ -6062,11 +7143,7 @@
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* linux && OEM_ANDROID */
rc = dhdpcie_pci_suspend_resume(bus, state);
- DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
- __FUNCTION__, PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
+ dhdpcie_dump_resource(bus);
DHD_BUS_LOCK(bus->bus_lock, flags_bus);
/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
@@ -6112,8 +7189,9 @@
dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
}
- bus->last_resume_end_time = OSL_SYSUPTIME_US();
-
+ bus->last_resume_end_time = OSL_LOCALTIME_NS();
+ /* Update TCM rd index for EDL ring */
+ DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
}
return rc;
}
@@ -6151,6 +7229,21 @@
return 0;
}
+static uint32
+dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
+{
+ uint16 chipid = si_chipid(bus->sih);
+ if ((chipid == BCM4375_CHIP_ID ||
+ chipid == BCM4362_CHIP_ID ||
+ chipid == BCM43751_CHIP_ID ||
+ chipid == BCM4377_CHIP_ID) &&
+ (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
+ len += 8;
+ }
+ DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
+ return len;
+}
+
/** Transfers bytes from host to dongle and to host again using DMA */
static int
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
@@ -6177,25 +7270,39 @@
return BCME_ERROR;
}
+ len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
+
bus->dmaxfer_complete = FALSE;
ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
d11_lpbk, core_num);
- if (ret != BCME_OK || !wait)
- return ret;
-
- ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
- if (ret < 0)
- ret = BCME_NOTREADY;
+ if (ret != BCME_OK || !wait) {
+ DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
+ ret, wait));
+ } else {
+ ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
+ if (ret < 0)
+ ret = BCME_NOTREADY;
+ }
return ret;
}
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
+#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
+#define PCIE_REV_FOR_4378B0 68
+
static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
int bcmerror = 0;
volatile uint32 *cr4_regs;
+ bool do_flr;
hs_addrs_t bl_hs_addrs = {NULL, NULL};
if (bus->sih->chip == CYW55560_CHIP_ID) {
@@ -6212,12 +7319,23 @@
DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
return BCME_ERROR;
}
+
+ do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
+ (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
+
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
*/
if (enter) {
+#ifndef BCMQT /* for performance reasons, skip the FLR for QT */
+#endif /* !BCMQT */
+
/* Make sure BAR1 maps to backplane address 0 */
- dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
+ dhdpcie_setbar1win(bus, 0x00000000);
bus->alp_only = TRUE;
/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
@@ -6322,6 +7440,14 @@
DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
goto fail;
}
+ /* write random numbers to sysmem for the purpose of
+ * randomizing heap address space.
+ */
+ if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
+ __FUNCTION__));
+ goto fail;
+ }
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
@@ -6422,11 +7548,6 @@
goto fail;
}
- if ((bcmerror = dhdpcie_wrt_host_whitelist_region(bus)) != BCME_OK) {
- DHD_ERROR(("%s: Failed to write Whitelist region to TCM !\n",
- __FUNCTION__));
- goto fail;
- }
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
@@ -6434,8 +7555,8 @@
goto fail;
}
+ /* write address 0 with reset instruction */
if (bus->sih->chip != CYW55560_CHIP_ID) {
- /* write address 0 with reset instruction */
bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
@@ -6469,8 +7590,8 @@
/* Set write_vars done bit to let BL jump to mainline FW */
if ((bcmerror = dhdpcie_dongle_host_post_varswrite(bus, &bl_hs_addrs))) {
DHD_ERROR(("%s: error %d dongle_host_post_varswrite\n",
- __FUNCTION__, bcmerror));
- goto fail;
+ __FUNCTION__, bcmerror));
+ goto fail;
}
DHD_ERROR(("%s VARS done bit set, BL can jump to mainline FW\n",
__FUNCTION__));
@@ -6504,6 +7625,10 @@
/* Always return to PCIE core */
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
return bcmerror;
} /* dhdpcie_bus_download_state */
@@ -6664,7 +7789,7 @@
if (isset(&d2h_reg, D2H_VALDN_RESULT_SHIFT)) {
DHD_ERROR(("%s: TRX img validation check successful\n",
- __FUNCTION__));
+ __FUNCTION__));
} else {
DHD_ERROR(("%s: TRX img validation check failed\n", __FUNCTION__));
bcmerror = BCME_ERROR;
@@ -6710,6 +7835,7 @@
sbpcieregs_t *pcieregs = NULL;
uint32 reg_val = 0;
int32 idx = 0;
+ int print_interval = D2H_READY_WD_RESET_COUNT / 10;
if (sih && osh) {
pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
@@ -6720,11 +7846,16 @@
for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
+#ifdef BCMQT
+ OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
+#else
OSL_DELAY(D2H_READY_WD_RESET_US);
- if (!(idx % D2H_READY_WD_RESET_DBG_PRINT_US)) {
- DHD_ERROR(("Waiting for D2H_READY %d\n",
- idx/D2H_READY_WD_RESET_DBG_PRINT_US));
+#endif // endif
+ if (!(idx % print_interval)) {
+ DHD_ERROR(("Waiting %d us for D2H_READY\n",
+ idx * D2H_READY_WD_RESET_US));
}
+
dhdpcie_handshake_msg_reg_read(sih, osh, &pcieregs->u1.dar_64.d2h_msg_reg0,
&reg_val);
if (isset(&reg_val, D2H_READY_SHIFT)) {
@@ -6753,6 +7884,7 @@
sbpcieregs_t *pcieregs = NULL;
uint32 reg_val = 0;
int32 idx = 0;
+ int print_interval = D2H_READY_WD_RESET_COUNT / 10;
if (osh && regva) {
@@ -6764,10 +7896,14 @@
/* Host waits for bootloader to be ready before ChipId access */
for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
+#ifdef BCMQT
+ OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
+#else
OSL_DELAY(D2H_READY_WD_RESET_US);
- if (!(idx % D2H_READY_WD_RESET_DBG_PRINT_US)) {
- DHD_ERROR(("Waiting for D2H_READY %d\n",
- idx/D2H_READY_WD_RESET_DBG_PRINT_US));
+#endif // endif
+ if (!(idx % print_interval)) {
+ DHD_ERROR(("Waiting %d us for D2H_READY\n",
+ idx * D2H_READY_WD_RESET_US));
}
reg_val = R_REG(osh, &pcieregs->u1.dar_64.d2h_msg_reg0);
if (isset(&reg_val, D2H_READY_SHIFT)) {
@@ -6793,8 +7929,8 @@
uint h2d_reg = 0x00000000;
/* Set NVRAM done bit (Download done is already set) */
- setbit(&h2d_reg, (H2D_DL_DONE_SHIFT));
- setbit(&h2d_reg, (H2D_DL_NVRAM_DONE_SHIFT));
+ setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
+ setbit(&h2d_reg, H2D_DL_NVRAM_DONE_SHIFT);
bcmerror = dhdpcie_handshake_msg_reg_write(bus->sih, bus->osh, addr->h2d, &h2d_reg);
return bcmerror;
@@ -7124,52 +8260,95 @@
void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
+ dhd_bus_t *bus;
+ uint64 current_time = OSL_LOCALTIME_NS();
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bus = dhd->bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
- "dpc_return_busdown_count=%lu\n",
- dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
- dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
- dhd->bus->dpc_return_busdown_count);
+ "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
+ bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count);
#ifdef BCMPCIE_OOB_HOST_WAKE
bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
- " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
- dhd->bus->oob_intr_count, dhd->bus->oob_intr_enable_count,
- dhd->bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus),
- dhd->bus->last_oob_irq_time);
+ " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
+ " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
+ " oob_irq_enabled=%d oob_gpio_level=%d\n",
+ bus->oob_intr_count, bus->oob_intr_enable_count,
+ bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
+ GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level());
#endif /* BCMPCIE_OOB_HOST_WAKE */
- bcm_bprintf(strbuf, "\ncurrent_time=%llu isr_entry_time=%llu isr_exit_time=%llu\n"
- "dpc_entry_time=%llu last_process_ctrlbuf_time=%llu "
- "last_process_flowring_time=%llu last_process_txcpl_time=%llu\n"
- "last_process_rxcpl_time=%llu last_process_infocpl_time=%llu "
- "dpc_exit_time=%llu resched_dpc_time=%llu\n",
- OSL_SYSUPTIME_US(), dhd->bus->isr_entry_time, dhd->bus->isr_exit_time,
- dhd->bus->dpc_entry_time, dhd->bus->last_process_ctrlbuf_time,
- dhd->bus->last_process_flowring_time, dhd->bus->last_process_txcpl_time,
- dhd->bus->last_process_rxcpl_time, dhd->bus->last_process_infocpl_time,
- dhd->bus->dpc_exit_time, dhd->bus->resched_dpc_time);
- bcm_bprintf(strbuf, "\nlast_suspend_start_time=%llu last_suspend_end_time=%llu"
- " last_resume_start_time=%llu last_resume_end_time=%llu\n",
- dhd->bus->last_suspend_start_time, dhd->bus->last_suspend_end_time,
- dhd->bus->last_resume_start_time, dhd->bus->last_resume_end_time);
+ bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
+ " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
+ "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
+ " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
+ " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
+ "last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->last_process_ctrlbuf_time),
+ GET_SEC_USEC(bus->last_process_flowring_time),
+ GET_SEC_USEC(bus->last_process_txcpl_time),
+ GET_SEC_USEC(bus->last_process_rxcpl_time),
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time),
+ GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
+ GET_SEC_USEC(bus->last_d3_inform_time));
+
+ bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
+ SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
+ SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
+ GET_SEC_USEC(bus->last_suspend_end_time),
+ GET_SEC_USEC(bus->last_resume_start_time),
+ GET_SEC_USEC(bus->last_resume_end_time));
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}
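
The timestamps above now come from OSL_LOCALTIME_NS() and are printed with SEC_USEC_FMT/GET_SEC_USEC, which presumably split a nanosecond count into whole seconds and the microsecond remainder. A standalone sketch under that assumption (not the driver's actual macro definitions):

/* Standalone sketch of the assumed SEC_USEC_FMT/GET_SEC_USEC behaviour:
 * split a nanosecond timestamp into seconds and the microsecond remainder.
 */
#include <stdio.h>
#include <inttypes.h>

#define SEC_USEC_FMT "%" PRIu64 ".%06" PRIu64				/* assumed format */
#define GET_SEC_USEC(ns) ((uint64_t)(ns) / 1000000000ULL), \
			 (((uint64_t)(ns) % 1000000000ULL) / 1000ULL)

int main(void)
{
	uint64_t ts_ns = 12345678901234ULL;	/* example timestamp in ns */
	printf("isr_entry_time=" SEC_USEC_FMT "\n", GET_SEC_USEC(ts_ns));	/* 12345.678901 */
	return 0;
}
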
void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
uint32 intstatus = 0;
uint32 intmask = 0;
- uint32 mbintstatus = 0;
+ uint32 d2h_db0 = 0;
uint32 d2h_mb_data = 0;
intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
dhd->bus->pcie_mailbox_int, 0, 0);
intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
dhd->bus->pcie_mailbox_mask, 0, 0);
- mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
- bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
- intstatus, intmask, mbintstatus);
+ bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0);
bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
d2h_mb_data, dhd->bus->def_intmask);
}
@@ -7180,14 +8359,18 @@
int ix = 0;
flow_ring_node_t *flow_ring_node;
flow_info_t *flow_info;
+#ifdef TX_STATUS_LATENCY_STATS
uint8 ifindex;
if_flow_lkup_t *if_flow_lkup;
dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
+#endif /* TX_STATUS_LATENCY_STATS */
if (dhdp->busstate != DHD_BUS_DATA)
return;
+#ifdef TX_STATUS_LATENCY_STATS
memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
+#endif /* TX_STATUS_LATENCY_STATS */
#ifdef DHD_WAKE_STATUS
bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
@@ -7218,15 +8401,21 @@
bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
bcm_bprintf(strbuf,
- "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
+ "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
- "Overflows", "RD", "WR");
+ " Overflows", " RD", " WR");
+#ifdef TX_STATUS_LATENCY_STATS
/* Average Tx status/Completion Latency in micro secs */
- bcm_bprintf(strbuf, "%12s", "AvgTxCmpL_Us ");
+ bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
+#endif /* TX_STATUS_LATENCY_STATS */
- bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
+ bcm_bprintf(strbuf, "\n");
for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
@@ -7235,7 +8424,7 @@
flow_info = &flow_ring_node->flow_info;
bcm_bprintf(strbuf,
- "%3d. %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
+ "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
MAC2STRDBG(flow_info->da),
DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
@@ -7245,9 +8434,12 @@
dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
"%4d %4d ");
- bcm_bprintf(strbuf, "%12d ",
+#ifdef TX_STATUS_LATENCY_STATS
+ bcm_bprintf(strbuf, "%16d %16d ",
+ flow_info->num_tx_pkts,
flow_info->num_tx_status ?
- div64_u64(flow_info->cum_tx_status_latency, flow_info->num_tx_status) : 0);
+ DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
+ flow_info->num_tx_status) : 0);
ifindex = flow_info->ifindex;
ASSERT(ifindex < DHD_MAX_IFS);
@@ -7259,11 +8451,12 @@
DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
__FUNCTION__, ifindex, flowid));
}
- bcm_bprintf(strbuf,
- "%5s %6s %5s\n", "NA", "NA", "NA");
+#endif /* TX_STATUS_LATENCY_STATS */
+ bcm_bprintf(strbuf, "\n");
}
- bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStats");
+#ifdef TX_STATUS_LATENCY_STATS
+ bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
for (ix = 0; ix < DHD_MAX_IFS; ix++) {
if (!if_flow_lkup[ix].status) {
@@ -7272,10 +8465,51 @@
bcm_bprintf(strbuf, "%2d %16d %16d\n",
ix,
if_tx_status_latency[ix].num_tx_status ?
- div64_u64(if_tx_status_latency[ix].cum_tx_status_latency,
+ DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
if_tx_status_latency[ix].num_tx_status): 0,
if_tx_status_latency[ix].num_tx_status);
}
+#endif /* TX_STATUS_LATENCY_STATS */
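
The div64_u64() calls above are replaced with a DIV_U64_BY_U64() wrapper, most plausibly to keep 64-by-64 division portable across build environments. A minimal sketch of what such a wrapper could look like (an assumption, not the driver's actual definition):

/* Sketch of an assumed DIV_U64_BY_U64() wrapper: route to the kernel helper
 * where native 64/64 division may not be available, plain division otherwise.
 */
#ifdef __KERNEL__
#include <linux/math64.h>
#define DIV_U64_BY_U64(x, y)	div64_u64((x), (y))
#else
#define DIV_U64_BY_U64(x, y)	((x) / (y))
#endif
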
+
+#ifdef DHD_HP2P
+ if (dhdp->hp2p_capable) {
+ bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");
+
+ for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
+ hp2p_info_t *hp2p_info;
+ int bin;
+
+ hp2p_info = &dhdp->hp2p_info[flowid];
+ if (hp2p_info->num_timer_start == 0)
+ continue;
+
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%2d %20d %16d", bin,
+ hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%d %20d", bin,
+ hp2p_info->rx_t0[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s %16s",
+ "Packet limit", "Timer limit", "Timer start");
+ bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
+ hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ }
+#endif /* DHD_HP2P */
+
bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
@@ -7286,6 +8520,158 @@
dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+bool
+dhd_axi_sig_match(dhd_pub_t *dhdp)
+{
+ uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
+
+ if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
+ DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
+ __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
+ dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
+ if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
+ axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
+ uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
+ OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
+ if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+ return TRUE;
+ } else {
+ DHD_ERROR(("%s: No AXI signature: 0x%x\n",
+ __FUNCTION__, axi_signature));
+ return FALSE;
+ }
+ } else {
+ DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
+ return FALSE;
+ }
+}
+
+void
+dhd_axi_error(dhd_pub_t *dhdp)
+{
+ dhd_axi_error_dump_t *axi_err_dump;
+ uint8 *axi_err_buf = NULL;
+ uint8 *p_axi_err = NULL;
+ uint32 axi_logbuf_addr;
+ uint32 axi_tcm_addr;
+ int err, size;
+
+ OSL_DELAY(75000);
+
+ axi_logbuf_addr = dhdp->axierror_logbuf_addr;
+ if (!axi_logbuf_addr) {
+ DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
+ goto sched_axi;
+ }
+
+ axi_err_dump = dhdp->axi_err_dump;
+ if (!axi_err_dump) {
+ goto sched_axi;
+ }
+
+ if (!dhd_axi_sig_match(dhdp)) {
+ goto sched_axi;
+ }
+
+ /* Reading AXI error data for SMMU fault */
+ DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
+ axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
+ size = sizeof(hnd_ext_trap_axi_error_v1_t);
+ axi_err_buf = MALLOCZ(dhdp->osh, size);
+ if (axi_err_buf == NULL) {
+ DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
+ goto sched_axi;
+ }
+
+ p_axi_err = axi_err_buf;
+ err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
+ if (err) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, err, size, axi_tcm_addr));
+ goto sched_axi;
+ }
+
+ /* Dump data to Dmesg */
+ dhd_log_dump_axi_error(axi_err_buf);
+ err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
+ if (err) {
+ DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
+ __FUNCTION__, err));
+ }
+
+sched_axi:
+ if (axi_err_buf) {
+ MFREE(dhdp->osh, axi_err_buf, size);
+ }
+ dhd_schedule_axi_error_dump(dhdp, NULL);
+}
+
+static void
+dhd_log_dump_axi_error(uint8 *axi_err)
+{
+ dma_dentry_v1_t dma_dentry;
+ dma_fifo_v1_t dma_fifo;
+ int i = 0, j = 0;
+
+ if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
+ hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
+ DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
+ DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
+ DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
+ DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
+ __FUNCTION__, axi_err_v1->dma_fifo_valid_count));
+ DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_status));
+ DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_core));
+ DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_hi));
+ DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_lo));
+ DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_id));
+
+ for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
+ dma_fifo = axi_err_v1->dma_fifo[i];
+ DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
+ DHD_ERROR(("%s: direction:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.direction));
+ DHD_ERROR(("%s: index:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.index));
+ DHD_ERROR(("%s: dpa:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dpa));
+ DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_lo));
+ DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_hi));
+ DHD_ERROR(("%s: din:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.din));
+ DHD_ERROR(("%s: dout:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dout));
+ for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
+ dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
+ DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.ctrl1));
+ DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.ctrl2));
+ DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.addrlo));
+ DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.addrhi));
+ }
+ }
+ }
+ else {
+ DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
/**
* Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
* flow queue to their flow ring.
@@ -7298,6 +8684,10 @@
flow_ring_node_t *flow_ring_node;
struct dhd_bus *bus = dhd->bus;
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
/* Hold flowring_list_lock to ensure no race condition while accessing the List */
DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
for (item = dll_head_p(&bus->flowring_active_list);
@@ -7368,6 +8758,13 @@
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
@@ -7402,6 +8799,13 @@
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
DHD_INFO(("writing a door bell 2 to the device\n"));
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
@@ -7420,9 +8824,27 @@
return;
}
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
+
+#ifdef DHD_DB0TS
+ if (bus->dhd->db0ts_capable) {
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ value = htol32(ts & 0xFFFFFFFF);
+ DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
+ }
+#endif /* DHD_DB0TS */
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}
@@ -7436,6 +8858,12 @@
return;
}
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
@@ -7452,6 +8880,13 @@
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}
@@ -7498,7 +8933,7 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- bus->dpc_entry_time = OSL_SYSUPTIME_US();
+ bus->dpc_entry_time = OSL_LOCALTIME_NS();
DHD_GENERAL_LOCK(bus->dhd, flags);
/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
@@ -7527,9 +8962,9 @@
* which has been disabled in the dhdpcie_bus_isr()
*/
dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
- bus->dpc_exit_time = OSL_SYSUPTIME_US();
+ bus->dpc_exit_time = OSL_LOCALTIME_NS();
} else {
- bus->resched_dpc_time = OSL_SYSUPTIME_US();
+ bus->resched_dpc_time = OSL_LOCALTIME_NS();
}
bus->dpc_sched = resched;
@@ -7592,6 +9027,7 @@
done:
if (h2d_mb_data == H2D_HOST_D3_INFORM) {
DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+ bus->last_d3_inform_time = OSL_LOCALTIME_NS();
bus->d3_inform_cnt++;
}
if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
@@ -7624,11 +9060,21 @@
dhdpcie_bus_clear_intstatus(bus);
dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
- /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
- bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+ if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+ /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
+ bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+ DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
+ }
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- bus->wait_for_d3_ack = 1;
- dhd_os_d3ack_wake(bus->dhd);
+ /* Check for the D3 ACK induce flag, which is set via a dhd iovar to induce a D3 ACK timeout.
+ * If the flag is set, the D3 wake is skipped, which results in a D3 ACK timeout.
+ */
+ if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+ } else {
+ DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
+ }
}
void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
@@ -7639,7 +9085,7 @@
DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
- if (d2h_mb_data & D2H_DEV_FWHALT) {
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
DHD_ERROR(("FW trap has happened\n"));
dhdpcie_checkdied(bus, NULL, 0);
#ifdef OEM_ANDROID
@@ -7657,6 +9103,7 @@
BCM_REFERENCE(ds_acked);
if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
goto exit;
}
@@ -7789,6 +9236,7 @@
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
bool resched = FALSE;
+ unsigned long flags_bus;
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req(bus);
@@ -7805,12 +9253,15 @@
if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
bus->api.handle_mb_data(bus);
- if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox &&
- (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) {
- DHD_ERROR(("%s: Bus is in power save state. "
+ /* Do not process any rings after receiving D3_ACK */
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ DHD_ERROR(("%s: D3 Ack Received. "
"Skip processing rest of ring buffers.\n", __FUNCTION__));
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
goto exit;
}
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
/* Validate intstatus only for INTX case */
if ((bus->d2h_intr_method == PCIE_MSI) ||
@@ -7834,14 +9285,47 @@
return resched;
}
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
+{
+ unsigned long time_elapsed;
+
+ /* Poll for timeout value periodically */
+ if ((bus->dhd->busstate == DHD_BUS_DATA) &&
+ (bus->dhd->dhd_rte_time_sync_ms != 0) &&
+ (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
+ time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
+ /* Comparison is done in milliseconds */
+ if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
+ /*
+ * It is fine if it has crossed the timeout value; no need to adjust the
+ * elapsed time.
+ */
+ bus->dhd_rte_time_sync_count += time_elapsed;
+
+ /* Schedule deferred work. The work function will send the IOVAR. */
+ dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
+ }
+ }
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
bool more = FALSE;
+ unsigned long flags_bus;
/* First check if there a FW trap */
if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (bus->dhd->axi_error) {
+ DHD_ERROR(("AXI Error happened\n"));
+ return FALSE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
return FALSE;
}
@@ -7850,37 +9334,58 @@
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
dhd_prot_process_ctrlbuf(bus->dhd);
- bus->last_process_ctrlbuf_time = OSL_SYSUPTIME_US();
+ bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
/* Unlock to give chance for resp to be handled */
DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
- /* Do not process rest of ring buf once bus enters low power state */
- if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) {
- DHD_ERROR(("%s: Bus is in power save state. "
- "Skip processing rest of ring buffers.\n", __FUNCTION__));
+ /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+ if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+ DHD_ERROR(("%s: Bus is in power save state (%d). "
+ "Skip processing rest of ring buffers.\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
return FALSE;
}
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
/* update the flow ring cpls */
dhd_update_txflowrings(bus->dhd);
- bus->last_process_flowring_time = OSL_SYSUPTIME_US();
+ bus->last_process_flowring_time = OSL_LOCALTIME_NS();
/* With heavy TX traffic, we could get a lot of TxStatus
* so add bound
*/
- more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
- bus->last_process_txcpl_time = OSL_SYSUPTIME_US();
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
+ bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
/* With heavy RX traffic, this routine potentially could spend some time
* processing RX frames without RX bound
*/
- more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
- bus->last_process_rxcpl_time = OSL_SYSUPTIME_US();
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
+ bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
/* Process info ring completion messages */
- more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
- bus->last_process_infocpl_time = OSL_SYSUPTIME_US();
+#ifdef EWP_EDL
+ if (!bus->dhd->dongle_edl_support)
+#endif // endif
+ {
+ more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
+ bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
+ }
+#ifdef EWP_EDL
+ else {
+ more |= dhd_prot_process_msgbuf_edl(bus->dhd);
+ bus->last_process_edl_time = OSL_LOCALTIME_NS();
+ }
+#endif /* EWP_EDL */
#ifdef IDLE_TX_FLOW_MGMT
if (bus->enable_idle_flowring_mgmt) {
@@ -7910,19 +9415,23 @@
DHD_OS_WAKE_UNLOCK(bus->dhd);
}
#endif /* DHD_FW_COREDUMP */
- bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
- dhd_os_send_hang_message(bus->dhd);
} else {
DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
bus->is_linkdown = 1;
- bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
- dhd_os_send_hang_message(bus->dhd);
}
+
+ dhd_prot_debug_info_print(bus->dhd);
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
+ dhd_os_send_hang_message(bus->dhd);
+ more = FALSE;
}
#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+ dhdpci_bus_rte_log_time_sync_poll(bus);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
return more;
}
@@ -8059,17 +9568,32 @@
addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
}
+ if (addr == (uint32)-1) {
+ DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->is_linkdown = 1;
+ return BCME_ERROR;
+ }
+
if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
(addr > shaddr)) {
DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
__FUNCTION__, addr));
DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
#ifdef DEBUG_DNGL_INIT_FAIL
+ if (addr != (uint32)-1) { /* skip further PCIe reads if this addr reads back as all-ones */
#ifdef CUSTOMER_HW4_DEBUG
- bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
#endif /* CUSTOMER_HW4_DEBUG */
- bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
- dhdpcie_mem_dump(bus);
+ if (bus->dhd->memdump_enabled) {
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
+ dhdpcie_mem_dump(bus);
+ }
+ }
#endif /* DEBUG_DNGL_INIT_FAIL */
return BCME_ERROR;
} else {
@@ -8137,6 +9661,9 @@
}
}
+ /* TODO: This needs to be selected based on IPC instead of at compile time */
+ bus->dhd->hwa_enable = TRUE;
+
if (idma_en) {
bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
@@ -8188,6 +9715,13 @@
{
ring_info_t ring_info;
+ /* boundary check */
+ if (sh->rings_info_ptr > shaddr) {
+ DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
+ __FUNCTION__, sh->rings_info_ptr));
+ return BCME_ERROR;
+ }
+
if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
return rv;
@@ -8298,18 +9832,68 @@
if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
bus->dhd->pcie_txs_metadata_enable = 0;
-#ifdef D2H_MINIDUMP
- bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
- DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
- if (bus->d2h_minidump_override) {
- bus->d2h_minidump = FALSE;
+ bus->dhd->hscb_enable =
+ (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
+
+#ifdef EWP_EDL
+ if (host_edl_support) {
+ bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
+ DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
}
- DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
- bus->d2h_minidump, bus->d2h_minidump_override));
-#endif /* D2H_MINIDUMP */
+#endif /* EWP_EDL */
+
+ bus->dhd->debug_buf_dest_support =
+ (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
+ DHD_ERROR(("FW supports debug buf dest ? %s \n",
+ bus->dhd->debug_buf_dest_support ? "Y" : "N"));
+
+#ifdef DHD_HP2P
+ if (bus->dhd->hp2p_enable) {
+ bus->dhd->hp2p_ts_capable =
+ (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
+ bus->dhd->hp2p_capable =
+ (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
+ bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
+
+ DHD_ERROR(("FW supports HP2P ? %s \n",
+ bus->dhd->hp2p_capable ? "Y" : "N"));
+
+ if (bus->dhd->hp2p_capable) {
+ bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
+ bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
+ bus->dhd->time_thresh = HP2P_TIME_THRESH;
+ for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
+ hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
+
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->timer.function = &dhd_hp2p_write;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
+ tasklet_hrtimer_init(&hp2p_info->timer,
+ dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#else
+ hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21 */
+ }
+ }
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+ bus->dhd->db0ts_capable =
+ (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
+#endif /* DHD_DB0TS */
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req_clear(bus);
+
+ /*
+ * WAR to fix ARM cold boot;
+ * De-assert WL domain in DAR
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus, FALSE);
+ }
}
return BCME_OK;
} /* dhdpcie_readshared */
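
The hp2p timer setup above branches on the kernel version: tasklet_hrtimer_init() is used up to v5.1 and a softirq-mode hrtimer (HRTIMER_MODE_REL_SOFT) afterwards, since the tasklet_hrtimer API was removed from later kernels. A minimal illustration of the newer path only; the callback name and period here are made up, and dhd_hp2p_write() in the patch is assumed to follow the standard hrtimer callback signature:

/* Illustration of the post-5.1 path: a softirq-context hrtimer with the
 * standard enum hrtimer_restart (*)(struct hrtimer *) callback signature.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_hp2p_timeout(struct hrtimer *t)
{
	/* flush/aggregate pending completions here */
	return HRTIMER_NORESTART;
}

static void example_hp2p_timer_setup(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	timer->function = example_hp2p_timeout;
	hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);	/* 10 ms, arbitrary */
}
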
@@ -8425,10 +10009,17 @@
if (!bus->dhd)
return 0;
+ if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
+ dhd_bus_pcie_pwr_req_clear_reload_war(bus);
+ }
+
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req(bus);
}
+ /* Configure AER registers to log the TLP header */
+ dhd_bus_aer_config(bus);
+
/* Make sure we're talking to the core. */
bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
ASSERT(bus->reg != NULL);
@@ -8456,6 +10047,8 @@
dhd_init_bus_lock(bus);
+ dhd_init_backplane_access_lock(bus);
+
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
@@ -8502,6 +10095,7 @@
{
uint32 addr = 0;
uint32 val = 0;
+
addr = bus->dongle_ram_base + bus->ramsize - 4;
#ifdef DHD_PCIE_RUNTIMEPM
dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
@@ -8512,7 +10106,11 @@
bool
dhdpcie_chipmatch(uint16 vendor, uint16 device)
{
- if (vendor != PCI_VENDOR_ID_BROADCOM && vendor != PCI_VENDOR_ID_CYPRESS) {
+
+ if (vendor == PCI_VENDOR_ID_BROADCOM || vendor == PCI_VENDOR_ID_CYPRESS) {
+ DHD_ERROR(("%s: Supporting vendor %x device %x\n", __FUNCTION__,
+ vendor, device));
+ } else {
DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
vendor, device));
return (-ENODEV);
@@ -8603,6 +10201,14 @@
(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
return 0;
}
+ if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
+ (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
+ return 0;
+ }
+ if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
+ (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
+ return 0;
+ }
if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
return 0;
@@ -8624,11 +10230,42 @@
return 0;
}
+ if ((device == BCM4373_D11AC_ID) || (device == BCM4373_D11AC2G_ID) ||
+ (device == BCM4373_D11AC5G_ID) || (device == BCM4373_CHIP_ID)) {
+ return 0;
+ }
+
if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
(device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
return 0;
}
+#ifdef CHIPS_CUSTOMER_HW6
+ if ((device == BCM4376_D11AC_ID) || (device == BCM4376_D11AC2G_ID) ||
+ (device == BCM4376_D11AC5G_ID) || (device == BCM4376_CHIP_ID)) {
+ return 0;
+ }
+ if ((device == BCM4377_M_D11AX_ID) || (device == BCM4377_D11AX_ID) ||
+ (device == BCM4377_D11AX2G_ID) || (device == BCM4377_D11AX5G_ID) ||
+ (device == BCM4377_CHIP_ID)) {
+ return 0;
+ }
+ if ((device == BCM4378_D11AC_ID) || (device == BCM4378_D11AC2G_ID) ||
+ (device == BCM4378_D11AC5G_ID) || (device == BCM4378_CHIP_ID)) {
+ return 0;
+ }
+#endif /* CHIPS_CUSTOMER_HW6 */
+#ifdef CHIPS_CUSTOMER_HW6
+ if ((device == BCM4368_D11AC_ID) || (device == BCM4368_D11AC2G_ID) ||
+ (device == BCM4368_D11AC5G_ID) || (device == BCM4368_CHIP_ID)) {
+ return 0;
+ }
+ if ((device == BCM4367_D11AC_ID) || (device == BCM4367_D11AC2G_ID) ||
+ (device == BCM4367_D11AC5G_ID) || (device == BCM4367_CHIP_ID)) {
+ return 0;
+ }
+#endif /* CHIPS_CUSTOMER_HW6 */
+
/* CYW55560 */
if (device == CYW55560_WLAN_ID) {
return 0;
@@ -8637,6 +10274,217 @@
return (-ENODEV);
} /* dhdpcie_chipmatch */
+/*
+ * Name: dhdpcie_sromotp_customvar
+ * Description:
+ * read otp/sprom and parse & store customvar.
+ * A shadow of OTP/SPROM exists in ChipCommon Region
+ * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
+ * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
+ * can also be read from ChipCommon Registers.
+ */
+static int
+dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2)
+{
+ uint16 dump_offset = 0;
+ uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+ /* Table for 65nm OTP Size (in bits) */
+ int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+ volatile uint16 *nvm_shadow;
+ uint cur_coreid;
+ uint chipc_corerev;
+ chipcregs_t *chipcregs;
+ uint16 *otp_dump;
+ uint8 *cis;
+ uint8 tup, tlen;
+ int i = 0;
+
+ /* Save the current core */
+ cur_coreid = si_coreid(bus->sih);
+ /* Switch to ChipC */
+ chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+ ASSERT(chipcregs != NULL);
+ chipc_corerev = si_corerev(bus->sih);
+ /* Check ChipcommonCore Rev */
+ if (chipc_corerev < 44) {
+ DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+ return BCME_UNSUPPORTED;
+ }
+ /* Check ChipID */
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
+ ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
+ ((uint16)bus->sih->chip != BCM4359_CHIP_ID) &&
+ ((uint16)bus->sih->chip != BCM4349_CHIP_ID)) {
+ DHD_ERROR(("%s: supported for chips "
+ "4350/4345/4355/4364/4349/4359 only\n", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+ /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+ if (chipcregs->sromcontrol & SRC_PRESENT) {
+ /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
+ sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+ >> SRC_SIZE_SHIFT))) * 1024;
+ DHD_TRACE(("\nSPROM Present (Size %d bits)\n", sprom_size));
+ }
+ if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+ DHD_TRACE(("\nOTP Present"));
+ if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+ == OTPL_WRAP_TYPE_40NM) {
+ /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+ >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
+ } else {
+ otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+ >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
+ }
+ } else {
+ /* This part is untested since newer chips have 40nm OTP */
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ otp_size = otp_size_65nm[(chipcregs->otplayout &
+ OTPL_ROW_SIZE_MASK) >> OTPL_ROW_SIZE_SHIFT];
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
+ } else {
+ otp_size = otp_size_65nm[(chipcregs->capabilities &
+ CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT];
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
+ DHD_TRACE(("%s: 65nm/130nm OTP Size not tested. \n",
+ __FUNCTION__));
+ }
+ }
+ }
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+ ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
+ "sromcontrol = %x, otplayout = %x \n",
+ __FUNCTION__, chipcregs->sromcontrol,
+ chipcregs->otplayout));
+ return BCME_NOTFOUND;
+ }
+ } else {
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+ ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
+ "sromcontrol = %x, capablities = %x \n",
+ __FUNCTION__, chipcregs->sromcontrol,
+ chipcregs->capabilities));
+ return BCME_NOTFOUND;
+ }
+ }
+ /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+ if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
+ (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+ DHD_TRACE(("OTP Strap selected.\n"
+ "\nOTP Shadow in ChipCommon:\n"));
+ dump_size = otp_size / 16; /* 16-bit words */
+ } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+ (chipcregs->sromcontrol & SRC_PRESENT)) {
+ DHD_TRACE(("SPROM Strap selected\n"
+ "\nSPROM Shadow in ChipCommon:\n"));
+ /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+ /* dump_size in 16bit words */
+ dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16;
+ } else {
+ DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+ __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+ if (bus->regs == NULL) {
+ DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+ return BCME_NOTREADY;
+ } else {
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ /* Chip common can read only 8kbits,
+ * for ccrev >= 49 otp size is around 12 kbits so use GCI core
+ */
+ nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
+ } else {
+ /* Point to the SPROM/OTP shadow in ChipCommon */
+ nvm_shadow = chipcregs->sromotp;
+ }
+ if (nvm_shadow == NULL) {
+ DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+ otp_dump = kzalloc(dump_size*2, GFP_KERNEL);
+ if (otp_dump == NULL) {
+ DHD_ERROR(("%s: Insufficient system memory of size %d\n",
+ __FUNCTION__, dump_size));
+ return BCME_NOMEM;
+ }
+ /*
+ * Read 16 bits / iteration.
+ * dump_size & dump_offset in 16-bit words
+ */
+ while (dump_offset < dump_size) {
+ *(otp_dump + dump_offset) = *(nvm_shadow + dump_offset);
+ dump_offset += 0x1;
+ }
+ /* Read from cis tuple start address */
+ cis = (uint8 *)otp_dump + CISTPL_OFFSET;
+ /* parse value of customvar2 tuple */
+ do {
+ tup = cis[i++];
+ if (tup == CISTPL_NULL || tup == CISTPL_END)
+ tlen = 0;
+ else
+ tlen = cis[i++];
+ if ((i + tlen) >= dump_size*2)
+ break;
+ switch (tup) {
+ case CISTPL_BRCM_HNBU:
+ switch (cis[i]) {
+ case HNBU_CUSTOM1:
+ *customvar1 = ((cis[i + 4] << 24) +
+ (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ DHD_TRACE(("%s : customvar1 [%x]\n",
+ __FUNCTION__, *customvar1));
+ break;
+ case HNBU_CUSTOM2:
+ *customvar2 = ((cis[i + 4] << 24) +
+ (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ DHD_TRACE(("%s : customvar2 [%x]\n",
+ __FUNCTION__, *customvar2));
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ i += tlen;
+ } while (tup != 0xff);
+
+ kfree(otp_dump);
+ }
+ /* Switch back to the original core */
+ si_setcore(bus->sih, cur_coreid, 0);
+ return BCME_OK;
+} /* dhdpcie_sromotp_customvar */
+
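For reference, the HNBU_CUSTOM1/HNBU_CUSTOM2 values parsed above are stored little-endian in the four bytes that follow the one-byte HNBU sub-tag, which is why the loop reassembles cis[i+1]..cis[i+4]. A minimal standalone sketch of that reassembly (plain C, not part of the patch):

#include <stdint.h>

/* Reassemble the 32-bit little-endian value that follows a one-byte
 * HNBU sub-tag inside a Broadcom vendor CIS tuple; 'p' points at the
 * sub-tag byte (cis[i] in the parser above).
 */
static uint32_t cis_read_le32_after_subtag(const uint8_t *p)
{
	return ((uint32_t)p[4] << 24) | ((uint32_t)p[3] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[1];
}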
/**
* Name: dhdpcie_cc_nvmshadow
*
@@ -8835,6 +10683,13 @@
dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_HP2P
+ if (flow_ring_node->hp2p_ring) {
+ bus->dhd->hp2p_ring_active = FALSE;
+ flow_ring_node->hp2p_ring = FALSE;
+ }
+#endif /* DHD_HP2P */
+
/* clean up BUS level info */
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
@@ -8892,8 +10747,26 @@
DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow create Response failure error status = %d \n",
@@ -8981,8 +10854,26 @@
DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
@@ -9046,8 +10937,26 @@
return;
}
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
flow_ring_node->status = FLOW_RING_STATUS_OPEN;
return;
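The range check, NULL check, and flowid consistency check above are repeated verbatim in the create, delete, and flush response handlers. If the surrounding code permits, they could be collapsed into one helper; a hedged sketch (the helper name is hypothetical, the macros and fields are the ones used above):

/* Hypothetical consolidation of the repeated flowid sanity checks.
 * Returns the validated flow ring node, or NULL if anything is off.
 */
static flow_ring_node_t *
dhd_bus_flowid_to_node(dhd_bus_t *bus, uint16 flowid)
{
	flow_ring_node_t *node;

	if (flowid >= bus->dhd->num_flow_rings) {
		DHD_ERROR(("%s: flowid %d out of range, max %d\n",
			__FUNCTION__, flowid, bus->dhd->num_flow_rings));
		return NULL;
	}
	node = DHD_FLOW_RING(bus->dhd, flowid);
	if (!node || node->flowid != flowid) {
		DHD_ERROR(("%s: no valid flow ring node for flowid %d\n",
			__FUNCTION__, flowid));
		return NULL;
	}
	return node;
}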
@@ -9072,6 +10981,18 @@
dhdp->bus->is_linkdown = val;
}
+int
+dhd_bus_get_linkdown(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->is_linkdown;
+}
+
+int
+dhd_bus_get_cto(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->cto_triggered;
+}
+
#ifdef IDLE_TX_FLOW_MGMT
/* resume request */
int
@@ -9324,41 +11245,76 @@
return 0;
}
-void
-dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+int
+dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
{
uint32 val;
-
if (enable) {
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
- if (bus->cto_threshold == 0) {
- bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
- }
-
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
- ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
- PCIE_CTO_TO_THRESHHOLD_MASK) |
- ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
- PCIE_CTO_CLKCHKCNT_MASK) |
- PCIE_CTO_ENAB_MASK);
} else {
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
-
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
}
+ return 0;
}
-static void
+int
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+{
+ if (bus->sih->buscorerev < 19) {
+ DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
+ __FUNCTION__, bus->sih->buscorerev));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (bus->sih->buscorerev == 19) {
+ uint32 pcie_lnkst;
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
+
+ pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+
+ if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
+ PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ bus->cto_enable = enable;
+
+ dhdpcie_cto_cfg_init(bus, enable);
+
+ if (enable) {
+ if (bus->cto_threshold == 0) {
+ bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
+ ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+ PCIE_CTO_TO_THRESHHOLD_MASK) |
+ ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+ PCIE_CTO_CLKCHKCNT_MASK) |
+ PCIE_CTO_ENAB_MASK);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
+ }
+
+ DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
+ __FUNCTION__, bus->cto_enable));
+
+ return 0;
+}
+
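The enable path above packs the timeout threshold and the clock-check count into a single ctoctrl write. A small sketch of that bitfield composition, with the shift/mask/enable values passed in as parameters because the real PCIE_CTO_* constants are defined elsewhere:

#include <stdint.h>

/* Compose a CTO control word: each field is shifted into place and
 * masked, then the enable bit is OR'd in (mirrors the si_corereg()
 * argument built above). The caller supplies the driver's constants.
 */
static uint32_t cto_ctrl_compose(uint32_t thresh, uint32_t thresh_shift,
	uint32_t thresh_mask, uint32_t clkchkcnt, uint32_t clk_shift,
	uint32_t clk_mask, uint32_t enab_mask)
{
	return ((thresh << thresh_shift) & thresh_mask) |
	       ((clkchkcnt << clk_shift) & clk_mask) |
	       enab_mask;
}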
+static int
dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
- uint32 pci_intmask, err_status, dar_val;
+ uint32 pci_intmask, err_status;
uint8 i = 0;
uint32 val;
@@ -9372,29 +11328,7 @@
/*
* DAR still accessible
*/
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_INTSTAT(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRLOG(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRADDR(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val));
+ dhd_bus_dump_dar_registers(bus);
/* reset backplane */
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
@@ -9418,7 +11352,7 @@
DHD_ERROR(("cto recovery fail\n"));
DHD_OS_WAKE_UNLOCK(bus->dhd);
- return;
+ return BCME_ERROR;
}
}
@@ -9440,6 +11374,18 @@
PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+ return BCME_OK;
+}
+
+void
+dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
+{
+ uint32 val;
+
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
}
#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
@@ -9511,7 +11457,7 @@
while (first_addr <= last_addr) {
core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
- if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
+ if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
}
DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
@@ -9520,6 +11466,18 @@
}
bool
+dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd)
+ return FALSE;
+ else if (bus->hwa_enab_bmap) {
+ return bus->dhd->hwa_enable;
+ } else {
+ return FALSE;
+ }
+}
+
+bool
dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
if (!bus->dhd)
@@ -9587,7 +11545,7 @@
int bcmerror = 0;
struct dhd_bus *bus = dhdp->bus;
- if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
+ if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
}
@@ -9601,17 +11559,19 @@
return dhd->bus->idletime;
}
-#ifdef DHD_SSSR_DUMP
-
static INLINE void
dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
OSL_DELAY(1);
- si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
- DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
+ DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
+ } else {
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
+ }
return;
}
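dhd_sbreg_op() (like the other call sites converted in this patch) now goes through serialized_backplane_access() instead of calling si_backplane_access() directly; the point is to hold a bus-level lock across the indirect backplane window access so concurrent callers cannot interleave a window move with a read or write. A hedged sketch of such a wrapper (LOCK/UNLOCK are placeholders for the driver's actual spinlock helpers; the new backplane_access_lock field appears in dhd_pcie.h further down):

/* Illustrative only: serialize indirect backplane accesses.
 * LOCK/UNLOCK stand in for the driver's spinlock wrappers around
 * bus->backplane_access_lock; si_backplane_access() keeps its
 * existing signature.
 */
static int
serialized_backplane_access_sketch(dhd_bus_t *bus, uint addr, uint size,
	uint *val, bool read)
{
	int ret;
	unsigned long flags;

	LOCK(bus->backplane_access_lock, flags);
	ret = si_backplane_access(bus->sih, addr, size, val, read);
	UNLOCK(bus->backplane_access_lock, flags);
	return ret;
}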
+#ifdef DHD_SSSR_DUMP
static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
uint addr_reg, uint data_reg)
@@ -9640,7 +11600,11 @@
addr = data_reg;
/* Read 4 bytes at once and loop for fifo_size / 4 */
for (i = 0; i < fifo_size / 4; i++) {
- si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
+ if (serialized_backplane_access(dhd->bus, addr,
+ sizeof(uint), &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
buf[i] = val;
OSL_DELAY(1);
}
@@ -9672,7 +11636,12 @@
if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
- dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size);
+ int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
+ fifo_size);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
+ __FUNCTION__));
+ }
} else {
/* Check if vasip clk is disabled, if yes enable it */
addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
@@ -9685,7 +11654,12 @@
addr = addr_reg;
/* Read 4 bytes at once and loop for fifo_size / 4 */
for (i = 0; i < fifo_size / 4; i++, addr += 4) {
- si_backplane_access(sih, addr, sizeof(uint), &val, TRUE);
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
+ &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
+ addr));
+ return BCME_ERROR;
+ }
buf[i] = val;
OSL_DELAY(1);
}
@@ -9703,7 +11677,7 @@
chipc_corerev = si_corerev(sih);
- if (chipc_corerev == 64) {
+ if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
/* Read 4 bytes at once and loop for fifo_size / 4 */
@@ -9720,7 +11694,7 @@
return BCME_OK;
}
-#if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
+#if defined(EWP_ETD_PRSRV_LOGS)
void
dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
uint8 *ext_trap_data, void *event_decode_data)
@@ -9746,6 +11720,9 @@
hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
if (tlv) {
+ uint32 baseaddr = 0;
+ uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
+
etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
@@ -9761,6 +11738,16 @@
DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
return;
}
+
+ /* boundary check */
+ baseaddr = etd_evtlog->log_arr_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + arr_size) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
+
/* read the eventlog_trap_buf_info_t array from dongle memory */
err = dhdpcie_bus_membytes(dhd->bus, FALSE,
(ulong)(etd_evtlog->log_arr_addr),
@@ -9777,6 +11764,14 @@
seqnum = ntoh32(etd_evtlog->seq_num);
memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
for (i = 0; i < (etd_evtlog->num_elements); ++i) {
+ /* boundary check */
+ baseaddr = evtlog_buf_arr[i].buf_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
/* read each individual event log buf from dongle memory */
err = dhdpcie_bus_membytes(dhd->bus, FALSE,
((ulong)evtlog_buf_arr[i].buf_addr),
@@ -9799,11 +11794,11 @@
}
#endif /* BCMPCIE && DHD_LOG_DUMP */
-static int
-dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
+static uint32
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
{
uint addr;
- uint val;
+ uint val = 0;
DHD_ERROR(("%s\n", __FUNCTION__));
@@ -9812,29 +11807,28 @@
dhd_sbreg_op(dhd, addr, &val, TRUE);
if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
- dhd_sbreg_op(dhd, addr, &val, FALSE);
+ dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
}
return BCME_OK;
}
-static int
+static uint32
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
uint addr;
- uint val;
+ uint val = 0, reg_val = 0;
DHD_ERROR(("%s\n", __FUNCTION__));
/* conditionally clear bits [11:8] of PowerCtrl */
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- dhd_sbreg_op(dhd, addr, &val, TRUE);
- if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
+ dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
+ if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
val = 0;
dhd_sbreg_op(dhd, addr, &val, FALSE);
}
- return BCME_OK;
+ return reg_val;
}
static int
@@ -9885,6 +11879,31 @@
return BCME_OK;
}
+static void
+dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
+{
+#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
+#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
+ uint trap_data_mask[MAX_NUM_D11CORES] =
+ {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
+ int i;
+ /* Apply only for 4375 chip */
+ if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ if (dhd->sssr_d11_outofreset[i] &&
+ (dhd->dongle_trap_data & trap_data_mask[i])) {
+ dhd->sssr_d11_outofreset[i] = TRUE;
+ } else {
+ dhd->sssr_d11_outofreset[i] = FALSE;
+ }
+ DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
+ "trap_data:0x%x-0x%x\n",
+ __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
+ dhd->dongle_trap_data, trap_data_mask[i]));
+ }
+ }
+}
+
static int
dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
{
@@ -9911,6 +11930,8 @@
DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
}
+ dhdpcie_update_d11_status_from_trapdata(dhd);
+
return BCME_OK;
}
@@ -10120,9 +12141,11 @@
return BCME_OK;
}
-static int
+int
dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
+ uint32 powerctrl_val;
+
if (!dhd->sssr_inited) {
DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
return BCME_ERROR;
@@ -10142,14 +12165,14 @@
}
dhdpcie_clear_intmask_and_timer(dhd);
- dhdpcie_suspend_chipcommon_powerctrl(dhd);
+ powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
dhdpcie_clear_clk_req(dhd);
dhdpcie_pcie_send_ltrsleep(dhd);
/* Wait for some time before Restore */
OSL_DELAY(6000);
- dhdpcie_resume_chipcommon_powerctrl(dhd);
+ dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
dhdpcie_bring_d11_outofreset(dhd);
DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
@@ -10157,16 +12180,86 @@
DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
return BCME_ERROR;
}
+ dhd->sssr_dump_collected = TRUE;
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
- dhd_schedule_sssr_dump(dhd);
+ return BCME_OK;
+}
+
+static int
+dhdpcie_fis_trigger(dhd_pub_t *dhd)
+{
+ if (!dhd->sssr_inited) {
+ DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Trigger FIS */
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
+ OSL_DELAY(100 * 1000);
return BCME_OK;
}
int
-dhd_bus_sssr_dump(dhd_pub_t *dhd)
+dhd_bus_fis_trigger(dhd_pub_t *dhd)
{
- return dhdpcie_sssr_dump(dhd);
+ return dhdpcie_fis_trigger(dhd);
+}
+
+static int
+dhdpcie_fis_dump(dhd_pub_t *dhd)
+{
+ int i;
+
+ if (!dhd->sssr_inited) {
+ DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* bring up all pmu resources */
+ PMU_REG(dhd->bus->sih, min_res_mask, ~0,
+ PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
+ OSL_DELAY(10 * 1000);
+
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ dhd->sssr_d11_outofreset[i] = TRUE;
+ }
+
+ dhdpcie_bring_d11_outofreset(dhd);
+ OSL_DELAY(6000);
+
+ /* clear FIS Done */
+ PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
+
+ dhdpcie_d11_check_outofreset(dhd);
+
+ DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+ if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
+
+ return BCME_OK;
+}
+
+int
+dhd_bus_fis_dump(dhd_pub_t *dhd)
+{
+ return dhdpcie_fis_dump(dhd);
}
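dhd_bus_fis_trigger() and dhd_bus_fis_dump() are exported as separate entry points, so the expected pattern from the common layer is to trigger first and collect afterwards. An illustrative calling sequence (the wrapper and its error handling are a sketch, not the driver's actual flow):

/* Illustrative only: trigger the FIS capture, then collect the dump.
 * dhdpcie_fis_trigger() already waits 100 ms after starting FIS.
 */
static int
dhd_collect_fis_sketch(dhd_pub_t *dhdp)
{
	int ret;

	ret = dhd_bus_fis_trigger(dhdp);
	if (ret != BCME_OK) {
		return ret;
	}
	return dhd_bus_fis_dump(dhdp);
}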
#endif /* DHD_SSSR_DUMP */
@@ -10183,206 +12276,6 @@
}
#endif /* DHD_WAKE_STATUS */
-#define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
-#define OTP_USER_AREA_OFFSET 0x80
-#define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
-#define OTP_VERSION_TUPLE_ID 0x15
-#define OTP_VENDOR_TUPLE_ID 0x80
-#define OTP_CIS_REGION_END_TUPLE_ID 0XFF
-#define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
-#define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
-#define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
-#define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
-#define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
-#define PMU_OTP_PWR_ON_MASK 0xC47
-
-int
-dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path)
-{
- uint32 val = 0;
- uint16 chip_id = 0;
- uint8 otp_data[2];
- char stepping[3];
- char module_name[5];
- char module_vendor = 0;
- char module_rev[4];
- uint8 tuple_id = 0;
- uint8 tuple_len = 0;
- uint32 cur_offset = 0;
- uint32 version_tuple_offset = 0;
- char module_info[64];
- char progname[32];
- bool srom_present = 0, otp_present = 0;
- uint32 sprom_ctrl = 0;
- uint32 otp_ctrl = 0, minres_mask = 0;
- int i = 0, j = 0, status = BCME_ERROR;
-
- if (!nv_path || !bus) {
- return BCME_ERROR;
- }
-
- /* read chip id first */
- if (si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) {
- DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__));
- }
- else {
- chip_id = val & 0xffff;
- }
-
- /* read SpromCtrl register */
- si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE);
- val = sprom_ctrl;
-
- /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set
- * and chip is 4355 or 4364
- */
- if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) {
- otp_present = 1;
-
- /* Check if the 4th bit (sprom_present) in CC Status REG is set */
- si_backplane_access(bus->sih, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE);
- if (val & 0x10) {
- srom_present = 1;
- }
-
- /* OTP power up sequence */
- /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
- si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE);
- val = 0x1A0000;
- si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &val, FALSE);
-
- /* 2. enable OTP power through min res mask register in PMU */
- si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE);
- val = minres_mask | PMU_OTP_PWR_ON_MASK;
- si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE);
-
- /* 3. if srom is present, need to set OtpSelect 4th bit
- * in SpromCtrl register to read otp
- */
- if (srom_present) {
-
- val = sprom_ctrl | 0x10;
- si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &val, FALSE);
-
- }
- /* Wait for PMU to power up. */
- OSL_DELAY(500);
- si_backplane_access(bus->sih, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE);
- DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val));
-
- si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE);
- DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val));
-
- si_backplane_access(bus->sih, OTP_ADDRESS, 2, &val, TRUE);
- DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val));
-
- cur_offset = OTP_USER_AREA_ADDR + 0x40;
- /* read required data from otp to construct FW string name
- * data like - chip info, module info. This is present in the
- * form of a Vendor CIS Tuple whose format is provided by Olympic.
- * The data is in the form of ASCII character strings.
- * The Vendor tuple along with other CIS tuples are present
- * in the OTP user area. A CIS tuple is a TLV format.
- * (T = 1-byte, L = 1-byte, V = n-bytes)
- */
-
- /* Find the version tuple */
- while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) {
- si_backplane_access(bus->sih, cur_offset,
- 2, (uint *)otp_data, TRUE);
-
- tuple_id = otp_data[0];
- tuple_len = otp_data[1];
- if (tuple_id == OTP_VERSION_TUPLE_ID) {
- version_tuple_offset = cur_offset;
- break;
- }
- /* if its NULL tuple, skip */
- if (tuple_id == 0)
- cur_offset += 1;
- else
- cur_offset += tuple_len + 2;
- }
-
- /* skip the major, minor ver. numbers, manufacturer and product names */
- cur_offset = version_tuple_offset + 6;
-
- /* read the chip info */
- si_backplane_access(bus->sih, cur_offset,
- 2, (uint *)otp_data, TRUE);
- if (otp_data[0] == 's' && otp_data[1] == '=') {
- /* read the stepping */
- cur_offset += 2;
- stepping[2] = 0;
- si_backplane_access(bus->sih, cur_offset,
- 2, (uint *)stepping, TRUE);
- /* read module info */
- memset(module_info, 0, 64);
- cur_offset += 2;
- si_backplane_access(bus->sih, cur_offset,
- 2, (uint *)otp_data, TRUE);
- while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID &&
- otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) {
- memcpy(&module_info[i], otp_data, 2);
- i += 2;
- cur_offset += 2;
- si_backplane_access(bus->sih, cur_offset,
- 2, (uint *)otp_data, TRUE);
- }
- /* replace any null characters found at the beginning
- * and middle of the string
- */
- for (j = 0; j < i; ++j) {
- if (module_info[j] == 0)
- module_info[j] = ' ';
- }
- DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
- stepping[0], stepping[1], module_info));
- /* extract the module name, revision and vendor
- * information from the module info string
- */
- for (i = 0; module_info[i]; i++) {
- if (module_info[i] == 'M' && module_info[i + 1] == '=') {
- memcpy(module_name, &module_info[i + 2], 4);
- module_name[4] = 0;
- i += 5;
- }
- else if (module_info[i] == 'm' && module_info[i + 1] == '=') {
- memcpy(module_rev, &module_info[i + 2], 3);
- module_rev[3] = 0;
- i += 4;
- }
- else if (module_info[i] == 'V' && module_info[i + 1] == '=') {
- module_vendor = module_info[i + 2];
- i += 2;
- }
- }
-
- /* construct the complete file path to nvram as per
- * olympic conventions
- */
- strncpy(progname, program, sizeof(progname));
- sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name,
- module_vendor, module_rev);
- DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path));
- status = BCME_OK;
- }
-
- /* restore back the registers to their previous values */
- if (srom_present) {
- si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, FALSE);
- }
-
- if (otp_present) {
- si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4,
- &minres_mask, FALSE);
- si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE);
- }
-
- }
- return status;
-}
-
/* Writes random number(s) to the TCM. FW upon initialization reads this register
* to fetch the random number, and uses it to randomize heap address space layout.
*/
@@ -10403,23 +12296,41 @@
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
/* scale back by number of random number counts */
addr -= count;
+
+#ifdef DHD_RND_DEBUG
+ bus->dhd->rnd_buf = NULL;
+ /* get random contents from file */
+ ret = dhd_get_rnd_info(bus->dhd);
+ if (bus->dhd->rnd_buf) {
+ /* write file contents to TCM */
+ DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
+ dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+ /* Dump random content to out file */
+ dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+ /* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
+ MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+ bus->dhd->rnd_buf = NULL;
+ return BCME_OK;
+ }
+#endif /* DHD_RND_DEBUG */
+
/* Now get & write the random number(s) */
ret = dhd_get_random_bytes(rand_buf, count);
if (ret != BCME_OK) {
return ret;
}
dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
+
+#ifdef DHD_RND_DEBUG
+ /* Dump random content to out file */
+ dhd_dump_rnd_info(bus->dhd, rand_buf, count);
+#endif /* DHD_RND_DEBUG */
+
return BCME_OK;
}
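The layout written here is: the rnd_data metadata at the computed TCM address, then the address is stepped back by 'count' bytes and the raw random bytes are written below it, so firmware can locate both. A hedged sketch of the host-side byte generation, assuming dhd_get_random_bytes() is a thin wrapper over the kernel CSPRNG:

#include <linux/random.h>

/* Illustrative only: fill 'buf' with 'count' random bytes on the host.
 * The driver's dhd_get_random_bytes() is assumed to do roughly this.
 */
static int fill_random_bytes_sketch(uint8 *buf, uint count)
{
	get_random_bytes(buf, count);	/* kernel CSPRNG, void return */
	return BCME_OK;
}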
-#ifdef D2H_MINIDUMP
-bool
-dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp)
-{
- return dhdp->bus->d2h_minidump;
-}
-#endif /* D2H_MINIDUMP */
-
void
dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
{
@@ -10438,17 +12349,28 @@
DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
dhdpcie_get_oob_irq_num(bus),
GET_SEC_USEC(bus->last_oob_irq_time)));
+ DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
+ " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time)));
+ DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
+ dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level()));
#endif /* BCMPCIE_OOB_HOST_WAKE */
- DHD_ERROR(("dpc_return_busdown_count=%lu\n",
- dhd->bus->dpc_return_busdown_count));
+ DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count));
- current_time = OSL_SYSUPTIME_US();
+ current_time = OSL_LOCALTIME_NS();
DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(current_time)));
DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
" isr_exit_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->isr_entry_time),
GET_SEC_USEC(bus->isr_exit_time)));
+ DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
+ " last_non_ours_irq_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time)));
DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->dpc_entry_time),
@@ -10458,13 +12380,17 @@
GET_SEC_USEC(bus->last_process_flowring_time),
GET_SEC_USEC(bus->last_process_txcpl_time)));
DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
- " last_process_infocpl_time="SEC_USEC_FMT"\n",
+ " last_process_infocpl_time="SEC_USEC_FMT
+ " last_process_edl_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->last_process_rxcpl_time),
- GET_SEC_USEC(bus->last_process_infocpl_time)));
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time)));
DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
" resched_dpc_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->dpc_exit_time),
GET_SEC_USEC(bus->resched_dpc_time)));
+ DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_d3_inform_time)));
DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
" last_suspend_end_time="SEC_USEC_FMT"\n",
@@ -10474,6 +12400,19 @@
" last_resume_end_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->last_resume_start_time),
GET_SEC_USEC(bus->last_resume_end_time)));
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}
void
@@ -10483,6 +12422,120 @@
}
int
+dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
+{
+ uint32 save_idx, val;
+ si_t *sih = dhd->bus->sih;
+ uint32 oob_base, oob_base1;
+ uint32 wrapper_dump_list[] = {
+ AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
+ AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
+ AI_RESETSTATUS, AI_RESETCTRL,
+ AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
+ AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
+ };
+ uint8 i;
+ hndoobr_reg_t *reg;
+ cr4regs_t *cr4regs;
+ ca7regs_t *ca7regs;
+
+ save_idx = si_coreidx(sih);
+
+ DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
+
+ if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
+ for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ }
+
+ if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
+ for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
+ }
+
+ if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
+ }
+
+ DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
+
+ oob_base = si_oobr_baseaddr(sih, FALSE);
+ oob_base1 = si_oobr_baseaddr(sih, TRUE);
+ if (oob_base) {
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
+ } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+ val = R_REG(dhd->osh, &reg->intstatus[0]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, &reg->intstatus[1]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, &reg->intstatus[2]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, &reg->intstatus[3]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ }
+
+ if (oob_base1) {
+ DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
+
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
+ }
+
+ si_setcoreidx(dhd->bus->sih, save_idx);
+
+ return 0;
+}
+
+int
dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
if (dhd->bus->is_linkdown) {
@@ -10538,13 +12591,69 @@
return 0;
}
-int
-dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+bool
+dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
uint32 intstatus = 0;
uint32 intmask = 0;
- uint32 mbintstatus = 0;
+ uint32 d2h_db0 = 0;
uint32 d2h_mb_data = 0;
+
+ DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x \n", intstatus));
+ return FALSE;
+ }
+
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_mask, 0, 0);
+ if (intmask == (uint32) -1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
+ return FALSE;
+ }
+
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCID2H_MailBox, 0, 0);
+ if (d2h_db0 == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ return FALSE;
+ }
+
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+ dhd->bus->def_intmask));
+
+ return TRUE;
+}
+
+void
+dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
+{
+ DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
+ DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+}
+
+int
+dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+{
int host_irq_disabled;
DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
@@ -10553,29 +12662,31 @@
dhd_print_tasklet_status(dhd);
dhd_pcie_intr_count_dump(dhd);
- DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
- DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+ DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
+ dhdpcie_dump_resource(dhd->bus);
+
+ dhd_pcie_dump_rc_conf_space_cap(dhd);
DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
dhd_debug_get_rc_linkcap(dhd->bus)));
-#ifdef CUSTOMER_HW4_DEBUG
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("Skip dumping the PCIe registers due to PCIe Link down\n"));
+ if (dhd->bus->is_linkdown && !dhd->bus->cto_triggered) {
+ DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
+ "link may be DOWN\n"));
return 0;
}
-#endif /* CUSTOMER_HW4_DEBUG */
DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
- DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x\n",
+ DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
+ "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
PCIECFGREG_STATUS_CMD,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
PCIECFGREG_BASEADDR0,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
@@ -10584,27 +12695,39 @@
sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
sizeof(uint32))));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
+ dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+ DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
+ "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
+ PCI_TLP_HDR_LOG2,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
+ PCI_TLP_HDR_LOG3,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
+ PCI_TLP_HDR_LOG4,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
+ if (dhd->bus->sih->buscorerev >= 24) {
+ DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
+ "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
+ sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
+ dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
+ sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
+ sizeof(uint32))));
+ dhd_bus_dump_dar_registers(dhd->bus);
+ }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_int, 0, 0);
- if (intstatus == (uint32)-1) {
- DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid intstatus\n"));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
return 0;
}
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_mask, 0, 0);
- mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCID2H_MailBox, 0, 0);
- DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
- DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
- intstatus, intmask, mbintstatus));
-
- dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
- DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
- dhd->bus->def_intmask));
-
DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
+
DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
@@ -10615,63 +12738,186 @@
PCIECFGREG_PHY_DBG_CLKREQ3,
dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ if (dhd->bus->sih->buscorerev >= 24) {
+
+ DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
+ "ltssm_hist_2(0x%x)=0x%x "
+ "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
+ PCIECFGREG_PHY_LTSSM_HIST_1,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
+ PCIECFGREG_PHY_LTSSM_HIST_2,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
+ PCIECFGREG_PHY_LTSSM_HIST_3,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
+
+ DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
+ PCIECFGREG_TREFUP,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
+ PCIECFGREG_TREFUP_EXT,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
+ DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
+ "Function_Intstatus(0x%x)=0x%x "
+ "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
+ "Power_Intmask(0x%x)=0x%x\n",
+ PCIE_CORE_REG_ERRLOG,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERRLOG, 0, 0),
+ PCIE_CORE_REG_ERR_ADDR,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERR_ADDR, 0, 0),
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
+ DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
+ "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
+ DHD_ERROR(("err_code(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
+
+ dhd_pcie_dump_wrapper_regs(dhd);
+ }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+
dhd_pcie_dma_info_dump(dhd);
return 0;
}
-/*
- * TLV ID for Host whitelist Region.
- */
-#define BCM_NVRAM_WHTLST_SIGNATURE 0xFEED4B1Du
-
-/*
- * For the time being only one whitelist region supported and 64 Bit high and
- * 64 bit low address should be written.
- */
-#define BCM_HOST_WHITELIST_NBYTES 16u
-
-/* Writes host whitelist region to the TCM. FW upon initialization reads this register
- * to fetch whitelist regions, and validate DMA descriptors before programming
- * against these whitelist regions.
- */
-static int
-dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus)
-{
- int ret;
- bcm_host_whitelist_metadata_t whitelist_data;
- uint8 whtlst_buff[BCM_HOST_WHITELIST_NBYTES];
- bcm_rand_metadata_t rnd_data;
- uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
- ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data) +
- BCM_ENTROPY_HOST_NBYTES + sizeof(whitelist_data));
- whitelist_data.signature = htol32(BCM_NVRAM_WHTLST_SIGNATURE);
- whitelist_data.count = htol32(BCM_HOST_WHITELIST_NBYTES);
- ret = dhd_get_host_whitelist_region((void*)whtlst_buff,
- whitelist_data.count);
- if (ret == BCME_RANGE) {
- DHD_INFO(("%s: No Whitelist region programmed !\n",
- __FUNCTION__));
- return BCME_OK;
- }
- if (ret == BCME_OK) {
- /* write the metadata about whitelist region */
- ret = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&whitelist_data,
- sizeof(whitelist_data));
- if (ret == BCME_OK) {
- /* scale back by number of whitelist region counts */
- addr -= BCM_HOST_WHITELIST_NBYTES;
-
- /* Now write whitelist region(s) */
- ret = dhdpcie_bus_membytes(bus, TRUE, addr, whtlst_buff,
- BCM_HOST_WHITELIST_NBYTES);
- }
- }
- return ret;
-}
-
bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
return bus->force_bt_quiesce;
}
+
+#ifdef DHD_HP2P
+uint16
+dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
+{
+ if (tx)
+ return bus->hp2p_txcpl_max_items;
+ else
+ return bus->hp2p_rxcpl_max_items;
+}
+
+static uint16
+dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
+{
+ if (tx)
+ bus->hp2p_txcpl_max_items = val;
+ else
+ bus->hp2p_rxcpl_max_items = val;
+ return val;
+}
+#endif /* DHD_HP2P */
+
+static bool
+dhd_bus_tcm_test(struct dhd_bus *bus)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ int num = 0;
+ uint8 *read_buf, *write_buf;
+ uint8 init_val[NUM_PATTERNS] = {
+ 0xFFu, /* 11111111 */
+ 0x00u, /* 00000000 */
+ };
+
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!read_buf) {
+ DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!write_buf) {
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
+ DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
+
+ while (num < NUM_PATTERNS) {
+ start = bus->dongle_ram_base;
+ /* Get full mem size */
+ size = bus->ramsize;
+
+ memset(write_buf, init_val[num], MEMBLOCK);
+ while (size > 0) {
+ read_size = MIN(MEMBLOCK, size);
+ memset(read_buf, 0, read_size);
+
+ /* Write */
+ if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
+ DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Read */
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
+ DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Compare */
+ if (memcmp(read_buf, write_buf, read_size)) {
+ DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
+ __FUNCTION__, start, num));
+ prhex("Readbuf", read_buf, read_size);
+ prhex("Writebuf", write_buf, read_size);
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ }
+ num++;
+ }
+
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+
+ DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
+ return TRUE;
+}
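dhd_bus_tcm_test() sweeps the whole dongle RAM twice (all 0xFF, then all 0x00), MEMBLOCK bytes per window, and bails out on the first write, read, or compare failure. A standalone model of that walk, useful for checking the loop bounds (the buffer size and the I/O callbacks are illustrative stand-ins for MEMBLOCK and dhdpcie_bus_membytes()):

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define SKETCH_MEMBLOCK 2048	/* placeholder for the driver's MEMBLOCK */

/* Model of the pattern walk in dhd_bus_tcm_test(): for each pattern,
 * fill a block, write and read it back window by window, and compare.
 * write_fn/read_fn return 0 on success.
 */
typedef int (*mem_io_fn)(uint32_t addr, uint8_t *buf, int len);

static bool tcm_pattern_walk(uint32_t base, uint32_t ramsize,
	mem_io_fn write_fn, mem_io_fn read_fn)
{
	static const uint8_t patterns[] = { 0xFFu, 0x00u };
	uint8_t wbuf[SKETCH_MEMBLOCK], rbuf[SKETCH_MEMBLOCK];
	size_t p;

	for (p = 0; p < sizeof(patterns); p++) {
		uint32_t addr = base;
		uint32_t left = ramsize;

		memset(wbuf, patterns[p], sizeof(wbuf));
		while (left > 0) {
			int chunk = (left < SKETCH_MEMBLOCK) ? (int)left : SKETCH_MEMBLOCK;

			if (write_fn(addr, wbuf, chunk) != 0 ||
			    read_fn(addr, rbuf, chunk) != 0 ||
			    memcmp(wbuf, rbuf, chunk) != 0) {
				return false;	/* mismatch or I/O error */
			}
			left -= chunk;
			addr += chunk;
		}
	}
	return true;
}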
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.h
index 9e7dab4..d2e00f4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie.h
@@ -1,7 +1,9 @@
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -42,11 +44,13 @@
#endif /* CONFIG_ARCH_MSM */
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810)
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
#include <linux/exynos-pci-noti.h>
extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
-#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ */
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -89,10 +93,12 @@
#endif /* CONFIG_ARCH_MSM */
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810)
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
#define struct_pcie_notify struct exynos_pcie_notify
#define struct_pcie_register_event struct exynos_pcie_register_event
-#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ */
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -108,6 +114,9 @@
#define IDLE_FLOW_RING_TIMEOUT 5000
#endif /* IDLE_TX_FLOW_MGMT */
+/* HWA enabled and inited */
+#define HWA_ACTIVE(dhd) (((dhd)->hwa_enable) && ((dhd)->hwa_inited))
+
/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
#define IDMA_ENAB(dhd) ((dhd)->idma_enable)
#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited))
@@ -169,6 +178,17 @@
#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
+#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70))
+
+/*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * The Producer Index feature used by F1 gets reset on an F0 FLR;
+ * fixed in rev 68.
+ */
+#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67))
+
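These gates key buscorerev-specific workarounds; the enumeration-reset WAR, for instance, pairs naturally with the dhdpcie_ssreset_dis_enum_rst() helper added in dhd_pcie.c above. An illustrative guard (whether the driver wires it up exactly like this is an assumption):

/* Illustrative call site only; the actual hookup is an assumption. */
if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
	/* keep SSReset from re-triggering an enumeration reset on F0 FLR */
	dhdpcie_ssreset_dis_enum_rst(bus);
}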
struct dhd_bus;
struct dhd_pcie_rev {
@@ -233,6 +253,7 @@
sbpcieregs_t *reg; /* Registers for PCIE core */
uint armrev; /* CPU core revision */
+ uint coreid; /* CPU core id */
uint ramrev; /* SOCRAM core revision */
uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
@@ -322,13 +343,14 @@
#ifdef SUPPORT_LINKDOWN_RECOVERY
#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810))
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820))
#ifdef CONFIG_ARCH_MSM
uint8 no_cfg_restore;
#endif /* CONFIG_ARCH_MSM */
struct_pcie_register_event pcie_event;
#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
- * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810))
+ * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ))
*/
bool read_shm_fail;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -353,6 +375,7 @@
bool use_mailbox;
bool use_d0_inform;
void *bus_lock;
+ void *backplane_access_lock;
enum dhd_bus_low_power_state bus_low_power_state;
uint32 hostready_count; /* Number of hostready issued */
#if defined(BCMPCIE_OOB_HOST_WAKE)
@@ -364,26 +387,34 @@
ulong isr_intr_disable_count;
ulong suspend_intr_disable_count;
ulong dpc_return_busdown_count;
+ ulong non_ours_irq_count;
#ifdef BCMPCIE_OOB_HOST_WAKE
ulong oob_intr_count;
ulong oob_intr_enable_count;
ulong oob_intr_disable_count;
uint64 last_oob_irq_time;
+ uint64 last_oob_irq_enable_time;
+ uint64 last_oob_irq_disable_time;
#endif /* BCMPCIE_OOB_HOST_WAKE */
uint64 isr_entry_time;
uint64 isr_exit_time;
+ uint64 dpc_sched_time;
uint64 dpc_entry_time;
uint64 dpc_exit_time;
uint64 resched_dpc_time;
+ uint64 last_d3_inform_time;
uint64 last_process_ctrlbuf_time;
uint64 last_process_flowring_time;
uint64 last_process_txcpl_time;
uint64 last_process_rxcpl_time;
uint64 last_process_infocpl_time;
+ uint64 last_process_edl_time;
uint64 last_suspend_start_time;
uint64 last_suspend_end_time;
uint64 last_resume_start_time;
uint64 last_resume_end_time;
+ uint64 last_non_ours_irq_time;
+ uint8 hwa_enab_bmap;
bool idma_enabled;
bool ifrm_enabled;
bool dar_enabled;
@@ -393,17 +424,23 @@
bool chk_pm; /* To avoid counting of wake up from Runtime PM */
#endif /* DHD_PCIE_RUNTIMEPM */
bool _dar_war;
-#ifdef D2H_MINIDUMP
- bool d2h_minidump; /* This flag will be set if Host and FW handshake to collect minidump */
- bool d2h_minidump_override; /* Force disable minidump through dhd IOVAR */
-#endif /* D2H_MINIDUMP */
uint8 dma_chan;
bool cto_enable; /* enable PCIE CTO Prevention and recovery */
uint32 cto_threshold; /* PCIE CTO timeout threshold */
+ bool cto_triggered; /* CTO is triggered */
int pwr_req_ref;
bool flr_force_fail; /* user intends to simulate flr force fail */
bool intr_enabled; /* ready to receive interrupts from dongle */
bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+ ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+ bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
+ bool rc_ep_l1ss_cap; /* RC and EP L1SS capable */
+ uint16 hp2p_txcpl_max_items;
+ uint16 hp2p_rxcpl_max_items;
+ /* PCIE coherent status */
+ uint32 coherent_state;
} dhd_bus_t;
#ifdef DHD_MSI_SUPPORT
@@ -434,6 +471,7 @@
extern void dhdpcie_free_irq(dhd_bus_t *bus);
extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
#else
@@ -451,16 +489,33 @@
extern int dhdpcie_disable_irq(dhd_bus_t *bus);
extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
extern int dhdpcie_enable_irq(dhd_bus_t *bus);
+
+extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
+
extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
bool is_write, uint32 writeval);
+extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+ bool is_write, uint32 writeval);
extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_disable_device(dhd_bus_t *bus);
extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern void dhdpcie_dump_resource(dhd_bus_t *bus);
extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
+void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
+#endif // endif
extern int dhdpcie_enable_device(dhd_bus_t *bus);
@@ -468,20 +523,11 @@
extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
-extern int dhdpcie_get_oob_irq_num(dhd_bus_t *bus);
+extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_level(void);
#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_DISABLE_ASPM
-extern void dhd_bus_aspm_enable(dhd_bus_t *bus, bool enable);
-
-static INLINE void
-dhd_pcie_config_write(osl_t *osh, uint offset, uint size, uint val)
-{
- OSL_DELAY(100);
- return OSL_PCI_WRITE_CONFIG(osh, offset, size, val);
-}
-#endif /* DHD_DISABLE_ASPM */
-
#if defined(CONFIG_ARCH_EXYNOS)
#define SAMSUNG_PCIE_VENDOR_ID 0x144d
#if defined(CONFIG_MACH_UNIVERSAL5433)
@@ -493,7 +539,8 @@
#elif defined(CONFIG_SOC_EXYNOS8890)
#define SAMSUNG_PCIE_DEVICE_ID 0xa544
#define SAMSUNG_PCIE_CH_NUM 0
-#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
+#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820)
#define SAMSUNG_PCIE_DEVICE_ID 0xecec
#define SAMSUNG_PCIE_CH_NUM 0
#else
@@ -511,8 +558,10 @@
#define MSM_PCIE_DEVICE_ID 0x0104
#elif defined(CONFIG_ARCH_MSM8998)
#define MSM_PCIE_DEVICE_ID 0x0105
-#elif defined(CONFIG_ARCH_SDM845)
+#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
#define MSM_PCIE_DEVICE_ID 0x0106
+#elif defined(USE_CUSTOM_MSM_PCIE)
+#define MSM_PCIE_DEVICE_ID MSM_PCIE_CUSTOM_DEVICE_ID
#else
#error "Not supported platform"
#endif // endif
@@ -528,6 +577,11 @@
#define TEGRA_PCIE_DEVICE_ID 0x4347
#endif /* CONFIG_ARCH_TEGRA */
+#if defined(BOARD_HIKEY)
+#define HIKEY_PCIE_VENDOR_ID 0x19e5
+#define HIKEY_PCIE_DEVICE_ID 0x3660
+#endif /* BOARD_HIKEY */
+
#define DUMMY_PCIE_VENDOR_ID 0xffff
#define DUMMY_PCIE_DEVICE_ID 0xffff
@@ -543,12 +597,18 @@
#elif defined(CONFIG_ARCH_TEGRA)
#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
+#elif defined(BOARD_HIKEY)
+#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
#else
/* Use dummy vendor and device IDs */
#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
#endif /* CONFIG_ARCH_EXYNOS */
+#define DHD_REGULAR_RING 0
+#define DHD_HP2P_RING 1
+
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
#ifdef CONFIG_MACH_UNIVERSAL5433
extern int exynos_pcie_pm_suspend(void);
@@ -595,12 +655,16 @@
static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE void
+dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
+{ return; }
int dhdpcie_config_check(dhd_bus_t *bus);
int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
int dhdpcie_config_save(dhd_bus_t *bus);
int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
+extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
@@ -620,9 +684,14 @@
return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
-extern int dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char *program, char *nv_path);
+extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
+ char *clm_path, char *txcap_path);
extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
+#ifdef DHD_HP2P
+extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
+#endif // endif
+
#endif /* dhd_pcie_h */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
index 79b4d35..6e81e06 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
@@ -1,7 +1,9 @@
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -82,35 +84,22 @@
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
-#define OSL_PKTTAG_CLEAR(p) \
-do { \
- struct sk_buff *s = (struct sk_buff *)(p); \
- ASSERT(OSL_PKTTAG_SZ == 32); \
- *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
- *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
- *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
- *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
-} while (0)
-
+#ifdef FORCE_TPOWERON
+extern uint32 tpoweron_scale;
+#endif /* FORCE_TPOWERON */
/* user defined data structures */
-typedef struct dhd_pc_res {
- uint32 bar0_size;
- void* bar0_addr;
- uint32 bar1_size;
- void* bar1_addr;
-} pci_config_res, *pPci_config_res;
-
typedef bool (*dhdpcie_cb_fn_t)(void *);
typedef struct dhdpcie_info
{
dhd_bus_t *bus;
- osl_t *osh;
+ osl_t *osh;
struct pci_dev *dev; /* pci device handle */
- volatile char *regs; /* pci device memory va */
- volatile char *tcm; /* pci device memory va */
- uint32 tcm_size; /* pci device memory size */
+ volatile char *regs; /* pci device memory va */
+ volatile char *tcm; /* pci device memory va */
+ uint32 bar1_size; /* pci device memory size */
+ uint32 curr_bar1_win; /* current PCIEBar1Window setting */
struct pcos_info *pcos_info;
uint16 last_intrstatus; /* to cache intrstatus */
int irq;
@@ -205,6 +194,12 @@
static int dhdpcie_pm_system_resume_noirq(struct device * dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
+
+uint32
+dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval);
+
static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
{
vendor: VENDOR_CYPRESS,
@@ -247,13 +242,14 @@
static struct pci_driver dhdpcie_driver = {
node: {&dhdpcie_driver.node, &dhdpcie_driver.node},
+#ifndef BCMDHDX
name: "pcieh",
+#else
+ name: "pciehx",
+#endif /* BCMDHDX */
id_table: dhdpcie_pci_devid,
probe: dhdpcie_pci_probe,
remove: dhdpcie_pci_remove,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- save_state: NULL,
-#endif // endif
#if defined(DHD_PCIE_RUNTIMEPM) || defined(DHD_PCIE_NATIVE_RUNTIMEPM)
.driver.pm = &dhd_pcie_pm_ops,
#else
@@ -371,6 +367,273 @@
}
#endif /* USE_SMMU_ARCH_MSM */
+#ifdef FORCE_TPOWERON
+static void
+dhd_bus_get_tpoweron(dhd_bus_t *bus)
+{
+
+ uint32 tpoweron_rc;
+ uint32 tpoweron_ep;
+
+ tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
+ __FUNCTION__, tpoweron_rc, tpoweron_ep));
+}
+
+static void
+dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
+{
+
+ dhd_bus_get_tpoweron(bus);
+ /* Set the tpoweron */
+ DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
+ dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+ dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+
+ dhd_bus_get_tpoweron(bus);
+
+}
+
+static bool
+dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
+{
+ /*
+ * On Fire's reference platform, coming out of L1.2,
+ * there is a constant delay of 45us between CLKREQ# and a stable REFCLK.
+ * Because of this delay, with tPowerOn < 50us,
+ * there is a chance that the refclk sense triggers on noise.
+ *
+ * Whichever chip needs a forced tPowerOn of 50us should be listed below.
+ */
+ if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif /* FORCE_TPOWERON */
+
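    For context, tpoweron_scale is written into the L1SS Control 2 register of both RC and EP by dhd_bus_set_tpoweron(). Per the PCIe L1 PM Substates definition, T_Power_On is encoded as a value field (bits 7:3) multiplied by a scale field (bits 1:0: 2us/10us/100us), so a forced 50us can be encoded, for example, as value 25 with the 2us scale or value 5 with the 10us scale. A hedged sketch of one such encoding (the helper is illustrative, not part of the driver):

    /* Sketch: encode a T_Power_On time (us) per the PCIe L1 PM Substates layout:
     * scale in bits [1:0] (0 = 2us, 1 = 10us, 2 = 100us), value in bits [7:3].
     */
    static uint16
    tpoweron_encode_us(uint32 us)
    {
            if (us <= 2 * 31)
                    return (uint16)(((us / 2) << 3) | 0x0);
            if (us <= 10 * 31)
                    return (uint16)(((us / 10) << 3) | 0x1);
            return (uint16)(((us / 100) << 3) | 0x2);
    }
    /* e.g. tpoweron_encode_us(50) == (25 << 3) | 0 == 0xC8, i.e. 25 x 2us */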
+static bool
+dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
+{
+ uint32 linkctrl_before;
+ uint32 linkctrl_after = 0;
+ uint8 linkctrl_asm;
+ char *device;
+
+ device = (dev == bus->dev) ? "EP" : "RC";
+
+ linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);
+
+ if (enable) {
+ if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
+ DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Enable only L1 ASPM (bit 1) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
+ } else {
+ if (linkctrl_asm == 0) {
+ DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Disable complete ASPM (bit 1 and bit 0) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
+ }
+
+ linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
+ __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
+ linkctrl_before, linkctrl_after));
+
+ return TRUE;
+}
+
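    The ASPM control field occupies the low two bits of Link Control (bit 0 = L0s, bit 1 = L1), so assuming PCIE_ASPM_L1_ENAB is 0x2 and PCIE_ASPM_ENAB is 0x3, the read-modify-write above boils down to the following sketch:

    /* Illustration only; 0x2/0x3 are assumed values of PCIE_ASPM_L1_ENAB/PCIE_ASPM_ENAB. */
    static uint32
    aspm_rmw_example(uint32 linkctrl, bool enable)
    {
            if (enable)
                    return linkctrl | 0x2;          /* set L1 only, leave L0s untouched */
            return linkctrl & ~0x3u;                /* clear both L0s and L1 */
    }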
+static bool
+dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
+{
+ uint32 rc_aspm_cap;
+ uint32 ep_aspm_cap;
+
+ /* RC ASPM capability */
+ rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (rc_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* EP ASPM capability */
+ ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (ep_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+bool
+dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
+{
+ bool ret;
+
+ if (!bus->rc_ep_aspm_cap) {
+ DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap));
+ return FALSE;
+ }
+
+ if (enable) {
+ /* Enable only L1 ASPM first RC then EP */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ } else {
+ /* Disable complete ASPM first EP then RC */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ }
+
+ return ret;
+}
+
+static void
+dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
+{
+ uint32 l1ssctrl_before;
+ uint32 l1ssctrl_after = 0;
+ uint8 l1ss_ep;
+ char *device;
+
+ device = (dev == bus->dev) ? "EP" : "RC";
+
+ /* Extended Capability Reg */
+ l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);
+
+ if (enable) {
+ if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
+ DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n",
+ __FUNCTION__, device, l1ssctrl_before));
+ return;
+ }
+ dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
+ TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
+ } else {
+ if (l1ss_ep == 0) {
+ DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
+ __FUNCTION__, device, l1ssctrl_before));
+ return;
+ }
+ dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
+ TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
+ }
+ l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
+ __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
+ l1ssctrl_before, l1ssctrl_after));
+
+}
+
+static bool
+dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
+{
+ uint32 rc_l1ss_cap;
+ uint32 ep_l1ss_cap;
+
+ /* RC Extended Capability */
+ rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ if (rc_l1ss_cap == BCME_ERROR) {
+ DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* EP Extended Capability */
+ ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ if (ep_l1ss_cap == BCME_ERROR) {
+ DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+void
+dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
+{
+ bool ret;
+
+ if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
+ DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
+ return;
+ }
+
+ /* Disable ASPM of RC and EP */
+ ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+
+ if (enable) {
+ /* Enable RC then EP */
+ dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
+ dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
+ } else {
+ /* Disable EP then RC */
+ dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
+ dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
+ }
+
+ /* Re-enable ASPM of RC and EP only if this function disabled it above */
+ if (ret == TRUE) {
+ dhd_bus_aspm_enable_rc_ep(bus, TRUE);
+ }
+}
+
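    A minimal usage sketch of the two helpers above, e.g. from attach once rc_ep_aspm_cap and rc_ep_l1ss_cap are cached: ASPM is turned on first, then the L1.x substates, with dhd_bus_l1ss_enable_rc_ep() parking ASPM off internally while it programs L1SS. The wrapper function name is illustrative only.

    /* Sketch only: order in which the power-state helpers are intended to be driven. */
    static void
    dhd_bus_enable_low_power_states(dhd_bus_t *bus)
    {
            if (!bus->rc_ep_aspm_cap)
                    return;

            dhd_bus_aspm_enable_rc_ep(bus, TRUE);           /* L1 ASPM: RC first, then EP */

            if (bus->rc_ep_l1ss_cap)
                    dhd_bus_l1ss_enable_rc_ep(bus, TRUE);   /* L1.1/L1.2 substates */
    }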
+void
+dhd_bus_aer_config(dhd_bus_t *bus)
+{
+ uint32 val;
+
+ DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
+ val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+
+ DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
+ val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+}
+
#ifdef DHD_PCIE_RUNTIMEPM
static int dhdpcie_pm_suspend(struct device *dev)
{
@@ -687,19 +950,37 @@
extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+static void
+dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
+{
+ DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
+ "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ suspend_state,
+ PCIECFGREG_BASEADDR0,
+ dhd_pcie_config_read(bus->osh,
+ PCIECFGREG_BASEADDR0, sizeof(uint32)),
+ PCIECFGREG_BASEADDR1,
+ dhd_pcie_config_read(bus->osh,
+ PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(bus->osh,
+ PCIE_CFG_PMCSR, sizeof(uint32))));
+}
+
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
int ret;
-#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhdpcie_info_t *pch = pci_get_drvdata(dev);
dhd_bus_t *bus = pch->bus;
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
if (bus->is_linkdown) {
DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
return BCME_ERROR;
}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ DHD_ERROR(("%s: Enter\n", __FUNCTION__));
+ dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
@@ -708,9 +989,7 @@
pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_enable_wake(dev, PCI_D0, TRUE);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
if (pci_is_enabled(dev))
-#endif // endif
pci_disable_device(dev);
ret = pci_set_power_state(dev, PCI_D3hot);
@@ -721,6 +1000,7 @@
#ifdef OEM_ANDROID
dev->state_saved = FALSE;
#endif /* OEM_ANDROID */
+ dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
return ret;
}
@@ -752,15 +1032,20 @@
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
int err = 0;
-#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhdpcie_info_t *pch = pci_get_drvdata(dev);
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
pci_load_and_free_saved_state(dev, &pch->state);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ DHD_ERROR(("%s: Enter\n", __FUNCTION__));
#ifdef OEM_ANDROID
dev->state_saved = TRUE;
#endif /* OEM_ANDROID */
pci_restore_state(dev);
+#ifdef FORCE_TPOWERON
+ if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
+ dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
+ }
+#endif /* FORCE_TPOWERON */
err = pci_enable_device(dev);
if (err) {
printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
@@ -772,7 +1057,8 @@
printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
goto out;
}
-
+ BCM_REFERENCE(pch);
+ dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
out:
return err;
}
@@ -828,6 +1114,136 @@
return bcmerror;
}
+/**
+ * dhdpcie_os_setbar1win
+ *
+ * Interface function for setting the BAR1 window, so that the
+ * OS layer stays aware of the current window position.
+ *
+ * @bus: dhd bus context
+ * @addr: new backplane window address for BAR1
+ */
+void
+dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr);
+ pch->curr_bar1_win = addr;
+}
+
+/**
+ * dhdpcie_os_chkbpoffset
+ *
+ * Check whether the provided address falls within the current BAR1
+ * window; if not, move the window.
+ *
+ * @pch: dhd pcie info context
+ * @offset: backplane address that the caller wants to access
+ *
+ * Return: new offset for access
+ */
+static ulong
+dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset)
+{
+ /* Determine BAR1 backplane window using window size
+ * Window address mask should be ~(size - 1)
+ */
+ uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1));
+
+ if (bpwin != pch->curr_bar1_win) {
+ /* Move BAR1 window */
+ dhdpcie_os_setbar1win(pch->bus, bpwin);
+ }
+
+ return offset - bpwin;
+}
+
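    A worked example of the window arithmetic above, assuming a 4 MB BAR1 (bar1_size == 0x400000): the address mask is ~(0x400000 - 1) == 0xFFC00000, so an access to backplane address 0x18C01234 slides the window to 0x18C00000 and returns 0x1234 as the in-window offset.

    /* Worked example, assuming bar1_size == 0x400000 (4 MB BAR1). */
    static ulong
    bar1_window_example(void)
    {
            uint32 bar1_size = 0x400000;
            ulong offset = 0x18C01234;                              /* requested backplane address */
            uint32 bpwin = (uint32)(offset & ~(bar1_size - 1));     /* 0x18C00000: new window base */

            return offset - bpwin;                                  /* 0x1234: offset used against bus->tcm */
    }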
+/**
+ * dhdpcie os layer tcm read/write interface
+ */
+void
+dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+}
+
+uint8
+dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint8 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+ return data;
+}
+
+void
+dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+}
+
+uint16
+dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint16 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+ return data;
+}
+
+void
+dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+}
+
+uint32
+dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint32 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+ return data;
+}
+
+#ifdef DHD_SUPPORT_64BIT
+void
+dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+}
+
+uint64
+dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint64 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+ return data;
+}
+#endif /* DHD_SUPPORT_64BIT */
+
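    A small usage sketch of the accessors above; because every call routes through dhdpcie_os_chkbpoffset(), a caller can hand in a full backplane address that lies outside the currently mapped BAR1 region and the window is moved transparently. The 0x18D00000 address is purely illustrative.

    /* Sketch only: write/read a TCM word through the OS-layer accessors. */
    static void
    dhd_tcm_rw_example(dhd_bus_t *bus)
    {
            uint32 val;

            dhdpcie_os_wtcm32(bus, 0x18D00000, 0xDEADBEEF); /* window slides here if needed */
            val = dhdpcie_os_rtcm32(bus, 0x18D00000);       /* read back through the same window */
            DHD_ERROR(("tcm readback: 0x%x\n", val));
    }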
uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
@@ -864,16 +1280,15 @@
*/
uint32
-dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
uint32 writeval)
{
int cap_ptr = 0;
uint32 ret = -1;
uint32 readval;
- if (!(bus->rc_dev)) {
- DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
- __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ if (!(pdev)) {
+ DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
return ret;
}
@@ -882,46 +1297,60 @@
/* removing max EXT_CAP_ID check as
* linux kernel definition's max value is not updated yet as per spec
*/
- cap_ptr = pci_find_ext_capability(bus->rc_dev, cap);
+ cap_ptr = pci_find_ext_capability(pdev, cap);
} else {
/* removing max PCI_CAP_ID_MAX check as
* previous kernel versions don't have this definition
*/
- cap_ptr = pci_find_capability(bus->rc_dev, cap);
+ cap_ptr = pci_find_capability(pdev, cap);
}
/* Return if capability with given ID not found */
if (cap_ptr == 0) {
- DHD_ERROR(("%s: RC %x:%x PCI Cap(0x%02x) not supported.\n",
- __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, cap));
+ DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
+ __FUNCTION__, cap));
return BCME_ERROR;
}
if (is_write) {
- ret = pci_write_config_dword(bus->rc_dev, (cap_ptr + offset), writeval);
- if (ret) {
- DHD_ERROR(("%s: pci_write_config_dword failed. cap=%d offset=%d\n",
- __FUNCTION__, cap, offset));
- return BCME_ERROR;
- }
+ pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
ret = BCME_OK;
} else {
- ret = pci_read_config_dword(bus->rc_dev, (cap_ptr + offset), &readval);
-
- if (ret) {
- DHD_ERROR(("%s: pci_read_config_dword failed. cap=%d offset=%d\n",
- __FUNCTION__, cap, offset));
- return BCME_ERROR;
- }
+ pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
ret = readval;
}
return ret;
}
+uint32
+dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->rc_dev)) {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
+}
+
+uint32
+dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->dev)) {
+ DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
+}
+
/* API wrapper to read Root Port link capability
* Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
*/
@@ -935,6 +1364,21 @@
return linkcap;
}
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
+{
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ if (state) {
+ /* Sleep */
+ bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
+ PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
+ } else {
+ uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
+ 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
+ }
+ }
+}
+
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
int rc;
@@ -942,6 +1386,7 @@
struct pci_dev *dev = bus->dev;
if (state) {
+ dhdpcie_config_save_restore_coherent(bus, state);
#if !defined(BCMPCIE_OOB_HOST_WAKE)
dhdpcie_pme_active(bus->osh, state);
#endif // endif
@@ -953,11 +1398,21 @@
rc = dhdpcie_resume_host_dev(bus);
if (!rc) {
rc = dhdpcie_resume_dev(dev);
+ if (PCIECTO_ENAB(bus)) {
+ /* reinit CTO configuration
+ * because cfg space got reset at D3 (PERST)
+ */
+ dhdpcie_cto_cfg_init(bus, TRUE);
+ }
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ dhdpcie_ssreset_dis_enum_rst(bus);
+ }
#if !defined(BCMPCIE_OOB_HOST_WAKE)
dhdpcie_pme_active(bus->osh, state);
#endif // endif
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
+ dhdpcie_config_save_restore_coherent(bus, state);
+#if defined(OEM_ANDROID)
#if defined(DHD_HANG_SEND_UP_TEST)
if (bus->is_linkdown ||
bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL) {
@@ -967,12 +1422,11 @@
bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
dhd_os_send_hang_message(bus->dhd);
}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID) */
+#endif /* OEM_ANDROID */
}
return rc;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
static int dhdpcie_device_scan(struct device *dev, void *data)
{
struct pci_dev *pcidev;
@@ -1001,19 +1455,12 @@
return 0;
}
-#endif /* LINUX_VERSION >= 2.6.0 */
int
dhdpcie_bus_register(void)
{
int error = 0;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- if (!(error = pci_module_init(&dhdpcie_driver)))
- return 0;
-
- DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
-#else
if (!(error = pci_register_driver(&dhdpcie_driver))) {
bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
if (!error) {
@@ -1027,7 +1474,6 @@
pci_unregister_driver(&dhdpcie_driver);
error = BCME_ERROR;
}
-#endif /* LINUX_VERSION < 2.6.0 */
return error;
}
@@ -1041,15 +1487,30 @@
int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+#ifndef MULTI_CHIP_SUPPORT
+ /* Don't enumerate more than one device */
+ if (dhdpcie_init_succeeded) {
+ DHD_TRACE(("%s: PCIe Enumeration is already done.\n",
+ __func__));
+ return -ENODEV;
+ }
+#endif /* MULTI_CHIP_SUPPORT */
if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
return -ENODEV;
}
+
printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
"(good PCI location)\n", pdev->bus->number,
PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+ if (dhdpcie_init_succeeded == TRUE) {
+ DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
+ __FUNCTION__));
+ return -ENODEV;
+ }
+
if (dhdpcie_init (pdev)) {
DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
return -ENODEV;
@@ -1081,7 +1542,7 @@
{
if (pch) {
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (!dhd_download_fw_on_driverload) {
+ if (pch->default_state) {
pci_load_and_free_saved_state(pch->dev, &pch->default_state);
}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
@@ -1114,10 +1575,10 @@
#endif /* CONFIG_ARCH_MSM */
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810)
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
exynos_pcie_deregister_event(&bus->pcie_event);
#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
- * CONFIG_SOC_EXYNOS9810
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
*/
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -1126,10 +1587,12 @@
dhdpcie_bus_release(bus);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+
if (pci_is_enabled(pdev))
-#endif // endif
pci_disable_device(pdev);
+#if defined(CONFIG_ARCH_MSM)
+ msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number, pdev, NULL, 0);
+#endif // endif
#ifdef BCMPCIE_OOB_HOST_WAKE
/* pcie os info detach */
MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
@@ -1282,8 +1745,20 @@
{
phys_addr_t bar0_addr, bar1_addr;
ulong bar1_size;
- struct pci_dev *pdev = NULL;
- pdev = dhdpcie_info->dev;
+ struct pci_dev *pdev = dhdpcie_info->dev;
+#if defined(CONFIG_ARCH_MSM) && !defined(ENABLE_INSMOD_NO_FW_LOAD)
+ int ret;
+ /* enable PCIe link */
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pdev->bus->number,
+ pdev, NULL, MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+ if (ret) {
+ DHD_ERROR(("%s: MSM_PCIE_RESUME failed : %d\n", __FUNCTION__, ret));
+ goto err;
+ }
+ DHD_ERROR(("PCIe:%s:enabled link\n", __FUNCTION__));
+ /* recover the config space of both RC and Endpoint */
+ msm_pcie_recover_config(pdev);
+#endif /* CONFIG_ARCH_MSM && !ENABLE_INSMOD_NO_FW_LOAD */
#ifdef EXYNOS_PCIE_MODULE_PATCH
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
if (bcm_pcie_default_state) {
@@ -1312,9 +1787,9 @@
}
dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
- dhdpcie_info->tcm_size =
+ dhdpcie_info->bar1_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
@@ -1381,10 +1856,41 @@
}
+void dhdpcie_dump_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *pch;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* BAR0 */
+ DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
+ DONGLE_REG_MAP_SIZE));
+
+ /* BAR1 */
+ DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
+ pch->bar1_size));
+}
+
#ifdef SUPPORT_LINKDOWN_RECOVERY
#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
(defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810)))
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)))
void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
{
struct pci_dev *pdev = (struct pci_dev *)noti->user;
@@ -1405,7 +1911,7 @@
#endif /* CONFIG_ARCH_MSM */
bus->is_linkdown = 1;
DHD_OS_WAKE_LOCK(dhd);
- dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
dhd_os_send_hang_message(dhd);
}
}
@@ -1414,7 +1920,8 @@
}
#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
- * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810))
+ * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || \
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820))
*/
#endif /* SUPPORT_LINKDOWN_RECOVERY */
@@ -1524,9 +2031,41 @@
dhdpcie_info->bus = bus;
bus->is_linkdown = 0;
+ bus->no_bus_init = FALSE;
+ bus->cto_triggered = 0;
+
+ bus->rc_dev = NULL;
/* Get RC Device Handle */
- bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
+ if (bus->dev->bus) {
+ /* self member of structure pci_bus is bridge device as seen by parent */
+ bus->rc_dev = bus->dev->bus->self;
+ DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
+ bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
+ } else {
+ DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
+ }
+
+ /* if rc_dev is still NULL, try to get from vendor/device IDs */
+ if (bus->rc_dev == NULL) {
+ bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
+ DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
+ PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
+ }
+
+ bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
+ bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
+ DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
+#ifdef DHD_DISABLE_ASPM
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+#endif /* DHD_DISABLE_ASPM */
+
+#ifdef FORCE_TPOWERON
+ if (dhdpcie_chip_req_forced_tpoweron(bus)) {
+ dhd_bus_set_tpoweron(bus, tpoweron_scale);
+ }
+#endif /* FORCE_TPOWERON */
#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
defined(CONFIG_ARCH_APQ8084)
@@ -1546,18 +2085,17 @@
bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
msm_pcie_register_event(&bus->pcie_event);
bus->no_cfg_restore = FALSE;
- bus->no_bus_init = FALSE;
#endif /* CONFIG_ARCH_MSM */
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
- defined(CONFIG_SOC_EXYNOS9810)
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
bus->pcie_event.user = pdev;
bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
bus->pcie_event.callback = dhdpcie_linkdown_cb;
exynos_pcie_register_event(&bus->pcie_event);
#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
- * CONFIG_SOC_EXYNOS9810
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
*/
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
bus->read_shm_fail = FALSE;
@@ -1606,10 +2144,17 @@
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
- if (dhd_register_if(bus->dhd, 0, TRUE)) {
+ if (dhd_attach_net(bus->dhd, TRUE)) {
DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
break;
}
+#ifdef WL_VIF_SUPPORT
+ /* Attach to the virtual interface */
+ DHD_TRACE(("%s(): Calling dhd_register_vif() \n", __FUNCTION__));
+ if (dhd_register_vif(bus->dhd) != 0) {
+ DHD_ERROR(("%s(): ERROR.. dhd_register_vif() failed\n", __FUNCTION__));
+ }
+#endif // endif
dhdpcie_init_succeeded = TRUE;
@@ -1699,14 +2244,12 @@
dhdpcie_isr(int irq, void *arg)
{
dhd_bus_t *bus = (dhd_bus_t*)arg;
- int32 ret;
- bus->isr_entry_time = OSL_SYSUPTIME_US();
- ret = dhdpcie_bus_isr(bus);
- bus->isr_exit_time = OSL_SYSUPTIME_US();
- if (ret)
- return TRUE;
- else
- return FALSE;
+ bus->isr_entry_time = OSL_LOCALTIME_NS();
+ if (!dhdpcie_bus_isr(bus)) {
+ DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
+ }
+ bus->isr_exit_time = OSL_LOCALTIME_NS();
+ return IRQ_HANDLED;
}
int
@@ -1754,14 +2297,9 @@
int
dhdpcie_irq_disabled(dhd_bus_t *bus)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
struct irq_desc *desc = irq_to_desc(bus->dev->irq);
/* depth will be zero, if enabled */
return desc->depth;
-#else
- /* return ERROR by default as there is no support for lower versions */
- return BCME_ERROR;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
}
int
@@ -1864,7 +2402,8 @@
return BCME_ERROR;
}
- pci_disable_device(bus->dev);
+ if (pci_is_enabled(bus->dev))
+ pci_disable_device(bus->dev);
return 0;
}
@@ -1913,6 +2452,15 @@
DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
"Skip the bus init\n", __FUNCTION__, vid, saved_vid));
bus->no_bus_init = TRUE;
+ /* Check if the PCIe link is down */
+ if (vid == (uint32)-1) {
+ bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = TRUE;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ }
return BCME_ERROR;
}
}
@@ -1974,9 +2522,9 @@
}
bus->regs = dhdpcie_info->regs;
- dhdpcie_info->tcm_size =
+ dhdpcie_info->bar1_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
if (!dhdpcie_info->tcm) {
DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
@@ -2067,7 +2615,49 @@
}
#ifdef BCMPCIE_OOB_HOST_WAKE
-int dhdpcie_get_oob_irq_num(dhd_bus_t *bus)
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+extern int dhd_get_wlan_oob_gpio(void);
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+
+int dhdpcie_get_oob_irq_level(void)
+{
+ int gpio_level;
+
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+ gpio_level = dhd_get_wlan_oob_gpio();
+#else
+ gpio_level = BCME_UNSUPPORTED;
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+ return gpio_level;
+}
+
+int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+
+ return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
+}
+
+int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
{
dhdpcie_info_t *pch;
dhdpcie_os_info_t *dhdpcie_osinfo;
@@ -2122,9 +2712,11 @@
if (enable) {
enable_irq(dhdpcie_osinfo->oob_irq_num);
bus->oob_intr_enable_count++;
+ bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
} else {
disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
bus->oob_intr_disable_count++;
+ bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
}
dhdpcie_osinfo->oob_irq_enabled = enable;
}
@@ -2138,7 +2730,7 @@
DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
bus = (dhd_bus_t *)data;
dhdpcie_oob_intr_set(bus, FALSE);
- bus->last_oob_irq_time = OSL_SYSUPTIME_US();
+ bus->last_oob_irq_time = OSL_LOCALTIME_NS();
bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
#ifdef DHD_PCIE_RUNTIMEPM
@@ -2431,3 +3023,172 @@
else
return NULL;
}
+
+#define KIRQ_PRINT_BUF_LEN 256
+
+void
+dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
+{
+ unsigned long flags = 0;
+ struct irq_desc *desc;
+ int i; /* cpu iterator */
+ struct bcmstrbuf strbuf;
+ char tmp_buf[KIRQ_PRINT_BUF_LEN];
+
+ desc = irq_to_desc(irq_num);
+ if (!desc) {
+ DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
+ return;
+ }
+ bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
+ for_each_online_cpu(i)
+ bcm_bprintf(&strbuf, "%10u ",
+ desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
+ if (desc->irq_data.chip) {
+ if (desc->irq_data.chip->name)
+ bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
+ else
+ bcm_bprintf(&strbuf, " %8s", "-");
+ } else {
+ bcm_bprintf(&strbuf, " %8s", "None");
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+ if (desc->irq_data.domain)
+ bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
+ bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
+#endif // endif
+#endif /* LINUX VERSION > 3.1.0 */
+
+ if (desc->name)
+ bcm_bprintf(&strbuf, "-%-8s", desc->name);
+
+ DHD_ERROR(("%s\n", strbuf.origbuf));
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
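    Given the format strings above, the per-IRQ line printed by dhd_print_kirqstats() would look roughly like the following on a hypothetical 4-CPU system (irq number, per-CPU counts, chip name, hwirq, trigger type and descriptor name are all illustrative):

    dhd irq 178:      1023       897         0         0    GICv3 45 Level   -wlan_pcie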
+void
+dhd_show_kirqstats(dhd_pub_t *dhd)
+{
+ unsigned int irq = -1;
+#ifdef BCMPCIE
+ dhdpcie_get_pcieirq(dhd->bus, &irq);
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
+#endif /* BCMSDIO */
+ if (irq != -1) {
+#ifdef BCMPCIE
+ DHD_ERROR(("DUMP data kernel irq stats : \n"));
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
+#endif /* BCMSDIO */
+ dhd_print_kirqstats(dhd, irq);
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ irq = dhdpcie_get_oob_irq_num(dhd->bus);
+ if (irq) {
+ DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
+ dhd_print_kirqstats(dhd, irq);
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+#ifdef DHD_FW_COREDUMP
+#ifdef BCMDHDX
+int
+dhdx_dongle_mem_dump(void)
+{
+ if (!g_dhd_bus) {
+ DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ dhd_bus_dump_console_buffer(g_dhd_bus);
+ dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+ g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ dhd_bus_mem_dump(g_dhd_bus->dhd);
+ return 0;
+}
+EXPORT_SYMBOL(dhdx_dongle_mem_dump);
+#else
+int
+dhd_dongle_mem_dump(void)
+{
+ if (!g_dhd_bus) {
+ DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ dhd_bus_dump_console_buffer(g_dhd_bus);
+ dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+ g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ dhd_bus_mem_dump(g_dhd_bus->dhd);
+ return 0;
+}
+EXPORT_SYMBOL(dhd_dongle_mem_dump);
+#endif /* BCMDHDX */
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMDHDX
+bool
+dhdx_bus_check_driver_up(void)
+{
+ dhd_bus_t *bus;
+ dhd_pub_t *dhdp;
+ bool isup = FALSE;
+
+ bus = (dhd_bus_t *)g_dhd_bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return isup;
+ }
+
+ dhdp = bus->dhd;
+ if (dhdp) {
+ isup = dhdp->up;
+ }
+
+ return isup;
+}
+EXPORT_SYMBOL(dhdx_bus_check_driver_up);
+#else
+bool
+dhd_bus_check_driver_up(void)
+{
+ dhd_bus_t *bus;
+ dhd_pub_t *dhdp;
+ bool isup = FALSE;
+
+ bus = (dhd_bus_t *)g_dhd_bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return isup;
+ }
+
+ dhdp = bus->dhd;
+ if (dhdp) {
+ isup = dhdp->up;
+ }
+
+ return isup;
+}
+EXPORT_SYMBOL(dhd_bus_check_driver_up);
+#endif /* BCMDHDX */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.c
index b56077a..8176ed3 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -2,7 +2,9 @@
* Broadcom Dongle Host Driver (DHD)
* Prefered Network Offload and Wi-Fi Location Service(WLS) code.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -25,7 +27,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.c 702813 2017-06-05 04:15:59Z $
+ * $Id: dhd_pno.c 812762 2019-04-02 09:36:26Z $
*/
#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
@@ -80,14 +82,6 @@
#endif /* IL_BIGENDINA */
#ifdef OEM_ANDROID
-#define NULL_CHECK(p, s, err) \
- do { \
- if (!(p)) { \
- printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
- err = BCME_ERROR; \
- return err; \
- } \
- } while (0)
#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
#define PNO_BESTNET_LEN WLC_IOCTL_MEDLEN
@@ -214,7 +208,7 @@
convert_fw_rel_time_to_systime(struct timespec64 *ts, uint32 fw_ts_ms)
#else
convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms)
-#endif // endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
{
return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000));
}
@@ -235,12 +229,11 @@
if (gscan_params->epno_cfg.num_epno_ssid > 0) {
i = 0;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&gscan_params->epno_cfg.epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (i++ == idx) {
memcpy(res->ssid, iter->SSID, iter->SSID_len);
res->ssid_len = iter->SSID_len;
@@ -658,11 +651,9 @@
return BCME_NOMEM;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next, ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
pfn_elem_buf[i].infra = htod32(1);
pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM);
pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth);
@@ -686,6 +677,7 @@
break;
}
}
+
err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE);
if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
@@ -818,15 +810,10 @@
}
DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
/* preestimate scan count until which scan result this report is going to end */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
¶ms_batch->get_batch.expired_scan_results_list, list) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
phead = siter->bestnetheader;
while (phead != NULL) {
/* if left_size is less than bestheader total size , stop this */
@@ -842,15 +829,10 @@
bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
nleftsize -= nreadsize;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&phead->entry_list, list) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
#ifdef PNO_DEBUG
_base_bp = bp;
@@ -918,14 +900,9 @@
}
params_batch->get_batch.expired_tot_scan_cnt -= cnt;
/* set FALSE only if the link list is empty after returning the data */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
params_batch->get_batch.batch_started = FALSE;
bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
DHD_PNO(("%s", RESULTS_END_MARKER));
@@ -947,10 +924,8 @@
NULL_CHECK(head, "head is NULL", err);
NULL_CHECK(head->next, "head->next is NULL", err);
DHD_PNO(("%s enter\n", __FUNCTION__));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
head, list) {
if (only_last) {
@@ -979,9 +954,7 @@
MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
return removed_scan_cnt;
}
@@ -1031,19 +1004,15 @@
case DHD_PNO_LEGACY_MODE: {
struct dhd_pno_ssid *iter, *next;
if (params->params_legacy.nssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
¶ms->params_legacy.ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
params->params_legacy.nssid = 0;
params->params_legacy.scan_fr = 0;
params->params_legacy.pno_freq_expo_max = 0;
@@ -1081,18 +1050,13 @@
case DHD_PNO_HOTLIST_MODE: {
struct dhd_pno_bssid *iter, *next;
if (params->params_hotlist.nbssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
¶ms->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
params->params_hotlist.scan_fr = 0;
params->params_hotlist.nbssid = 0;
@@ -1220,14 +1184,12 @@
goto exit;
}
/* convert dhd_pno_bssid to wl_pfn_bssid */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
cnt = 0;
tmp_bssid = p_pfn_bssid;
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
memcpy(&tmp_bssid->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
tmp_bssid->flags = iter->flags;
@@ -1240,9 +1202,6 @@
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
@@ -1759,18 +1718,13 @@
if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
struct dhd_pno_bssid *iter, *next;
if (_params->params_gscan.nbssid_hotlist > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&_params->params_gscan.hotlist_bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
_params->params_gscan.nbssid_hotlist = 0;
DHD_PNO(("Flush Hotlist Config\n"));
@@ -1780,18 +1734,13 @@
dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
if (epno_cfg->num_epno_ssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&epno_cfg->epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
epno_cfg->num_epno_ssid = 0;
}
memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t));
@@ -1870,28 +1819,6 @@
return err;
}
-static void *
-dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
-{
- gscan_results_cache_t *iter, *results;
- dhd_pno_status_info_t *_pno_state;
- dhd_pno_params_t *_params;
- uint16 num_scan_ids = 0, num_results = 0;
-
- _pno_state = PNO_GET_PNOSTATE(dhd);
- _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
-
- iter = results = _params->params_gscan.gscan_batch_cache;
- while (iter) {
- num_results += iter->tot_count - iter->tot_consumed;
- num_scan_ids++;
- iter = iter->next;
- }
-
- *len = ((num_results << 16) | (num_scan_ids));
- return results;
-}
-
int
dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *buf, bool flush)
@@ -1938,6 +1865,15 @@
INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
}
+ if ((_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+ DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
+ (_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid)));
+ err = BCME_RANGE;
+ goto exit;
+ }
+
for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
_pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
sizeof(struct dhd_pno_bssid));
@@ -2076,7 +2012,7 @@
int mode, i = 0;
uint16 _chan_list[WL_NUMCHANNELS];
int tot_nchan = 0;
- int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+ int num_buckets_to_fw, tot_num_buckets, gscan_param_size = 0;
dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL;
wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
@@ -2223,13 +2159,11 @@
ptr = p_pfn_bssid;
/* convert dhd_pno_bssid to wl_pfn_bssid */
DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&gscan_params->hotlist_bssid_list, list) {
char buffer_hotlist[64];
+ GCC_DIAGNOSTIC_POP();
memcpy(&ptr->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
BCM_REFERENCE(buffer_hotlist);
@@ -2237,9 +2171,6 @@
ptr->flags = iter->flags;
ptr++;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
if (err < 0) {
@@ -2609,11 +2540,11 @@
uint16 count;
uint16 fwcount;
uint16 fwstatus = PFN_INCOMPLETE;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 20, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
struct timespec64 tm_spec;
#else
struct timespec tm_spec;
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
/* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2
* net_info and subnet_info structures are compatible in size and SSID offset,
@@ -2678,8 +2609,8 @@
__FUNCTION__, err));
goto exit_mutex_unlock;
}
- get_monotonic_boottime(&tm_spec);
+ get_monotonic_boottime(&tm_spec);
if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
fwstatus = plbestnet_v1->status;
fwcount = plbestnet_v1->count;
@@ -2963,7 +2894,30 @@
return err;
}
#endif /* GSCAN_SUPPORT */
+
#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+ gscan_results_cache_t *iter, *results;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ uint16 num_scan_ids = 0, num_results = 0;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ iter = results = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ num_scan_ids++;
+ iter = iter->next;
+ }
+
+ *len = ((num_results << 16) | (num_scan_ids));
+ return results;
+}
+
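dhd_get_gscan_batch_results() above returns the cache head and packs two 16-bit counters into the single 32-bit *len out-parameter: the number of un-consumed results in the upper half and the number of scan IDs in the lower half. A self-contained model of that packing convention; the helper names are illustrative, not from the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrors *len = (num_results << 16) | num_scan_ids */
static uint32_t pack_batch_len(uint16_t num_results, uint16_t num_scan_ids)
{
	return ((uint32_t)num_results << 16) | num_scan_ids;
}

static void unpack_batch_len(uint32_t len, uint16_t *num_results, uint16_t *num_scan_ids)
{
	*num_results  = (uint16_t)(len >> 16);
	*num_scan_ids = (uint16_t)(len & 0xFFFFu);
}

int main(void)
{
	uint16_t results, scan_ids;

	unpack_batch_len(pack_batch_len(37, 5), &results, &scan_ids);
	printf("results=%u scan_ids=%u\n", results, scan_ids); /* results=37 scan_ids=5 */
	return 0;
}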
void *
dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *info, uint32 *len)
@@ -3000,19 +2954,22 @@
ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE;
ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN;
+ ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
ptr->max_scan_reporting_threshold = 100;
- ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_hotlist_ssids = 0;
+ ptr->max_significant_wifi_change_aps = 0;
+ ptr->max_bssid_history_entries = 0;
ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
ret = (void *)ptr;
*len = sizeof(dhd_pno_gscan_capabilities_t);
break;
-#ifdef GSCAN_SUPPORT
+
case DHD_PNO_GET_BATCH_RESULTS:
ret = dhd_get_gscan_batch_results(dhd, len);
break;
-#endif /* GSCAN_SUPPORT */
case DHD_PNO_GET_CHANNEL_LIST:
if (info) {
uint16 ch_list[WL_NUMCHANNELS];
@@ -3048,7 +3005,7 @@
*len = 0;
} else {
mem_needed = sizeof(uint32) * nchan;
- p = (uint32 *)MALLOCZ(dhd->osh, mem_needed);
+ p = (uint32 *)MALLOC(dhd->osh, mem_needed);
if (!p) {
DHD_ERROR(("%s: Unable to malloc %d bytes\n",
__FUNCTION__, mem_needed));
@@ -3087,7 +3044,6 @@
epno_cfg->num_epno_ssid++;
list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list);
ret = ssid_elem;
- *len = sizeof(dhd_pno_ssid_t);
break;
default:
DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
@@ -3163,18 +3119,13 @@
/* this is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
_params->params_batch.get_batch.tot_scan_cnt;
@@ -3467,18 +3418,13 @@
/* This is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
/* reset global values after moving to expired list */
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
@@ -3513,14 +3459,10 @@
dhd_pub_t *dhd;
struct dhd_pno_batch_params *params_batch;
DHD_PNO(("%s enter\n", __FUNCTION__));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
_pno_state = container_of(work, struct dhd_pno_status_info, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
dhd = _pno_state->dhd;
if (dhd == NULL) {
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
@@ -3673,19 +3615,14 @@
}
i = 0;
/* convert dhd_pno_bssid to wl_pfn_bssid */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
p_pfn_bssid[i].flags = iter->flags;
i++;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
@@ -4007,23 +3944,18 @@
u32 bi_length = 0;
uint8 channel;
uint32 mem_needed;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 20, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
struct timespec64 ts;
#else
struct timespec ts;
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(4, 20, 0)) */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
u32 bi_ie_length = 0;
u32 bi_ie_offset = 0;
*size = 0;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
gscan_result = (wl_gscan_result_t *)data;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (!gscan_result) {
DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
goto exit;
@@ -4103,23 +4035,15 @@
gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data;
wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
wl_pfn_net_info_v1_t *net;
wl_pfn_net_info_v2_t *net_v2;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) {
- /* Check if count of pfn results is corrupted */
- if (pfn_result->count > EVENT_MAX_NETCNT_V1) {
- DHD_ERROR(("%s event %d: pfn results count %d"
- "exceeds the max limit\n",
- __FUNCTION__, event, pfn_result->count));
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) {
+ DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
return NULL;
}
count = pfn_result->count;
@@ -4158,11 +4082,9 @@
results[i].rssi, results[i].flags));
}
} else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- /* Check if count of pfn results is corrupted */
- if (pfn_result_v2->count > EVENT_MAX_NETCNT_V2) {
- DHD_ERROR(("%s event %d: pfn results count %d"
- "exceeds the max limit\n",
- __FUNCTION__, event, pfn_result_v2->count));
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) {
+ DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
return NULL;
}
count = pfn_result_v2->count;
@@ -4215,25 +4137,18 @@
void *ptr = NULL;
dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
struct dhd_pno_gscan_params *gscan_params;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data;
wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
wifi_gscan_result_t *hotlist_found_array;
wl_pfn_net_info_v1_t *pnetinfo;
wl_pfn_net_info_v2_t *pnetinfo_v2;
gscan_results_cache_t *gscan_hotlist_cache;
- uint32 malloc_size = 0, i, total = 0;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 20, 0))
- struct timespec64 tm_spec;
+ u32 malloc_size = 0, i, total = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+ struct timespec64 tm_spec;
#else
- struct timespec tm_spec;
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) */
+ struct timespec tm_spec;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
uint16 fwstatus;
uint16 fwcount;
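The kernel-version guards around the timespec declarations are corrected from '>' to '>=' KERNEL_VERSION(4, 20, 0): struct timespec and get_monotonic_boottime() were retired in v4.20 in favour of the 64-bit time API, so the timespec64 branch must already be taken on 4.20 itself. A hedged sketch of the usual compatibility pattern; the helper name and exact header choices are assumptions, and the driver open-codes the #if at each call site rather than using a wrapper like this:

#include <linux/version.h>
#include <linux/types.h>
#include <linux/ktime.h>

/* Illustrative boot-time-in-microseconds helper, not part of the driver. */
static inline void example_get_boottime_us(u64 *usec)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
	struct timespec64 ts;

	ktime_get_boottime_ts64(&ts);
	*usec = (u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
#else
	struct timespec ts;

	get_monotonic_boottime(&ts);
	*usec = (u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
#endif
}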
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.h
index 1823bcb..2c5b4f3 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_pno.h
@@ -2,7 +2,9 @@
* Header file of Broadcom Dongle Host Driver (DHD)
* Preferred Network Offload code and Wi-Fi Location Service (WLS) code.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -25,7 +27,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.h 692135 2017-03-26 17:19:39Z $
+ * $Id: dhd_pno.h 805174 2019-02-15 17:26:01Z $
*/
#ifndef __DHD_PNO_H__
@@ -348,6 +350,11 @@
struct ether_addr bssid;
} dhd_epno_results_t;
+typedef struct dhd_pno_swc_evt_param {
+ uint16 results_rxed_so_far;
+ wl_pfn_significant_net_t *change_array;
+} dhd_pno_swc_evt_param_t;
+
typedef struct wifi_gscan_result {
uint64 ts; /* Time of discovery */
char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
@@ -384,8 +391,10 @@
int max_ap_cache_per_scan;
int max_rssi_sample_size;
int max_scan_reporting_threshold;
- int max_hotlist_aps;
+ int max_hotlist_bssids;
+ int max_hotlist_ssids;
int max_significant_wifi_change_aps;
+ int max_bssid_history_entries;
int max_epno_ssid_crc32;
int max_epno_hidden_ssid;
int max_white_list_ssid;
@@ -402,6 +411,8 @@
uint8 bestn;
uint8 mscan;
uint8 buffer_threshold;
+ uint8 swc_nbssid_threshold;
+ uint8 swc_rssi_window_size;
uint8 lost_ap_window;
uint8 nchannel_buckets;
uint8 reason;
@@ -411,9 +422,12 @@
gscan_results_cache_t *gscan_batch_cache;
gscan_results_cache_t *gscan_hotlist_found;
gscan_results_cache_t *gscan_hotlist_lost;
+ uint16 nbssid_significant_change;
uint16 nbssid_hotlist;
+ struct dhd_pno_swc_evt_param param_significant;
struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
struct list_head hotlist_bssid_list;
+ struct list_head significant_bssid_list;
dhd_epno_ssid_cfg_t epno_cfg;
uint32 scan_id;
};
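The capability limits added to dhd_pno_gscan_capabilities_t here (max_hotlist_bssids, max_hotlist_ssids, max_significant_wifi_change_aps, max_bssid_history_entries) match the fields the DHD_PNO_GET_CAPABILITIES case in dhd_pno.c now fills in. A minimal sketch of a consumer of that structure, assuming it was populated via dhd_pno_get_gscan(); the dump helper itself is illustrative and not part of the driver:

/* Assumes 'cap' came from dhd_pno_get_gscan(dhd, DHD_PNO_GET_CAPABILITIES, ...) */
static void example_dump_gscan_caps(const dhd_pno_gscan_capabilities_t *cap)
{
	printk("gscan caps: cache=%d buckets=%d ap/scan=%d rssi_win=%d\n",
		cap->max_scan_cache_size, cap->max_scan_buckets,
		cap->max_ap_cache_per_scan, cap->max_rssi_sample_size);
	printk("gscan caps: hotlist bssids=%d ssids=%d epno crc32=%d hidden=%d white=%d\n",
		cap->max_hotlist_bssids, cap->max_hotlist_ssids,
		cap->max_epno_ssid_crc32, cap->max_epno_hidden_ssid,
		cap->max_white_list_ssid);
}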
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_proto.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_proto.h
index bfbc186..a339232 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_proto.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -4,7 +4,9 @@
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -41,10 +43,21 @@
#define DEFAULT_IOCTL_RESP_TIMEOUT 5000
#ifndef IOCTL_RESP_TIMEOUT
+#ifdef BCMQT
+#define IOCTL_RESP_TIMEOUT 130000 /* In milli second */
+#else
/* In milli second default value for Production FW */
#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT
+#endif /* BCMQT */
#endif /* IOCTL_RESP_TIMEOUT */
+#ifdef BCMQT
+#define IOCTL_DMAXFER_TIMEOUT 260000 /* In milli second */
+#else
+/* In milli second default value for Production FW */
+#define IOCTL_DMAXFER_TIMEOUT 10000
+#endif /* BCMQT */
+
#ifndef MFG_IOCTL_RESP_TIMEOUT
#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */
#endif /* MFG_IOCTL_RESP_TIMEOUT */
@@ -125,8 +138,8 @@
uint reorder_info_len, void **pkt, uint32 *free_buf_count);
#ifdef BCMPCIE
-extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound);
-extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype);
extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound);
extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd);
@@ -137,7 +150,7 @@
extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd,
uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num);
-extern dma_xfer_status_t dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd);
+extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result);
extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
@@ -159,13 +172,24 @@
extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id);
extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
extern void dhd_prot_reset(dhd_pub_t *dhd);
+extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
#ifdef IDLE_TX_FLOW_MGMT
extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count);
extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
#endif /* IDLE_TX_FLOW_MGMT */
extern int dhd_prot_init_info_rings(dhd_pub_t *dhd);
+#ifdef DHD_HP2P
+extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd);
+
+extern void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd);
+extern void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd) { return; }
+static INLINE void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd) { return; }
#endif /* BCMPCIE */
#ifdef DHD_LB
@@ -191,6 +215,16 @@
extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd);
+#ifdef EWP_EDL
+int dhd_prot_init_edl_rings(dhd_pub_t *dhd);
+bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd);
+int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data);
+#endif /* EWP_EDL */
+
+/* APIs for managing a DMA-able buffer */
+int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
+void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+
/********************************
* For version-string expansion *
*/
@@ -202,4 +236,17 @@
#define DHD_PROTOCOL "unknown"
#endif /* proto */
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len);
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff);
+
+#ifdef DHD_HP2P
+extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable);
+extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val);
+#endif // endif
+
+#ifdef DHD_MAP_LOGGING
+extern void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_LOGGING */
#endif /* _dhd_proto_h_ */
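The BCMQT branches give emulation builds far larger IOCTL (130 s) and DMA-transfer (260 s) timeouts than the production defaults (5 s / 10 s), since emulated silicon runs orders of magnitude slower. A hedged sketch of how a wait path might consume the selected constant; the helper below is illustrative and is not the driver's actual bus wait code:

#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/errno.h>

/* Waits for an IOCTL response flag using the build-selected timeout. */
static int example_wait_ioctl_resp(wait_queue_head_t *wq, bool *done)
{
	long remain = wait_event_timeout(*wq, *done,
			msecs_to_jiffies(IOCTL_RESP_TIMEOUT));

	/* 5 s on production FW, 130 s on BCMQT emulation builds */
	return remain ? 0 : -ETIMEDOUT;
}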
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.c
index 323d09c..87f2f97 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.c
@@ -1,7 +1,9 @@
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -40,17 +42,24 @@
#include <linux/sort.h>
#include <dngl_stats.h>
#include <wlioctl.h>
+#include <bcmwifi_rspec.h>
#include <bcmevent.h>
#include <dhd.h>
#include <dhd_linux.h>
#include <dhd_rtt.h>
#include <dhd_dbg.h>
+#include <dhd_bus.h>
#include <wldev_common.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif /* WL_CFG80211 */
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
static DEFINE_SPINLOCK(noti_list_lock);
+#ifndef NULL_CHECK
#define NULL_CHECK(p, s, err) \
do { \
if (!(p)) { \
@@ -59,12 +68,16 @@
return err; \
} \
} while (0)
+#endif // endif
-#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
-#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
(ts).tv_nsec / NSEC_PER_USEC)
+#undef DHD_RTT_MEM
+#undef DHD_RTT_ERR
+#define DHD_RTT_MEM DHD_LOG_MEM
+#define DHD_RTT_ERR DHD_ERROR
+
#define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */
#define FTM_AVAIL_MAX_SLOTS 32
#define FTM_MAX_CONFIGS 10
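TIMESPEC_TO_US() above flattens a timespec into microseconds as sec * USEC_PER_SEC + nsec / NSEC_PER_USEC. A quick self-contained check of that arithmetic (user-space model with the constants redefined locally; values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC  1000000ULL
#define NSEC_PER_USEC 1000ULL

/* Same arithmetic as the driver's TIMESPEC_TO_US() macro */
static uint64_t timespec_to_us(uint64_t tv_sec, uint64_t tv_nsec)
{
	return tv_sec * USEC_PER_SEC + tv_nsec / NSEC_PER_USEC;
}

int main(void)
{
	/* 2 s + 1,500,000 ns -> 2,001,500 us */
	printf("%llu\n", (unsigned long long)timespec_to_us(2, 1500000));
	return 0;
}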
@@ -72,9 +85,11 @@
#define FTM_DEFAULT_SESSION 1
#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
#define FTM_INVALID -1
-#define FTM_DEFAULT_CNT_20M 12
-#define FTM_DEFAULT_CNT_40M 10
-#define FTM_DEFAULT_CNT_80M 5
+#define FTM_DEFAULT_CNT_20M 24u
+#define FTM_DEFAULT_CNT_40M 16u
+#define FTM_DEFAULT_CNT_80M 11u
+/* To handle congestion env, set max dur/timeout */
+#define FTM_MAX_BURST_DUR_TMO_MS 128u
/* convenience macros */
#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
@@ -91,7 +106,13 @@
/* broadcom specific set to have more accurate data */
#define ENABLE_VHT_ACK
#define CH_MIN_5G_CHANNEL 34
-#define CH_MIN_2G_CHANNEL 1
+
+/* CUR ETH became obsolete with this major version onwards */
+#define RTT_IOV_CUR_ETH_OBSOLETE 12
+
+/* PROXD TIMEOUT */
+#define DHD_RTT_TIMER_INTERVAL_MS 5000u
+#define DHD_NAN_RTT_TIMER_INTERVAL_MS 10000u
struct rtt_noti_callback {
struct list_head list;
@@ -135,6 +156,7 @@
uint32 data32;
uint16 data16;
uint8 data8;
+ uint32 event_mask;
};
} ftm_config_param_info_t;
@@ -157,100 +179,56 @@
rtt_result_ver(uint16 tlvid, const uint8 *p_data);
static int
-dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len);
static int
-dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len);
static wifi_rate_t
dhd_rtt_convert_rate_to_host(uint32 ratespec);
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
+const char *
+ftm_cmdid_to_str(uint16 cmdid);
+#endif /* WL_CFG80211 && RTT_DEBUG */
+
#ifdef WL_CFG80211
static int
dhd_rtt_start(dhd_pub_t *dhd);
+static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr);
+static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd);
+static void dhd_rtt_timeout_work(struct work_struct *work);
#endif /* WL_CFG80211 */
static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};
/* ftm status mapping to host status */
static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
- {WL_PROXD_E_INCOMPLETE, RTT_REASON_FAILURE},
- {WL_PROXD_E_OVERRIDDEN, RTT_REASON_FAILURE},
- {WL_PROXD_E_ASAP_FAILED, RTT_REASON_FAILURE},
- {WL_PROXD_E_NOTSTARTED, RTT_REASON_FAIL_NOT_SCHEDULED_YET},
- {WL_PROXD_E_INVALIDMEAS, RTT_REASON_FAIL_INVALID_TS},
- {WL_PROXD_E_INCAPABLE, RTT_REASON_FAIL_NO_CAPABILITY},
- {WL_PROXD_E_MISMATCH, RTT_REASON_FAILURE},
- {WL_PROXD_E_DUP_SESSION, RTT_REASON_FAILURE},
- {WL_PROXD_E_REMOTE_FAIL, RTT_REASON_FAILURE},
- {WL_PROXD_E_REMOTE_INCAPABLE, RTT_REASON_FAILURE},
- {WL_PROXD_E_SCHED_FAIL, RTT_REASON_FAIL_SCHEDULE},
- {WL_PROXD_E_PROTO, RTT_REASON_FAIL_PROTOCOL},
- {WL_PROXD_E_EXPIRED, RTT_REASON_FAILURE},
- {WL_PROXD_E_TIMEOUT, RTT_REASON_FAIL_TM_TIMEOUT},
- {WL_PROXD_E_NOACK, RTT_REASON_FAIL_NO_RSP},
- {WL_PROXD_E_DEFERRED, RTT_REASON_FAILURE},
- {WL_PROXD_E_INVALID_SID, RTT_REASON_FAILURE},
- {WL_PROXD_E_REMOTE_CANCEL, RTT_REASON_FAILURE},
- {WL_PROXD_E_CANCELED, RTT_REASON_ABORTED},
- {WL_PROXD_E_INVALID_SESSION, RTT_REASON_FAILURE},
- {WL_PROXD_E_BAD_STATE, RTT_REASON_FAILURE},
- {WL_PROXD_E_ERROR, RTT_REASON_FAILURE},
- {WL_PROXD_E_OK, RTT_REASON_SUCCESS}
-};
-
-/* ftm tlv-id mapping */
-static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = {
- /* { WL_PROXD_TLV_ID_xxx, "text for WL_PROXD_TLV_ID_xxx" }, */
- { WL_PROXD_TLV_ID_NONE, "none" },
- { WL_PROXD_TLV_ID_METHOD, "method" },
- { WL_PROXD_TLV_ID_FLAGS, "flags" },
- { WL_PROXD_TLV_ID_CHANSPEC, "chanspec" },
- { WL_PROXD_TLV_ID_TX_POWER, "tx power" },
- { WL_PROXD_TLV_ID_RATESPEC, "ratespec" },
- { WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" },
- { WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" },
- { WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" },
- { WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" },
- { WL_PROXD_TLV_ID_NUM_BURST, "num burst" },
- { WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" },
- { WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" },
- { WL_PROXD_TLV_ID_BSSID, "bssid" },
- { WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" },
- { WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" },
- { WL_PROXD_TLV_ID_EVENT_MASK, "event mask" },
- { WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" },
- { WL_PROXD_TLV_ID_PEER_MAC, "peer addr" },
- { WL_PROXD_TLV_ID_FTM_REQ, "ftm req" },
- { WL_PROXD_TLV_ID_LCI_REQ, "lci req" },
- { WL_PROXD_TLV_ID_LCI, "lci" },
- { WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" },
- { WL_PROXD_TLV_ID_CIVIC, "civic" },
- { WL_PROXD_TLV_ID_AVAIL, "availability" },
- { WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" },
- { WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" },
- { WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" },
- { WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" },
- { WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" },
- { WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" },
- /* output - 512 + x */
- { WL_PROXD_TLV_ID_STATUS, "status" },
- { WL_PROXD_TLV_ID_COUNTERS, "counters" },
- { WL_PROXD_TLV_ID_INFO, "info" },
- { WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" },
- { WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" },
- { WL_PROXD_TLV_ID_SESSION_INFO, "session info" },
- { WL_PROXD_TLV_ID_SESSION_STATUS, "session status" },
- { WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" },
- /* debug tlvs can be added starting 1024 */
- { WL_PROXD_TLV_ID_DEBUG_MASK, "debug mask" },
- { WL_PROXD_TLV_ID_COLLECT, "collect" },
- { WL_PROXD_TLV_ID_STRBUF, "result" },
- { WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" },
- { WL_PROXD_TLV_ID_RI_RR, "ri_rr" },
- { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"},
- { WL_PROXD_TLV_ID_MF_STATS_DATA, "mf_stats_data"}
+ {WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_NOTSTARTED, RTT_STATUS_FAIL_NOT_SCHEDULED_YET},
+ {WL_PROXD_E_INVALIDMEAS, RTT_STATUS_FAIL_INVALID_TS},
+ {WL_PROXD_E_INCAPABLE, RTT_STATUS_FAIL_NO_CAPABILITY},
+ {WL_PROXD_E_MISMATCH, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_DUP_SESSION, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_FAIL, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_INCAPABLE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_SCHED_FAIL, RTT_STATUS_FAIL_SCHEDULE},
+ {WL_PROXD_E_PROTO, RTT_STATUS_FAIL_PROTOCOL},
+ {WL_PROXD_E_EXPIRED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_TIMEOUT, RTT_STATUS_FAIL_TM_TIMEOUT},
+ {WL_PROXD_E_NOACK, RTT_STATUS_FAIL_NO_RSP},
+ {WL_PROXD_E_DEFERRED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_INVALID_SID, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_CANCEL, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_CANCELED, RTT_STATUS_ABORTED},
+ {WL_PROXD_E_INVALID_SESSION, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_BAD_STATE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_ERROR, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_OK, RTT_STATUS_SUCCESS}
};
static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
@@ -293,17 +271,6 @@
};
/*
-* ranging-state --> text string mapping
-*/
-static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = {
- /* wl_proxd_ranging_state_t, text string */
- { WL_PROXD_RANGING_STATE_NONE, "none" },
- { WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" },
- { WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" },
- { WL_PROXD_RANGING_STATE_DONE, "done" },
-};
-
-/*
* status --> text string mapping
*/
static const ftm_strmap_entry_t ftm_status_value_loginfo[] = {
@@ -345,28 +312,6 @@
{ WL_PROXD_TMU_PICO_SEC, "ps" }
};
-#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
-#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
-#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
-#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
-#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
-
-#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE)
-#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \
- (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \
- (((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC))
-#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
-#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
-#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
-#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
-#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
-#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
-#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
-#define RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ? \
- ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec))
-/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
-#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec)
-
struct ieee_80211_mcs_rate_info {
uint8 constellation_bits;
uint8 coding_q;
@@ -538,7 +483,7 @@
}
p_entry++; /* next entry */
}
- return RTT_REASON_FAILURE; /* not found */
+ return RTT_STATUS_FAILURE; /* not found */
}
/*
* lookup 'id' (as a key) from a table
@@ -580,8 +525,7 @@
return "invalid";
}
-#ifdef RTT_DEBUG
-
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
#define DEF_STRMAP_ENTRY(id) { (id), #id }
@@ -614,12 +558,12 @@
/*
* map a ftm cmd-id to a text-string for display
*/
-static const char *
+const char *
ftm_cmdid_to_str(uint16 cmdid)
{
return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
}
-#endif /* RTT_DEBUG */
+#endif /* WL_CFG80211 && RTT_DEBUG */
/*
* convert BCME_xxx error codes into related error strings
@@ -690,7 +634,7 @@
status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
if (status != BCME_OK) {
- DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+ DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
__FUNCTION__, p_subcmd_info->cmdid, status));
return status;
}
@@ -702,7 +646,7 @@
tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
if (tlvs_len < 0) {
- DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+ DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
tlvs_len = 0;
}
@@ -721,7 +665,7 @@
wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
{
uint16 proxd_iovsize;
- uint16 kflags;
+ uint32 kflags;
wl_proxd_tlv_t *p_tlv;
wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
@@ -732,7 +676,7 @@
p_proxd_iov = kzalloc(proxd_iovsize, kflags);
if (p_proxd_iov == NULL) {
- DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+ DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
return NULL;
}
@@ -833,11 +777,6 @@
}
#endif /* WL_CFG80211 */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-
/* gets the length and returns the version
* of the wl_proxd_collect_event_t version
*/
@@ -971,7 +910,7 @@
}
break;
default:
- DHD_ERROR(("%s: > Unsupported TLV ID %d\n",
+ DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n",
__FUNCTION__, tlvid));
break;
}
@@ -1033,15 +972,15 @@
expected_rtt_result_ver = rtt_result_ver(tlvid, p_data);
switch (expected_rtt_result_ver) {
case WL_PROXD_RTT_RESULT_VERSION_1:
- ret = dhd_rtt_convert_results_to_host_v1((rtt_report_t *)ctx,
+ ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx,
p_data, tlvid, len);
break;
case WL_PROXD_RTT_RESULT_VERSION_2:
- ret = dhd_rtt_convert_results_to_host_v2((rtt_report_t *)ctx,
+ ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx,
p_data, tlvid, len);
break;
default:
- DHD_ERROR((" > Unsupported RTT_RESULT version\n"));
+ DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n"));
ret = BCME_UNSUPPORTED;
break;
}
@@ -1066,6 +1005,7 @@
ctx, p_data, len);
break;
case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA:
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n"));
DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32))));
for (i = 0; i < (len/sizeof(chan_data_entry)); i++) {
@@ -1073,6 +1013,7 @@
chan_data_entry = ltoh32_ua(p + i);
DHD_RTT(("\t%u\n", chan_data_entry));
}
+ GCC_DIAGNOSTIC_POP();
break;
case WL_PROXD_TLV_ID_MF_STATS_DATA:
DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n"));
@@ -1080,7 +1021,7 @@
rtt_prhex("", p_data, len);
break;
default:
- DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid));
+ DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid));
ret = BCME_ERROR;
break;
}
@@ -1120,7 +1061,7 @@
ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
+ DHD_RTT_ERR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
__FUNCTION__, ret));
goto exit;
}
@@ -1180,6 +1121,7 @@
break;
case WL_PROXD_TLV_ID_BSSID: /* mac address */
case WL_PROXD_TLV_ID_PEER_MAC:
+ case WL_PROXD_TLV_ID_CUR_ETHER_ADDR:
p_src_data = &p_config_param_info->mac_addr;
src_data_size = sizeof(struct ether_addr);
break;
@@ -1196,7 +1138,7 @@
break;
}
if (ret != BCME_OK) {
- DHD_ERROR(("%s bad TLV ID : %d\n",
+ DHD_RTT_ERR(("%s bad TLV ID : %d\n",
__FUNCTION__, p_config_param_info->tlvid));
break;
}
@@ -1205,7 +1147,7 @@
p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data,
BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_ERROR(("%s: bcm_pack_xltv_entry() failed,"
+ DHD_RTT_ERR(("%s: bcm_pack_xltv_entry() failed,"
" status=%d\n", __FUNCTION__, ret));
break;
}
@@ -1247,6 +1189,119 @@
return dhd_rtt_common_set_handler(dhd, &subcmd_info,
WL_PROXD_METHOD_FTM, session_id);
}
+#ifdef WL_NAN
+int
+dhd_rtt_delete_nan_session(dhd_pub_t *dhd)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg);
+ return BCME_OK;
+}
+#endif /* WL_NAN */
+/* API to find out if the given Peer Mac from FTM events
+* is nan-peer. Based on this we will handle the SESSION_END
+* event. For nan-peer FTM_SESSION_END event is ignored and handled in
+* nan-ranging-cancel or nan-ranging-end event.
+*/
+static bool
+dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac)
+{
+#ifdef WL_NAN
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_ranging_inst_t *ranging_inst = NULL;
+ bool ret = FALSE;
+
+ if (cfg->nan_enable == FALSE || ETHER_ISNULLADDR(peer_mac)) {
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac);
+ if (ranging_inst) {
+ DHD_RTT((" RTT peer is of type NAN\n"));
+ ret = TRUE;
+ goto exit;
+ }
+exit:
+ return ret;
+#else
+ return FALSE;
+#endif /* WL_NAN */
+}
+
+#ifdef WL_NAN
+static int
+dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
+{
+ s32 err = BCME_OK;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_nan_ev_rng_rpt_ind_t range_res;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ NAN_MUTEX_LOCK();
+
+ bzero(&range_res, sizeof(range_res));
+
+ if (!rtt_status) {
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+ if (!cfg->nan_enable) { /* If nan is not enabled report error */
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+ /* check if new ranging session allowed */
+ if (!wl_cfgnan_ranging_allowed(cfg)) {
+ /* responder should be in progress because initiator requests are
+ * queued in DHD. Since the initiator has higher preference, cancel
+ * responder sessions
+ */
+ wl_cfgnan_cancel_rng_responders(dev, cfg);
+ }
+
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &rtt_target->addr, NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ err = BCME_NORESOURCE;
+ goto done;
+ }
+
+ DHD_RTT(("Trigger nan based range request\n"));
+ err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE);
+ if (unlikely(err)) {
+ goto done;
+ }
+ ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED;
+ ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ /* schedule proxd timeout */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+done:
+ if (err) { /* notify failure RTT event to host */
+ DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err));
+ dhd_rtt_handle_nan_rtt_session_end(dhd, &rtt_target->addr);
+ /* try to reset geofence */
+ if (ranging_inst) {
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SCHED_DIR_TRIGGER_FAIL);
+ }
+ }
+ NAN_MUTEX_UNLOCK();
+ return err;
+}
+#endif /* WL_NAN */
static int
dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id,
@@ -1269,7 +1324,7 @@
FTM_IOC_BUFSZ, &proxd_iovsize);
if (p_proxd_iov == NULL) {
- DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n",
+ DHD_RTT_ERR(("%s : failed to allocate the iovar (size :%d)\n",
__FUNCTION__, FTM_IOC_BUFSZ));
return BCME_NOMEM;
}
@@ -1292,7 +1347,7 @@
ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov,
all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : failed to set config\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : failed to set config\n", __FUNCTION__));
}
}
/* clean up */
@@ -1344,7 +1399,7 @@
chanspec = wf_chspec_80(center_chan, primary_chan);
break;
default:
- DHD_ERROR(("doesn't support this bandwith : %d", channel.width));
+ DHD_RTT_ERR(("doesn't support this bandwith : %d", channel.width));
bw = -1;
break;
}
@@ -1365,27 +1420,50 @@
{
int err = BCME_OK;
int idx;
- rtt_status_info_t *rtt_status;
- NULL_CHECK(params, "params is NULL", err);
+ rtt_status_info_t *rtt_status = NULL;
+ struct net_device *dev = NULL;
+ NULL_CHECK(params, "params is NULL", err);
NULL_CHECK(dhd, "dhd is NULL", err);
+
+ dev = dhd_linux_get_primary_netdev(dhd);
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ NULL_CHECK(dev, "dev is NULL", err);
+
if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
- DHD_ERROR(("doesn't support RTT \n"));
- return BCME_ERROR;
+ DHD_RTT_ERR(("doesn't support RTT \n"));
+ err = BCME_ERROR;
+ goto exit;
}
- if (rtt_status->status != RTT_STOPPED) {
- DHD_ERROR(("rtt is already started\n"));
- return BCME_BUSY;
- }
+
DHD_RTT(("%s enter\n", __FUNCTION__));
+ if (params->rtt_target_cnt > 0) {
+#ifdef WL_NAN
+ /* cancel ongoing geofence RTT if there */
+ if ((err = wl_cfgnan_suspend_geofence_rng_session(dev,
+ NULL, RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0)) != BCME_OK) {
+ goto exit;
+ }
+#endif /* WL_NAN */
+ } else {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ mutex_lock(&rtt_status->rtt_mutex);
+ if (rtt_status->status != RTT_STOPPED) {
+ DHD_RTT_ERR(("rtt is already started\n"));
+ err = BCME_BUSY;
+ goto exit;
+ }
memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
memcpy(rtt_status->rtt_config.target_info,
params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
rtt_status->status = RTT_STARTED;
+ DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt));
/* start to measure RTT from first device */
/* find next target to trigger RTT */
for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
@@ -1400,16 +1478,511 @@
}
if (idx < rtt_status->rtt_config.rtt_target_cnt) {
DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+ rtt_status->rtt_sched_reason = RTT_SCHED_HOST_TRIGGER;
schedule_work(&rtt_status->work);
}
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
return err;
}
+#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex)
+#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex)
+
+#ifdef WL_NAN
+/* sets geofence role concurrency state TRUE/FALSE */
+void
+dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.role_concurr_state = state;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+
+/* returns TRUE if geofence role concurrency constraint exists */
+bool
+dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return FALSE;
+ }
+ return rtt_status->geofence_cfg.role_concurr_state;
+}
+
+int8
+dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return 0;
+ }
+ return rtt_status->geofence_cfg.geofence_target_cnt;
+}
+
+/* sets geofence rtt state TRUE/FALSE */
+void
+dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.rtt_in_progress = state;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+
+/* returns TRUE if geofence rtt is in progress */
+bool
+dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return FALSE;
+ }
+
+ return rtt_status->geofence_cfg.rtt_in_progress;
+}
+
+/* returns geofence RTT target list Head */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* head = NULL;
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ if (rtt_status->geofence_cfg.geofence_target_cnt) {
+ head = &rtt_status->geofence_cfg.geofence_target_info[0];
+ }
+
+ return head;
+}
+
+int8
+dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd)
+{
+ int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ goto exit;
+ }
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (target_cnt == 0) {
+ goto exit;
+ }
+
+ cur_idx = rtt_status->geofence_cfg.cur_target_idx;
+ ASSERT(cur_idx <= target_cnt);
+
+exit:
+ return cur_idx;
+}
+
+void
+dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return;
+ }
+
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+ /* Invalidate current idx if no targets */
+ rtt_status->geofence_cfg.cur_target_idx =
+ DHD_RTT_INVALID_TARGET_INDEX;
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+ return;
+ }
+ rtt_status->geofence_cfg.cur_target_idx++;
+
+ if (rtt_status->geofence_cfg.cur_target_idx >=
+ rtt_status->geofence_cfg.geofence_target_cnt) {
+ /* Reset once all targets done */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+}
+
+/* returns geofence current RTT target */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* cur_target = NULL;
+ int cur_idx = 0;
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx >= 0) {
+ cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx];
+ }
+
+ return cur_target;
+}
+
+/* returns geofence target from list for the peer */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index)
+{
+ int8 i;
+ rtt_status_info_t *rtt_status;
+ int target_cnt;
+ rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL;
+
+ rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+
+ /* Loop through to find target */
+ for (i = 0; i < target_cnt; i++) {
+ if (geofence_target_info[i].valid == FALSE) {
+ break;
+ }
+ if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr,
+ ETHER_ADDR_LEN)) {
+ *index = i;
+ tgt = &geofence_target_info[i];
+ }
+ }
+ if (!tgt) {
+ DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list,"
+ " MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr)));
+ }
+ return tgt;
+}
+
+/* add geofence target to the target list */
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, index;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+ /* Get the geofence_target via peer addr, index param is a dummy here */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index);
+ if (geofence_target_info) {
+ DHD_RTT(("Duplicate geofencing RTT add request dropped\n"));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) {
+ DHD_RTT(("Queue full, Geofencing RTT add request dropped\n"));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+
+ /* Add Geofence RTT request and increment target count */
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+ /* src and dest buffer len same, pointers of same DS statically allocated */
+ (void)memcpy_s(&geofence_target_info[geofence_target_cnt],
+ sizeof(geofence_target_info[geofence_target_cnt]), target,
+ sizeof(*target));
+ geofence_target_info[geofence_target_cnt].valid = TRUE;
+ rtt_status->geofence_cfg.geofence_target_cnt++;
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 1) {
+ /* Adding first target */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+/* removes geofence target from the target list */
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, j, index = 0;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+ geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+ if (geofence_target_cnt == 0) {
+ DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n"));
+ ASSERT(0);
+ goto exit;
+ }
+
+ /* Get the geofence_target via peer addr */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index);
+ if (geofence_target_info == NULL) {
+ DHD_RTT(("Geofencing RTT target not found, remove request dropped\n"));
+ err = BCME_NOTFOUND;
+ goto exit;
+ }
+
+ /* left-shift all the valid entries, as we don't keep holes in the list */
+ for (j = index; (j+1) < geofence_target_cnt; j++) {
+ if (geofence_target_info[j].valid == TRUE) {
+ /*
+ * src and dest buffer len same, pointers of same DS
+ * statically allocated
+ */
+ (void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]),
+ &geofence_target_info[j + 1],
+ sizeof(geofence_target_info[j + 1]));
+ } else {
+ break;
+ }
+ }
+ rtt_status->geofence_cfg.geofence_target_cnt--;
+ if ((rtt_status->geofence_cfg.geofence_target_cnt == 0) ||
+ (index == rtt_status->geofence_cfg.cur_target_idx)) {
+ /* Move cur_idx to next target */
+ dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
+ } else if (index < rtt_status->geofence_cfg.cur_target_idx) {
+ /* Decrement cur index if cur target position changed */
+ rtt_status->geofence_cfg.cur_target_idx--;
+ }
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
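dhd_rtt_remove_geofence_target() above keeps the target array hole-free: every valid entry after the removed index is shifted one slot left, the count is decremented, and cur_target_idx is then fixed up. A compact user-space model of that compaction step; the struct layout and names here are illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

struct example_tgt {
	uint8_t peer[6];
	int     valid;
};

/* Remove entry at 'index' and keep the list contiguous, as the driver does. */
static int example_list_remove(struct example_tgt *list, int *count, int index)
{
	int j;

	if (index < 0 || index >= *count)
		return -1;

	for (j = index; (j + 1) < *count; j++) {
		if (!list[j + 1].valid)
			break;
		memcpy(&list[j], &list[j + 1], sizeof(list[j]));
	}
	list[*count - 1].valid = 0;
	(*count)--;
	return 0;
}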
+/* deletes/empty geofence target list */
+int
+dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t),
+ 0, sizeof(rtt_geofence_cfg_t));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+int
+dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd)
+{
+ rtt_geofence_target_info_t *geofence_target_info;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ int ret = BCME_OK;
+ bool geofence_state;
+ bool role_concurrency_state;
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ NAN_MUTEX_LOCK();
+
+ if ((cfg->nan_init_state == FALSE) ||
+ (cfg->nan_enable == FALSE)) {
+ ret = BCME_NOTENABLED;
+ goto done;
+ }
+ geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
+ role_concurrency_state = dhd_rtt_get_role_concurrency_state(dhd);
+
+ DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: sched_reason = %d\n",
+ rtt_status->rtt_sched_reason));
+
+ if (geofence_state == TRUE || role_concurrency_state == TRUE) {
+ ret = BCME_ERROR;
+ DHD_RTT_ERR(("geofencing constraint , sched request dropped,"
+ " geofence_state = %d, role_concurrency_state = %d\n",
+ geofence_state, role_concurrency_state));
+ goto done;
+ }
+
+ /* Get current geofencing target */
+ geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
+
+ /* call cfg API for triggering geofencing RTT */
+ if (geofence_target_info) {
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev,
+ &geofence_target_info->peer_addr);
+ if (rtt_invalid_reason != RTT_STATE_VALID) {
+ ret = BCME_BUSY;
+ DHD_RTT_ERR(("DRV State is not valid for RTT, "
+ "invalid_state = %d\n", rtt_invalid_reason));
+ goto done;
+ }
+
+ ret = wl_cfgnan_trigger_geofencing_ranging(dev,
+ &geofence_target_info->peer_addr);
+ if (ret == BCME_OK) {
+ dhd_rtt_set_geofence_rtt_state(dhd, TRUE);
+ }
+ } else {
+ DHD_RTT(("No RTT target to schedule\n"));
+ ret = BCME_NOTFOUND;
+ }
+
+done:
+ NAN_MUTEX_UNLOCK();
+ return ret;
+}
+#endif /* WL_NAN */
+
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+static void
+dhd_rtt_retry(dhd_pub_t *dhd)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_geofence_target_info_t *geofence_target = NULL;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ geofence_target = dhd_rtt_get_geofence_current_target(dhd);
+ if (!geofence_target) {
+ DHD_RTT(("dhd_rtt_retry: geofence target null\n"));
+ goto exit;
+ }
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ DHD_RTT(("dhd_rtt_retry: ranging instance null\n"));
+ goto exit;
+ }
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ ranging_inst, RTT_SCHED_RTT_RETRY_GEOFENCE);
+
+exit:
+ return;
+}
+
+static void
+dhd_rtt_retry_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+ struct net_device *dev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+ dev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wl_get_cfg(dev);
+
+ NAN_MUTEX_LOCK();
+ mutex_lock(&rtt_status->rtt_mutex);
+ (void) dhd_rtt_retry(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+ NAN_MUTEX_UNLOCK();
+
+exit:
+ return;
+}
+#endif /* WL_NAN */
+
+/*
+ * Returns zero (0) when the RTT state is valid,
+ * i.e. when RTT is applicable for this peer.
+ */
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr)
+{
+ uint8 invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ UNUSED_PARAMETER(cfg);
+ UNUSED_PARAMETER(invalid_reason);
+
+ /* Make sure peer addr is not NULL in caller */
+ ASSERT(peer_addr);
+ /*
+ * Keep adding prohibited drv states here
+ * Only generic conditions which block
+ * All RTTs like NDP connection
+ */
+
+#ifdef WL_NAN
+ if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) {
+ invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST;
+ DHD_RTT(("NDP in progress/connected, RTT prohibited\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* Remove below #defines once more exit calls come */
+#ifdef WL_NAN
+exit:
+#endif /* WL_NAN */
+ return invalid_reason;
+}
+#endif /* WL_CFG80211 */
+
+void
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status == NULL) {
+ ASSERT(0);
+ } else {
+ rtt_status->rtt_sched_reason = sched_reason;
+ schedule_work(&rtt_status->work);
+ }
+ return;
+}
+
int
dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
{
int err = BCME_OK;
-#ifdef WL_CFG8011
+#ifdef WL_CFG80211
int i = 0, j = 0;
rtt_status_info_t *rtt_status;
rtt_results_header_t *entry, *next;
@@ -1420,7 +1993,7 @@
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
if (rtt_status->status == RTT_STOPPED) {
- DHD_ERROR(("rtt is not started\n"));
+ DHD_RTT_ERR(("rtt is not started\n"));
return BCME_OK;
}
DHD_RTT(("%s enter\n", __FUNCTION__));
@@ -1440,10 +2013,7 @@
/* remove the rtt results in cache */
if (!list_empty(&rtt_status->rtt_results_cache)) {
/* Iterate rtt_results_header list */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(entry, next,
&rtt_status->rtt_results_cache, list) {
list_del(&entry->list);
@@ -1455,21 +2025,14 @@
}
kfree(entry);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
}
/* send the rtt complete event to wake up the user process */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
/* reinitialize the HEAD */
INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
/* clear information for rtt_config */
@@ -1477,7 +2040,14 @@
memset(rtt_status->rtt_config.target_info, 0,
TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
rtt_status->cur_idx = 0;
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+#ifdef WL_NAN
+ dhd_rtt_delete_nan_session(dhd);
+#endif /* WL_NAN */
dhd_rtt_ftm_enable(dhd, FALSE);
}
mutex_unlock(&rtt_status->rtt_mutex);
@@ -1486,10 +2056,109 @@
}
#ifdef WL_CFG80211
+static void
+dhd_rtt_timeout(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+#ifndef DHD_DUMP_ON_RTT_TIMEOUT
+ rtt_target_info_t *rtt_target = NULL;
+ rtt_target_info_t *rtt_target_info = NULL;
+#ifdef WL_NAN
+ nan_ranging_inst_t *ranging_inst = NULL;
+ int ret = BCME_OK;
+ uint32 status;
+ struct net_device *ndev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+#endif /* WL_NAN */
+#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT status\n"));
+ goto exit;
+ }
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT Request\n"));
+ goto exit;
+ }
+
+#ifdef DHD_DUMP_ON_RTT_TIMEOUT
+ /* Dump, and Panic depending on memdump.info */
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* Behave based on user memdump info */
+ dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#else /* DHD_DUMP_ON_RTT_TIMEOUT */
+ /* Cancel RTT for target and proceed to next target */
+ rtt_target_info = rtt_status->rtt_config.target_info;
+ if ((!rtt_target_info) ||
+ (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt)) {
+ goto exit;
+ }
+ rtt_target = &rtt_target_info[rtt_status->cur_idx];
+ WL_ERR(("Proxd timer expired for Target: "MACDBG" \n", MAC2STRDBG(&rtt_target->addr)));
+#ifdef WL_NAN
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, &rtt_target->addr);
+ if (!ranging_inst) {
+ goto exit;
+ }
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
+ __FUNCTION__, ret, status));
+ }
+ } else
+#endif /* WL_NAN */
+ {
+ /* For Legacy RTT */
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ }
+ dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr);
+ dhd_rtt_handle_rtt_session_end(dhd);
+#endif /* DHD_DUMP_ON_RTT_TIMEOUT */
+exit:
+ return;
+}
+
+static void
+dhd_rtt_timeout_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+ mutex_lock(&rtt_status->rtt_mutex);
+ (void) dhd_rtt_timeout(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+}
+
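The proxd timeout added above follows the standard Linux delayed-work pattern: arm a timer when the FTM session is started, cancel it when the completion event arrives first, and recover the owning object with container_of() in the handler. A minimal, self-contained sketch of that pattern (the struct and function names here are hypothetical, not part of this driver) could look like:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_session {
	struct delayed_work timeout_work;	/* fires if no completion event arrives */
};

static void demo_timeout_handler(struct work_struct *work)
{
	struct demo_session *s =
		container_of(work, struct demo_session, timeout_work.work);

	/* session timed out: clean up and move on to the next target */
	(void)s;
}

static void demo_session_init(struct demo_session *s)
{
	INIT_DELAYED_WORK(&s->timeout_work, demo_timeout_handler);
}

static void demo_session_start(struct demo_session *s, unsigned int timeout_ms)
{
	schedule_delayed_work(&s->timeout_work, msecs_to_jiffies(timeout_ms));
}

static void demo_session_end(struct demo_session *s)
{
	/* completion arrived first: disarm the pending timeout */
	if (delayed_work_pending(&s->timeout_work))
		cancel_delayed_work(&s->timeout_work);
}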
static int
dhd_rtt_start(dhd_pub_t *dhd)
{
int err = BCME_OK;
+ int err_at = 0;
char eabuf[ETHER_ADDR_STR_LEN];
char chanbuf[CHANSPEC_STR_LEN];
int pm = PM_OFF;
@@ -1501,33 +2170,42 @@
rtt_target_info_t *rtt_target;
rtt_status_info_t *rtt_status;
struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ int rtt_sched_type = RTT_TYPE_INVALID;
+
NULL_CHECK(dhd, "dhd is NULL", err);
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
DHD_RTT(("Enter %s\n", __FUNCTION__));
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT(("No Directed RTT target to process, check for geofence\n"));
+ goto geofence;
+ }
+
if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
err = BCME_RANGE;
+ err_at = 1;
DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_ERROR(("STA is set as Target/Responder \n"));
- return BCME_ERROR;
+ DHD_RTT_ERR(("STA is set as Target/Responder \n"));
+ err = BCME_ERROR;
+ err_at = 1;
}
goto exit;
}
- if (RTT_IS_STOPPED(rtt_status)) {
- DHD_RTT(("RTT is stopped\n"));
- goto exit;
- }
+
rtt_status->pm = PM_OFF;
err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
if (err) {
- DHD_ERROR(("Failed to get the PM value\n"));
+ DHD_RTT_ERR(("Failed to get the PM value\n"));
} else {
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to set the PM\n"));
+ DHD_RTT_ERR(("Failed to set the PM\n"));
rtt_status->pm_restore = FALSE;
} else {
rtt_status->pm_restore = TRUE;
@@ -1539,11 +2217,38 @@
rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
mutex_unlock(&rtt_status->rtt_mutex);
DHD_RTT(("%s enter\n", __FUNCTION__));
+
+ if (ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+ err = BCME_BADADDR;
+ err_at = 2;
+ DHD_RTT(("RTT Target addr is NULL\n"));
+ goto exit;
+ }
+
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr);
+ if (rtt_invalid_reason != RTT_STATE_VALID) {
+ err = BCME_BUSY;
+ err_at = 3;
+ DHD_RTT(("DRV State is not valid for RTT\n"));
+ goto exit;
+ }
+
+#ifdef WL_NAN
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ rtt_sched_type = RTT_TYPE_NAN_DIRECTED;
+ rtt_status->status = RTT_ENABLED;
+ /* Ignore return value..failure taken care inside the API */
+ dhd_rtt_nan_start_session(dhd, rtt_target);
+ goto exit;
+ }
+#endif /* WL_NAN */
if (!RTT_IS_ENABLED(rtt_status)) {
/* enable ftm */
err = dhd_rtt_ftm_enable(dhd, TRUE);
if (err) {
- DHD_ERROR(("failed to enable FTM (%d)\n", err));
+ DHD_RTT_ERR(("failed to enable FTM (%d)\n", err));
+ err_at = 5;
goto exit;
}
}
@@ -1551,7 +2256,8 @@
/* delete session of index default sesession */
err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
if (err < 0 && err != BCME_NOTFOUND) {
- DHD_ERROR(("failed to delete session of FTM (%d)\n", err));
+ DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err));
+ err_at = 6;
goto exit;
}
rtt_status->status = RTT_ENABLED;
@@ -1563,6 +2269,28 @@
ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR;
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
ftm_configs, ftm_cfg_cnt);
+
+ memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
+
+ /* Newer FW versions use a random MAC in place of cur_etheraddr */
+ if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) {
+ err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err) {
+ DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+ err_at = 7;
+ goto exit;
+ }
+ memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
+
+ /* local mac address */
+ if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) {
+ ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr;
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR;
+ bcm_ether_ntoa(&rtt_target->local_addr, eabuf);
+ DHD_RTT((">\t local %s\n", eabuf));
+ }
+ }
/* target's mac address */
if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) {
ftm_params[ftm_param_cnt].mac_addr = rtt_target->addr;
@@ -1584,15 +2312,18 @@
DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst));
}
/* number of frame per burst */
- if (rtt_target->num_frames_per_burst == 0) {
- rtt_target->num_frames_per_burst =
- CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M :
- CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M :
- FTM_DEFAULT_CNT_80M;
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ if (CHSPEC_IS80(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ } else if (CHSPEC_IS40(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M;
+ } else if (CHSPEC_IS20(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M;
}
ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst);
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM;
DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst));
+
/* FTM retry count */
if (rtt_target->num_retries_per_ftm) {
ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm;
@@ -1613,7 +2344,12 @@
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD;
DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period));
}
+ /* Setting both duration and timeout to MAX duration
+ * to handle congested environments.
+ * Hence the user-configured values are ignored.
+ */
/* burst-duration */
+ rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS;
if (rtt_target->burst_duration) {
ftm_params[ftm_param_cnt].data_intvl.intvl =
htol32(rtt_target->burst_duration); /* ms */
@@ -1622,45 +2358,63 @@
DHD_RTT((">\t burst duration : %d ms\n",
rtt_target->burst_duration));
}
+ /* burst-timeout */
+ rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS;
+ if (rtt_target->burst_timeout) {
+ ftm_params[ftm_param_cnt].data_intvl.intvl =
+ htol32(rtt_target->burst_timeout); /* ms */
+ ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT;
+ DHD_RTT((">\t burst timeout : %d ms\n",
+ rtt_target->burst_timeout));
+ }
+ /* event_mask: applicable only for legacy RTT.
+ * For NAN RTT the configuration comes from firmware.
+ */
+ ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
+ (1 << WL_PROXD_EVENT_SESSION_END));
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
+
if (rtt_target->bw && rtt_target->preamble) {
bool use_default = FALSE;
int nss;
int mcs;
switch (rtt_target->preamble) {
- case RTT_PREAMBLE_LEGACY:
- rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
- rspec |= WL_RATE_6M;
- break;
- case RTT_PREAMBLE_HT:
- rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
- mcs = 0; /* default MCS 0 */
- rspec |= mcs;
- break;
- case RTT_PREAMBLE_VHT:
- rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
- mcs = 0; /* default MCS 0 */
- nss = 1; /* default Nss = 1 */
- rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
- break;
- default:
- DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble));
- use_default = TRUE;
- break;
+ case RTT_PREAMBLE_LEGACY:
+ rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
+ rspec |= WL_RATE_6M;
+ break;
+ case RTT_PREAMBLE_HT:
+ rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
+ mcs = 0; /* default MCS 0 */
+ rspec |= mcs;
+ break;
+ case RTT_PREAMBLE_VHT:
+ rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+ mcs = 0; /* default MCS 0 */
+ nss = 1; /* default Nss = 1 */
+ rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+ break;
+ default:
+ DHD_RTT(("doesn't support this preamble : %d\n",
+ rtt_target->preamble));
+ use_default = TRUE;
+ break;
}
switch (rtt_target->bw) {
- case RTT_BW_20:
- rspec |= WL_RSPEC_BW_20MHZ;
- break;
- case RTT_BW_40:
- rspec |= WL_RSPEC_BW_40MHZ;
- break;
- case RTT_BW_80:
- rspec |= WL_RSPEC_BW_80MHZ;
- break;
- default:
- DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
- use_default = TRUE;
- break;
+ case RTT_BW_20:
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case RTT_BW_40:
+ rspec |= WL_RSPEC_BW_40MHZ;
+ break;
+ case RTT_BW_80:
+ rspec |= WL_RSPEC_BW_80MHZ;
+ break;
+ default:
+ DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
+ use_default = TRUE;
+ break;
}
if (!use_default) {
ftm_params[ftm_param_cnt].data32 = htol32(rspec);
@@ -1671,25 +2425,49 @@
}
dhd_set_rand_mac_oui(dhd);
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
- ftm_params, ftm_param_cnt);
+ ftm_params, ftm_param_cnt);
+ rtt_sched_type = RTT_TYPE_LEGACY;
err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
if (err) {
- DHD_ERROR(("failed to start session of FTM : error %d\n", err));
+ DHD_RTT_ERR(("failed to start session of FTM : error %d\n", err));
+ err_at = 8;
+ } else {
+ /* schedule proxd timeout */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+
}
+
+ goto exit;
+geofence:
+#ifdef WL_NAN
+ /* sched geofencing rtt */
+ rtt_sched_type = RTT_TYPE_NAN_GEOFENCE;
+ if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) {
+ DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err));
+ err_at = 9;
+ }
+#endif /* WL_NAN */
+
exit:
if (err) {
- DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
+ /* RTT Failed */
+ DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d,"
+ " err_at = %d, rtt_sched_type = %d, rtt_invalid_reason = %d\n"
+ " sched_reason = %d",
+ err, err_at, rtt_sched_type, rtt_invalid_reason,
+ rtt_status->rtt_sched_reason));
rtt_status->status = RTT_STOPPED;
/* disable FTM */
dhd_rtt_ftm_enable(dhd, FALSE);
if (rtt_status->pm_restore) {
pm = PM_FAST;
- DHD_ERROR(("pm_restore =%d func =%s \n",
+ DHD_RTT_ERR(("pm_restore =%d func =%s \n",
rtt_status->pm_restore, __FUNCTION__));
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to set PM \n"));
+ DHD_RTT_ERR(("Failed to set PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
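The ftm_params handling earlier in this hunk accumulates tagged parameters into an array, bumping ftm_param_cnt for each optional field, and pushes the whole set to the firmware in a single dhd_rtt_ftm_config() call. A simplified sketch of that accumulation pattern, using hypothetical types and TLV ids rather than the driver's own, is:

#include <stdint.h>

#define DEMO_MAX_PARAMS 20

enum demo_tlv_id {
	DEMO_TLV_BURST_NUM = 1,
	DEMO_TLV_BURST_PERIOD = 2,
};

struct demo_param {
	enum demo_tlv_id tlvid;
	uint32_t data32;
};

/* stand-in for the real config call: would serialize params into an iovar buffer */
static int demo_config(const struct demo_param *params, int cnt)
{
	return cnt;
}

static int demo_build_config(uint32_t burst_num, uint32_t burst_period_ms)
{
	struct demo_param params[DEMO_MAX_PARAMS];
	int cnt = 0;

	if (burst_num) {			/* only append fields that are set */
		params[cnt].data32 = burst_num;
		params[cnt++].tlvid = DEMO_TLV_BURST_NUM;
	}
	if (burst_period_ms) {
		params[cnt].data32 = burst_period_ms;
		params[cnt++].tlvid = DEMO_TLV_BURST_PERIOD;
	}
	return demo_config(params, cnt);	/* one call carries the whole set */
}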
@@ -1711,18 +2489,13 @@
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(&noti_list_lock);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
goto exit;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
if (!cb) {
err = -ENOMEM;
@@ -1747,20 +2520,15 @@
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(&noti_list_lock);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
cb = iter;
list_del(&cb->list);
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
spin_unlock_bh(&noti_list_lock);
if (cb) {
@@ -1812,7 +2580,7 @@
#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"}
static int
-dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len)
{
int i;
@@ -1842,6 +2610,7 @@
int32 dist = 0;
uint8 num_ftm = 0;
char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
BCM_REFERENCE(ftm_frame_types);
BCM_REFERENCE(dist);
@@ -1852,6 +2621,8 @@
BCM_REFERENCE(tof_phy_error);
BCM_REFERENCE(bitflips);
BCM_REFERENCE(snr);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
NULL_CHECK(rtt_report, "rtt_report is NULL", err);
NULL_CHECK(p_data, "p_data is NULL", err);
@@ -1864,7 +2635,6 @@
session_state = ltoh16_ua(&p_data_info->state);
proxd_status = ltoh32_ua(&p_data_info->status);
bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
- ftm_session_state_value_to_logstr(session_state);
ftm_status_value_to_logstr(proxd_status);
DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
eabuf,
@@ -2026,7 +2796,7 @@
}
static int
-dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len)
{
int i;
@@ -2058,6 +2828,7 @@
uint32 chanspec = 0;
uint8 num_ftm = 0;
char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
BCM_REFERENCE(ftm_frame_types);
BCM_REFERENCE(dist);
@@ -2069,6 +2840,8 @@
BCM_REFERENCE(bitflips);
BCM_REFERENCE(snr);
BCM_REFERENCE(chanspec);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
NULL_CHECK(rtt_report, "rtt_report is NULL", err);
NULL_CHECK(p_data, "p_data is NULL", err);
@@ -2080,14 +2853,21 @@
session_state = ltoh16_ua(&p_data_info->state);
proxd_status = ltoh32_ua(&p_data_info->status);
bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
- ftm_session_state_value_to_logstr(session_state);
- ftm_status_value_to_logstr(proxd_status);
- DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
- eabuf,
- session_state,
+
+ if (proxd_status != BCME_OK) {
+ DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) "
+ "num_meas_ota %d num_valid_rtt %d result_flags %x\n",
+ eabuf, session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status, ftm_status_value_to_logstr(proxd_status),
+ p_data_info->num_meas, p_data_info->num_valid_rtt,
+ p_data_info->flags));
+ } else {
+ DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
+ eabuf, session_state,
ftm_session_state_value_to_logstr(session_state),
- proxd_status,
- ftm_status_value_to_logstr(proxd_status)));
+ proxd_status, ftm_status_value_to_logstr(proxd_status)));
+ }
/* show avg_dist (1/256m units), burst_num */
avg_dist = ltoh32_ua(&p_data_info->avg_dist);
if (avg_dist == 0xffffffff) { /* report 'failure' case */
@@ -2096,14 +2876,16 @@
p_data_info->num_valid_rtt)); /* in a session */
avg_dist = FTM_INVALID;
} else {
- DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
- avg_dist >> 8, /* 1/256m units */
+ DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d "
+ "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */
((avg_dist & 0xff) * 625) >> 4,
ltoh16_ua(&p_data_info->burst_num),
p_data_info->num_valid_rtt,
- p_data_info->num_ftm)); /* in a session */
+ p_data_info->num_ftm, p_data_info->num_meas,
+ p_data_info->flags)); /* in a session */
}
-
+ rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas;
+ rtt_result->rtt_detail.result_flags = p_data_info->flags;
/* show 'avg_rtt' sample */
/* in v2, avg_rtt is the first element of the variable rtt[] */
p_sample_avg = &p_data_info->rtt[0];
@@ -2126,6 +2908,9 @@
/* success num */
rtt_report->success_num = p_data_info->num_valid_rtt;
+ /* num-ftm configured */
+ rtt_report->ftm_num = p_data_info->num_ftm;
+
/* actual number of FTM supported by peer */
rtt_report->num_per_burst_peer = p_data_info->num_ftm;
rtt_report->negotiated_burst_num = p_data_info->num_ftm;
@@ -2134,6 +2919,18 @@
rtt_report->status = ftm_get_statusmap_info(proxd_status,
&ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+ /* The framework expects status SUCCESS, otherwise all results are
+ * zeroed even when we have a partially valid result.
+ * So set status to SUCCESS when there is at least one valid_rtt.
+ * On burst timeout we stop the burst with reason "timeout" and
+ * on MSCH end we set the status to "cancel".
+ */
+ if ((proxd_status == WL_PROXD_E_TIMEOUT ||
+ proxd_status == WL_PROXD_E_CANCELED) &&
+ rtt_report->success_num) {
+ rtt_report->status = RTT_STATUS_SUCCESS;
+ }
+
/* rssi (0.5db) */
rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2;
@@ -2160,6 +2957,11 @@
if (avg_dist != FTM_INVALID) {
rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
+ /* rtt_sd is in 0.1 ns.
+ * host needs distance_sd in milli mtrs
+ * (0.1 * rtt_sd/2 * 10^-9) * C * 1000
+ */
+ rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */
} else {
rtt_report->distance = FTM_INVALID;
}
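The distance_sd conversion above can be checked directly: with rtt_sd in 0.1 ns units, the one-way time is rtt_sd * 0.05 ns, and multiplying by the speed of light (about 3.0e8 m/s) and by 1000 mm/m gives roughly rtt_sd * 15 mm, which is the constant used in the code. The same arithmetic as a small helper (the name is hypothetical):

#include <stdint.h>

/*
 * rtt_sd is in 0.1 ns units; one-way time = rtt_sd * 0.05 ns.
 * distance_sd = time * c = rtt_sd * 5e-11 s * 2.998e8 m/s ~= rtt_sd * 0.015 m,
 * i.e. rtt_sd * 15 in millimetres.
 */
static inline uint32_t demo_rtt_sd_to_mm(uint32_t rtt_sd_tenth_ns)
{
	return rtt_sd_tenth_ns * 15;
}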
@@ -2249,6 +3051,404 @@
}
return err;
}
+#ifdef WL_CFG80211
+/* Common API for handling Session End.
+* This API will flush out the results for a peer MAC.
+*
+* @For legacy FTM session, this API will be called
+* when legacy FTM_SESSION_END event is received.
+* @For legacy Nan-RTT, this API will be called when
+* we are cancelling the nan-ranging session or on
+* nan-ranging-end event.
+*/
+static void
+dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd)
+{
+
+ int idx;
+ struct rtt_noti_callback *iter;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *next2;
+ rtt_result_t *rtt_result;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+
+ /* find next target to trigger RTT */
+ for (idx = (rtt_status->cur_idx + 1);
+ idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+ if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+ /* restart to measure RTT from next device */
+ DHD_INFO(("restart to measure rtt\n"));
+ schedule_work(&rtt_status->work);
+ } else {
+ DHD_RTT(("RTT_STOPPED\n"));
+ rtt_status->status = RTT_STOPPED;
+ /* notify the completed information to others */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ kfree(entry);
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT),
+ 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
+ }
+}
+#endif /* WL_CFG80211 */
+
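Flushing rtt_results_cache in dhd_rtt_handle_rtt_session_end() above relies on the list_for_each_entry_safe() iterators because each entry is unlinked and freed inside the loop; the plain iterator would dereference freed memory. A stripped-down sketch of the same nested teardown, with hypothetical types, is:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_result {
	struct list_head list;
};

struct demo_header {
	struct list_head list;		/* linked into the cache */
	struct list_head result_list;	/* per-peer results */
};

static void demo_flush_cache(struct list_head *cache)
{
	struct demo_header *hdr, *hdr_next;
	struct demo_result *res, *res_next;

	list_for_each_entry_safe(hdr, hdr_next, cache, list) {
		list_del(&hdr->list);
		list_for_each_entry_safe(res, res_next, &hdr->result_list, list) {
			list_del(&res->list);
			kfree(res);
		}
		kfree(hdr);
	}
	INIT_LIST_HEAD(cache);	/* re-initialize the head, as the driver does */
}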
+#ifdef WL_CFG80211
+static int
+dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr)
+{
+ rtt_results_header_t *rtt_results_header = NULL;
+ rtt_target_info_t *rtt_target_info;
+ int ret = BCME_OK;
+ rtt_result_t *rtt_result;
+
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_results_header->peer_mac,
+ ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ kfree(rtt_results_header);
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP;
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN,
+ &rtt_target_info->addr, ETHER_ADDR_LEN);
+ rtt_result->report.distance = FTM_INVALID;
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len;
+exit:
+ return ret;
+}
+
+static bool
+dhd_rtt_get_report_header(rtt_status_info_t *rtt_status,
+ rtt_results_header_t **rtt_results_header, struct ether_addr *addr)
+{
+ rtt_results_header_t *entry;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* find a rtt_report_header for this mac address */
+ list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) {
+ /* found a rtt_report_header for peer_mac in the list */
+ if (rtt_results_header) {
+ *rtt_results_header = entry;
+ }
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+int
+dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer)
+{
+ bool is_new = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ mutex_lock(&rtt_status->rtt_mutex);
+ is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer);
+
+ if (is_new) { /* no FTM result..create failure result */
+ dhd_rtt_create_failure_result(rtt_status, peer);
+ }
+ dhd_rtt_handle_rtt_session_end(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 */
+
+static bool
+dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result)
+{
+ bool ret = FALSE;
+
+ if (rtt_result && (rtt_result->report.success_num != 0)) {
+ ret = TRUE;
+ }
+ return ret;
+}
+
+static int
+dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data,
+ int tlvs_len, rtt_result_t *rtt_result)
+{
+ int ret = BCME_OK;
+
+ /* unpack TLVs and invokes the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf((void *) rtt_result,
+ (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->detail_len = sizeof(rtt_result->rtt_detail);
+
+exit:
+ return ret;
+
+}
+
+static int
+dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan)
+{
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *rtt_results_header = NULL;
+ bool is_new = TRUE;
+ int ret = BCME_OK;
+ int err_at = 0;
+
+ rtt_status = GET_RTTSTATE(dhd);
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, peer_addr);
+
+ if (tlvs_len > 0) {
+ if (is_new) {
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = BCME_NORESOURCE;
+ err_at = 1;
+ goto exit;
+ }
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ /* same src and header len */
+ (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN,
+ peer_addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+ }
+
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
+ if ((ret == BCME_OK) && ((!is_nan) ||
+ dhd_rtt_is_valid_measurement(rtt_result))) {
+ /*
+ * Add to list, if non-nan RTT (legacy) or
+ * valid measurement in nan rtt case
+ */
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len +
+ rtt_result->detail_len;
+ } else {
+ err_at = 2;
+ if (ret == BCME_OK) {
+ /* Case for nan rtt invalid measurement */
+ ret = BCME_ERROR;
+ err_at = 3;
+ }
+ goto exit;
+ }
+ } else {
+ ret = BCME_ERROR;
+ err_at = 4;
+ goto exit;
+ }
+
+exit:
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, "
+ " ret = %d, err_at = %d\n", ret, err_at));
+ if (rtt_results_header) {
+ list_del(&rtt_results_header->list);
+ kfree(rtt_results_header);
+ rtt_results_header = NULL;
+ }
+ }
+ return ret;
+}
+
+#ifdef WL_NAN
+static void
+dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg,
+ rtt_result_t *rtt_result)
+{
+ wl_nan_ev_rng_rpt_ind_t range_res;
+ nan_ranging_inst_t *rng_inst = NULL;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ UNUSED_PARAMETER(range_res);
+
+ if (!dhd_rtt_is_valid_measurement(rtt_result)) {
+ /* Drop Invalid Measurements for NAN RTT report */
+ DHD_RTT(("dhd_rtt_nan_range_report: Drop Invalid Measurements\n"));
+ return;
+ }
+ bzero(&range_res, sizeof(range_res));
+ range_res.indication = 0;
+ range_res.dist_mm = rtt_result->report.distance;
+ /* same src and header len, ignoring ret val here */
+ (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN,
+ &rtt_result->report.addr, ETHER_ADDR_LEN);
+ wl_cfgnan_process_range_report(cfg, &range_res);
+ /*
+ * suspend geofence ranging for this target
+ * and move to next target
+ * after valid measurement for the target
+ */
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res.peer_m_addr);
+ if (rng_inst) {
+ wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
+ &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE);
+ }
+}
+
+static int
+dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len)
+{
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+ nan_ranging_inst_t *rng_inst = NULL;
+ rtt_status_info_t *rtt_status = NULL;
+ rtt_result_t *rtt_result = NULL;
+ bool is_geofence = FALSE;
+ int ret = BCME_OK;
+
+ ndev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ NAN_MUTEX_LOCK();
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if ((cfg->nan_enable == FALSE) ||
+ ETHER_ISNULLADDR(peer_addr)) {
+ DHD_RTT_ERR(("Received Burst End with NULL ether addr, "
+ "or nan disable, nan_enable = %d\n", cfg->nan_enable));
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (rng_inst) {
+ is_geofence = (rng_inst->range_type
+ == RTT_TYPE_NAN_GEOFENCE);
+ } else {
+ DHD_RTT_ERR(("Received Burst End without Ranging Instance\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = BCME_NORESOURCE;
+ goto exit;
+ }
+
+ if (is_geofence) {
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("avilog: dhd_rtt_handle_nan_burst_end: "
+ "dhd_rtt_parse_result_event failed\n"));
+ goto exit;
+ }
+ } else {
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ }
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr,
+ proxd_ev_data, tlvs_len, rtt_result, TRUE);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+
+ }
+
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
+ if (ret == BCME_OK) {
+ dhd_rtt_nan_range_report(cfg, rtt_result);
+ }
+ if (rtt_result &&
+ ((ret != BCME_OK) || is_geofence)) {
+ kfree(rtt_result);
+ rtt_result = NULL;
+ }
+ NAN_MUTEX_UNLOCK();
+ return ret;
+}
+#endif /* WL_NAN */
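In dhd_rtt_handle_nan_burst_end() above, the rtt_result buffer is freed locally only when the call fails or when it is a geofence result that has already been reported; on the successful directed path it has been queued on the results list and is released later when the cache is flushed. That ownership rule, reduced to a hypothetical sketch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_result {
	int dist_mm;
};

/* on the directed path the result is queued elsewhere and handed off */
static int demo_process(struct demo_result *res, bool is_geofence, bool *queued)
{
	res->dist_mm = 0;
	*queued = !is_geofence;
	return 0;
}

static int demo_handle_burst_end(bool is_geofence)
{
	struct demo_result *res;
	bool queued = false;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	ret = demo_process(res, is_geofence, &queued);
	if (ret || !queued)
		kfree(res);	/* free only what was not handed off */
	return ret;
}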
int
dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
@@ -2261,34 +3461,15 @@
wl_proxd_ftm_session_status_t session_status;
const ftm_strmap_entry_t *p_loginfo;
rtt_result_t *rtt_result;
- gfp_t kflags;
#ifdef WL_CFG80211
- int idx;
rtt_status_info_t *rtt_status;
- rtt_target_info_t *rtt_target_info;
- struct rtt_noti_callback *iter;
- rtt_results_header_t *entry, *next, *rtt_results_header = NULL;
- rtt_result_t *next2;
+ rtt_results_header_t *rtt_results_header = NULL;
bool is_new = TRUE;
#endif /* WL_CFG80211 */
DHD_RTT(("Enter %s \n", __FUNCTION__));
NULL_CHECK(dhd, "dhd is NULL", ret);
-#ifdef WL_CFG80211
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
-
- if (RTT_IS_STOPPED(rtt_status)) {
- /* Ignore the Proxd event */
- DHD_RTT((" event handler rtt is stopped \n"));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_RTT(("Device is target/Responder. Recv the event. \n"));
- } else {
- return ret;
- }
- }
-#endif /* WL_CFG80211 */
if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) {
DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__,
ntoh32_ua((void *)&event->datalen)));
@@ -2296,43 +3477,37 @@
}
event_type = ntoh32_ua((void *)&event->event_type);
if (event_type != WLC_E_PROXD) {
- DHD_ERROR((" failed event \n"));
+ DHD_RTT_ERR((" failed event \n"));
return -EINVAL;
}
if (!event_data) {
- DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__));
return -EINVAL;
}
p_event = (wl_proxd_event_t *) event_data;
version = ltoh16(p_event->version);
if (version < WL_PROXD_API_VERSION) {
- DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
+ DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
version, WL_PROXD_API_VERSION));
return ret;
}
-#ifdef WL_CFG80211
- if (!in_atomic()) {
- mutex_lock(&rtt_status->rtt_mutex);
- }
-#endif /* WL_CFG80211 */
- event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
- kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+ event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n",
p_event->type, ntoh16(p_event->type), ltoh16(p_event->type)));
p_loginfo = ftm_get_event_type_loginfo(event_type);
if (p_loginfo == NULL) {
- DHD_ERROR(("receive an invalid FTM event %d\n", event_type));
+ DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type));
ret = -EINVAL;
- goto exit; /* ignore this event */
+ return ret; /* ignore this event */
}
/* get TLVs len, skip over event header */
if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) {
- DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
+ DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
ret = -EINVAL;
- goto exit;
+ return ret;
}
tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs);
DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n",
@@ -2343,23 +3518,36 @@
ltoh16(p_event->sid),
tlvs_len));
#ifdef WL_CFG80211
- rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- /* find a rtt_report_header for this mac address */
- list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
- if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) {
- /* found a rtt_report_header for peer_mac in the list */
- is_new = FALSE;
- rtt_results_header = entry;
- break;
- }
+#ifdef WL_NAN
+ if ((event_type == WL_PROXD_EVENT_BURST_END) &&
+ dhd_rtt_is_nan_peer(dhd, &event->addr)) {
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n"));
+ ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len);
+ return ret;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+#endif /* WL_NAN */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ }
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, &event->addr);
+ GCC_DIAGNOSTIC_POP();
#endif /* WL_CFG80211 */
switch (event_type) {
case WL_PROXD_EVENT_SESSION_CREATE:
@@ -2372,52 +3560,30 @@
DHD_RTT(("WL_PROXD_EVENT_BURST_START\n"));
break;
case WL_PROXD_EVENT_BURST_END:
- DHD_RTT(("WL_PROXD_EVENT_BURST_END\n"));
-#ifdef WL_CFG80211
- if (is_new) {
- /* allocate new header for rtt_results */
- rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags);
- if (!rtt_results_header) {
- ret = -ENOMEM;
- goto exit;
- }
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- rtt_results_header->peer_mac = event->addr;
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n"));
+ /* allocate rtt_results for new legacy rtt results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ goto exit;
}
-#endif /* WL_CFG80211 */
- if (tlvs_len > 0) {
- /* allocate rtt_results for new results */
- rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
- if (!rtt_result) {
- ret = -ENOMEM;
- goto exit;
- }
- /* unpack TLVs and invokes the cbfn to print the event content TLVs */
- ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report),
- (uint8 *)&p_event->tlvs[0], tlvs_len,
- BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
- __FUNCTION__));
- goto exit;
- }
-#ifdef WL_CFG80211
- /* fill out the results from the configuration param */
- rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
-
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len;
-#endif /* WL_CFG80211 */
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr,
+ p_event, tlvs_len, rtt_result, FALSE);
+ if (rtt_result && (ret != BCME_OK)) {
+ kfree(rtt_result);
+ rtt_result = NULL;
+ goto exit;
}
break;
case WL_PROXD_EVENT_SESSION_END:
- DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+ if (dhd_rtt_is_nan_peer(dhd, &event->addr)) {
+ /*
+ * Nothing to do on session end for a NAN peer;
+ * it is all handled in burst end and the NAN range report
+ */
+ break;
+ }
#ifdef WL_CFG80211
if (!RTT_IS_ENABLED(rtt_status)) {
DHD_RTT(("Ignore the session end evt\n"));
@@ -2430,7 +3596,7 @@
(uint8 *)&p_event->tlvs[0], tlvs_len,
BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
__FUNCTION__));
goto exit;
}
@@ -2438,90 +3604,10 @@
#ifdef WL_CFG80211
/* In case of no result for the peer device, make fake result for error case */
if (is_new) {
- /* allocate new header for rtt_results */
- rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL);
- if (!rtt_results_header) {
- ret = -ENOMEM;
- goto exit;
- }
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- rtt_results_header->peer_mac = event->addr;
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
-
- /* allocate rtt_results for new results */
- rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
- if (!rtt_result) {
- ret = -ENOMEM;
- kfree(rtt_results_header);
- goto exit;
- }
- /* fill out the results from the configuration param */
- rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
- rtt_result->report.status = RTT_REASON_FAIL_NO_RSP;
- rtt_result->report.addr = rtt_target_info->addr;
- rtt_result->report.distance = FTM_INVALID;
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len;
+ dhd_rtt_create_failure_result(rtt_status, &event->addr);
}
- /* find next target to trigger RTT */
- for (idx = (rtt_status->cur_idx + 1);
- idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
- /* skip the disabled device */
- if (rtt_status->rtt_config.target_info[idx].disable) {
- continue;
- } else {
- /* set the idx to cur_idx */
- rtt_status->cur_idx = idx;
- break;
- }
- }
- if (idx < rtt_status->rtt_config.rtt_target_cnt) {
- /* restart to measure RTT from next device */
- DHD_ERROR(("restart to measure rtt\n"));
- schedule_work(&rtt_status->work);
- } else {
- DHD_RTT(("RTT_STOPPED\n"));
- rtt_status->status = RTT_STOPPED;
- schedule_work(&rtt_status->work);
- /* notify the completed information to others */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
- }
- /* remove the rtt results in cache */
- if (!list_empty(&rtt_status->rtt_results_cache)) {
- /* Iterate rtt_results_header list */
- list_for_each_entry_safe(entry, next,
- &rtt_status->rtt_results_cache, list) {
- list_del(&entry->list);
- /* Iterate rtt_result list */
- list_for_each_entry_safe(rtt_result, next2,
- &entry->result_list, list) {
- list_del(&rtt_result->list);
- kfree(rtt_result);
- }
- kfree(entry);
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- /* reinitialize the HEAD */
- INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
- /* clear information for rtt_config */
- rtt_status->rtt_config.rtt_target_cnt = 0;
- memset(rtt_status->rtt_config.target_info, 0,
- TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
- rtt_status->cur_idx = 0;
- }
+ DHD_RTT(("\n Not Nan peer..proceed to notify result and restart\n"));
+ dhd_rtt_handle_rtt_session_end(dhd);
#endif /* WL_CFG80211 */
break;
case WL_PROXD_EVENT_SESSION_RESTART:
@@ -2549,7 +3635,7 @@
DHD_RTT(("WL_PROXD_EVENT_COLLECT\n"));
if (tlvs_len > 0) {
void *buffer = NULL;
- if (!(buffer = kzalloc(tlvs_len, kflags))) {
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
ret = -ENOMEM;
goto exit;
}
@@ -2559,7 +3645,7 @@
BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
kfree(buffer);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
__FUNCTION__, event_type));
goto exit;
}
@@ -2569,7 +3655,7 @@
DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n"));
if (tlvs_len > 0) {
void *buffer = NULL;
- if (!(buffer = kzalloc(tlvs_len, kflags))) {
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
ret = -ENOMEM;
goto exit;
}
@@ -2579,7 +3665,7 @@
BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
kfree(buffer);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
__FUNCTION__, event_type));
goto exit;
}
@@ -2587,14 +3673,12 @@
break;
default:
- DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
+ DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
break;
}
exit:
#ifdef WL_CFG80211
- if (!in_atomic()) {
- mutex_unlock(&rtt_status->rtt_mutex);
- }
+ mutex_unlock(&rtt_status->rtt_mutex);
#endif /* WL_CFG80211 */
return ret;
@@ -2606,21 +3690,14 @@
{
rtt_status_info_t *rtt_status;
dhd_pub_t *dhd;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
rtt_status = container_of(work, rtt_status_info_t, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- if (rtt_status == NULL) {
- DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
- return;
- }
+ GCC_DIAGNOSTIC_POP();
+
dhd = rtt_status->dhd;
if (dhd == NULL) {
- DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
return;
}
(void) dhd_rtt_start(dhd);
@@ -2701,7 +3778,7 @@
ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
}
} else {
- DHD_ERROR(("Failed to get the chanspec \n"));
+ DHD_RTT_ERR(("Failed to get the chanspec \n"));
}
return err;
}
@@ -2745,18 +3822,18 @@
wf_chspec_ntoa(chanspec, chanbuf)));
err = wldev_iovar_setint(dev, "chanspec", chanspec);
if (err) {
- DHD_ERROR(("Failed to set the chanspec \n"));
+ DHD_RTT_ERR(("Failed to set the chanspec \n"));
}
}
rtt_status->pm = PM_OFF;
err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
DHD_RTT(("Current PM value read %d\n", rtt_status->pm));
if (err) {
- DHD_ERROR(("Failed to get the PM value \n"));
+ DHD_RTT_ERR(("Failed to get the PM value \n"));
} else {
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to set the PM \n"));
+ DHD_RTT_ERR(("Failed to set the PM \n"));
rtt_status->pm_restore = FALSE;
} else {
rtt_status->pm_restore = TRUE;
@@ -2765,7 +3842,7 @@
if (!RTT_IS_ENABLED(rtt_status)) {
err = dhd_rtt_ftm_enable(dhd, TRUE);
if (err) {
- DHD_ERROR(("Failed to enable FTM (%d)\n", err));
+ DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err));
goto exit;
}
DHD_RTT(("FTM enabled \n"));
@@ -2783,14 +3860,14 @@
exit:
if (err) {
rtt_status->status = RTT_STOPPED;
- DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
+ DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__));
dhd_rtt_ftm_enable(dhd, FALSE);
DHD_RTT(("restoring the PM value \n"));
if (rtt_status->pm_restore) {
pm = PM_FAST;
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to restore PM \n"));
+ DHD_RTT_ERR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
@@ -2813,7 +3890,7 @@
DHD_RTT(("Enter %s \n", __FUNCTION__));
err = dhd_rtt_ftm_enable(dhd, FALSE);
if (err) {
- DHD_ERROR(("failed to disable FTM (%d)\n", err));
+ DHD_RTT_ERR(("failed to disable FTM (%d)\n", err));
}
rtt_status->status = RTT_STOPPED;
if (rtt_status->pm_restore) {
@@ -2821,7 +3898,7 @@
DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore));
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to restore PM \n"));
+ DHD_RTT_ERR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
@@ -2839,22 +3916,27 @@
int32 drv_up = 1;
int32 version;
rtt_status_info_t *rtt_status;
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ int ftm_param_cnt = 0;
+
NULL_CHECK(dhd, "dhd is NULL", err);
if (dhd->rtt_state) {
return err;
}
- dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL);
+ dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh,
+ sizeof(rtt_status_info_t));
if (dhd->rtt_state == NULL) {
err = BCME_NOMEM;
- DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__));
return err;
}
bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
rtt_status = GET_RTTSTATE(dhd);
rtt_status->rtt_config.target_info =
- kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL);
+ (rtt_target_info_t *)MALLOCZ(dhd->osh,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
if (rtt_status->rtt_config.target_info == NULL) {
- DHD_ERROR(("%s failed to allocate the target info for %d\n",
+ DHD_RTT_ERR(("%s failed to allocate the target info for %d\n",
__FUNCTION__, RTT_MAX_TARGET_CNT));
err = BCME_NOMEM;
goto exit;
@@ -2865,7 +3947,7 @@
ret = dhd_rtt_get_version(dhd, &version);
if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) {
- DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__));
/* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */
rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY;
@@ -2883,18 +3965,33 @@
rtt_status->rtt_capa.bw |= RTT_BW_80;
} else {
if ((ret != BCME_OK) || (version == 0)) {
- DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : FTM is not supported\n", __FUNCTION__));
} else {
- DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
+ DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
__FUNCTION__, WL_PROXD_API_VERSION, version));
}
}
/* cancel all of RTT request once we got the cancel request */
rtt_status->all_cancel = TRUE;
mutex_init(&rtt_status->rtt_mutex);
+ mutex_init(&rtt_status->geofence_mutex);
INIT_LIST_HEAD(&rtt_status->noti_fn_list);
INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
INIT_WORK(&rtt_status->work, dhd_rtt_work);
+ /* initialize proxd timer */
+ INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work);
+#ifdef WL_NAN
+ /* initialize proxd retry timer */
+ INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work);
+ /* initialize non-zero params of geofence cfg */
+ rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX;
+#endif /* WL_NAN */
+ /* Global proxd config */
+ ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
+ (1 << WL_PROXD_EVENT_SESSION_END));
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
+ dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL,
+ ftm_params, ftm_param_cnt);
exit:
if (err < 0) {
kfree(rtt_status->rtt_config.target_info);
@@ -2920,11 +4017,7 @@
rtt_status->status = RTT_STOPPED;
DHD_RTT(("rtt is stopped %s \n", __FUNCTION__));
/* clear evt callback list */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (!list_empty(&rtt_status->noti_fn_list)) {
list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
list_del(&iter->list);
@@ -2943,9 +4036,8 @@
kfree(rtt_header);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
kfree(rtt_status->rtt_config.target_info);
kfree(dhd->rtt_state);
dhd->rtt_state = NULL;
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.h
index e9b723a..0e2d0e6 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_rtt.h
@@ -1,7 +1,9 @@
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -38,11 +40,15 @@
#define DEFAULT_RETRY_CNT 6
#define DEFAULT_FTM_FREQ 5180
#define DEFAULT_FTM_CNTR_FREQ0 5210
+#define RTT_MAX_GEOFENCE_TARGET_CNT 8
#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count)
#define TARGET_TYPE(target) (target->type)
+#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
+#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
+
#ifndef BIT
#define BIT(x) (1 << (x))
#endif // endif
@@ -63,6 +69,11 @@
#define WL_RATE_54M 108 /* in 500kbps units */
#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+/* RTT Retry Timer Interval */
+#define DHD_RTT_RETRY_TIMER_INTERVAL_MS 3000u
+
+#define DHD_RTT_INVALID_TARGET_INDEX -1
+
enum rtt_role {
RTT_INITIATOR = 0,
RTT_TARGET = 1
@@ -83,28 +94,35 @@
RTT_AUTO
} rtt_type_t;
+/* RTT peer type */
typedef enum {
- RTT_PEER_STA,
- RTT_PEER_AP,
- RTT_PEER_P2P,
- RTT_PEER_NAN,
- RTT_PEER_INVALID
+ RTT_PEER_AP = 0x1,
+ RTT_PEER_STA = 0x2,
+ RTT_PEER_P2P_GO = 0x3,
+ RTT_PEER_P2P_CLIENT = 0x4,
+ RTT_PEER_NAN = 0x5,
+ RTT_PEER_INVALID = 0x6
} rtt_peer_type_t;
+/* Ranging status */
typedef enum rtt_reason {
- RTT_REASON_SUCCESS,
- RTT_REASON_FAILURE,
- RTT_REASON_FAIL_NO_RSP,
- RTT_REASON_FAIL_INVALID_TS, /* Invalid timestamp */
- RTT_REASON_FAIL_PROTOCOL, /* 11mc protocol failed */
- RTT_REASON_FAIL_REJECTED,
- RTT_REASON_FAIL_NOT_SCHEDULED_YET,
- RTT_REASON_FAIL_SCHEDULE, /* schedule failed */
- RTT_REASON_FAIL_TM_TIMEOUT,
- RTT_REASON_FAIL_AP_ON_DIFF_CHANNEL,
- RTT_REASON_FAIL_NO_CAPABILITY,
- RTT_REASON_FAIL_BUSY_TRY_LATER,
- RTT_REASON_ABORTED
+ RTT_STATUS_SUCCESS = 0,
+ RTT_STATUS_FAILURE = 1, // general failure status
+ RTT_STATUS_FAIL_NO_RSP = 2, // target STA does not respond to request
+ RTT_STATUS_FAIL_REJECTED = 3, // request rejected. Applies to 2-sided RTT only
+ RTT_STATUS_FAIL_NOT_SCHEDULED_YET = 4,
+ RTT_STATUS_FAIL_TM_TIMEOUT = 5, // timing measurement times out
+ RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL = 6, // Target on different channel, cannot range
+ RTT_STATUS_FAIL_NO_CAPABILITY = 7, // ranging not supported
+ RTT_STATUS_ABORTED = 8, // request aborted for unknown reason
+ RTT_STATUS_FAIL_INVALID_TS = 9, // Invalid T1-T4 timestamp
+ RTT_STATUS_FAIL_PROTOCOL = 10, // 11mc protocol failed
+ RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled
+ RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request
+ RTT_STATUS_INVALID_REQ = 13, // bad request args
+ RTT_STATUS_NO_WIFI = 14, // WiFi not enabled
+ // Responder overrides param info, cannot range with new params
+ RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15
} rtt_reason_t;
enum {
@@ -142,6 +160,13 @@
RTT_RATE_160M
};
+typedef enum ranging_type {
+ RTT_TYPE_INVALID = 0,
+ RTT_TYPE_LEGACY = 1,
+ RTT_TYPE_NAN_DIRECTED = 2,
+ RTT_TYPE_NAN_GEOFENCE = 3
+} ranging_type_t;
+
#define FTM_MAX_NUM_BURST_EXP 14
#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY)
#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY)
@@ -168,6 +193,7 @@
typedef struct rtt_target_info {
struct ether_addr addr;
+ struct ether_addr local_addr;
rtt_type_t type; /* rtt_type */
rtt_peer_type_t peer; /* peer type */
wifi_channel_info_t channel; /* channel information */
@@ -201,8 +227,9 @@
* initiator will request that the responder send
* in a single frame
*/
- uint32 num_frames_per_burst;
- /* num of frames in each RTT burst
+ uint32 num_frames_per_burst;
+ /*
+ * num of frames in each RTT burst
* for single side, measurement result num = frame number
* for 2 side RTT, measurement result num = frame number - 1
*/
@@ -222,35 +249,80 @@
* at the end of the burst_duration it requested.
*/
uint32 burst_duration;
+ uint32 burst_timeout;
uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */
uint8 bw; /* 5, 10, 20, 40, 80, 160 */
} rtt_target_info_t;
+typedef struct rtt_goefence_target_info {
+ bool valid;
+ struct ether_addr peer_addr;
+} rtt_geofence_target_info_t;
+
typedef struct rtt_config_params {
int8 rtt_target_cnt;
rtt_target_info_t *target_info;
} rtt_config_params_t;
+typedef struct rtt_geofence_cfg {
+ int8 geofence_target_cnt;
+ bool rtt_in_progress;
+ bool role_concurr_state;
+ int8 cur_target_idx;
+ rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT];
+} rtt_geofence_cfg_t;
+
+/*
+ * Keep Adding more reasons
+ * going forward if needed
+ */
+enum rtt_schedule_reason {
+ RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */
+ RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */
+ RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */
+ RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */
+ RTT_SCHED_DP_REJECTED = 5, /* On receiving reject dp event from fw */
+ RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */
+ RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */
+ RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */
+ RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */
+ RTT_SCHED_RTT_RETRY_GEOFENCE = 10 /* On Geofence Retry */
+};
+
+/*
+ * Keep Adding more invalid RTT states
+ * going forward if needed
+ */
+enum rtt_invalid_state {
+ RTT_STATE_VALID = 0, /* RTT state is valid */
+ RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */
+};
+
typedef struct rtt_status_info {
- dhd_pub_t *dhd;
- int8 status; /* current status for the current entry */
- int8 txchain; /* current device tx chain */
- int pm; /* to save current value of pm */
- int8 pm_restore; /* flag to reset the old value of pm */
- int8 cur_idx; /* current entry to do RTT */
- bool all_cancel; /* cancel all request once we got the cancel requet */
- uint32 flags; /* indicate whether device is configured as initiator or target */
- struct capability {
+ dhd_pub_t *dhd;
+ int8 status; /* current status for the current entry */
+ int8 txchain; /* current device tx chain */
+ int pm; /* to save current value of pm */
+ int8 pm_restore; /* flag to reset the old value of pm */
+ int8 cur_idx; /* current entry to do RTT */
+ bool all_cancel; /* cancel all requests once we get the cancel request */
+ uint32 flags; /* indicate whether device is configured as initiator or target */
+ struct capability {
int32 proto :8;
int32 feature :8;
int32 preamble :8;
int32 bw :8;
} rtt_capa; /* rtt capability */
- struct mutex rtt_mutex;
- rtt_config_params_t rtt_config;
- struct work_struct work;
- struct list_head noti_fn_list;
- struct list_head rtt_results_cache; /* store results for RTT */
+ struct mutex rtt_mutex;
+ struct mutex geofence_mutex;
+ rtt_config_params_t rtt_config;
+ rtt_geofence_cfg_t geofence_cfg;
+ struct work_struct work;
+ struct list_head noti_fn_list;
+ struct list_head rtt_results_cache; /* store results for RTT */
+ int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */
+ struct delayed_work proxd_timeout; /* Proxd Timeout work */
+ struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */
} rtt_status_info_t;
typedef struct rtt_report {
@@ -297,12 +369,17 @@
struct list_head list;
struct list_head result_list;
} rtt_results_header_t;
-
+struct rtt_result_detail {
+ uint8 num_ota_meas;
+ uint32 result_flags;
+};
/* rtt_result to link all of rtt_report */
typedef struct rtt_result {
struct list_head list;
struct rtt_report report;
int32 report_len; /* total length of rtt_report */
+ struct rtt_result_detail rtt_detail;
+ int32 detail_len;
} rtt_result_t;
/* RTT Capabilities */
@@ -357,6 +434,46 @@
int
dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+void dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state);
+
+bool dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd);
+
+int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd);
+
+#ifdef WL_NAN
+void dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state);
+
+bool dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr,
+ int8 *index);
+
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target);
+
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr);
+
+int
+dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd);
+
+int
+dhd_rtt_delete_nan_session(dhd_pub_t *dhd);
+#endif /* WL_NAN */
+
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr);
+
+void
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason);
+
int
dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
@@ -386,4 +503,14 @@
int
dhd_rtt_deinit(dhd_pub_t *dhd);
+
+#ifdef WL_CFG80211
+int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd,
+ struct ether_addr *peer);
+
+void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd);
+
+int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd);
+#endif /* WL_CFG80211 */
+
#endif /* __DHD_RTT_H__ */
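/*
 * Illustrative sketch only (not part of this patch): one way the geofence
 * target APIs declared above could be driven. The function signatures match
 * the declarations in this header; the sched_reason value and how the
 * target structure is filled in are hypothetical assumptions.
 */
#ifdef WL_NAN
static int rtt_geofence_example(dhd_pub_t *dhd, rtt_geofence_target_info_t *target,
	struct ether_addr *peer)
{
	int err = dhd_rtt_add_geofence_target(dhd, target);

	if (err)
		return err;

	/* let the RTT worker pick the new target up; reason code is hypothetical */
	dhd_rtt_schedule_rtt_work_thread(dhd, 0);

	/* later, drop the peer again */
	return dhd_rtt_remove_geofence_target(dhd, peer);
}
#endif /* WL_NAN */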
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_sdio.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_sdio.c
index 2a93464..7752ed4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_sdio.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -1,7 +1,9 @@
/*
* DHD Bus Module for SDIO
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -85,7 +87,7 @@
bool dhd_mp_halting(dhd_pub_t *dhdp);
extern void bcmsdh_waitfor_iodrain(void *sdh);
extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
-extern bool bcmsdh_fatal_error(void *sdh);
+extern bool bcmsdh_fatal_error(void *sdh);
static int dhdsdio_suspend(void *context);
static int dhdsdio_resume(void *context);
@@ -144,11 +146,22 @@
#define MAX_RX_DATASZ 2048
+#define SDIO_FUNC_BLOCK_SIZE_SHIFT 16
+#define F2_BLOCK_SIZE_256 256
+
/* Maximum milliseconds to wait for F2 to come up */
+#ifdef BCMQT
+#define DHD_WAIT_F2RDY 30000
+#else
#define DHD_WAIT_F2RDY 3000
+#endif /* BCMQT */
/* Maximum usec to wait for HTAVAIL to come up */
+#ifdef BCMQT
+#define DHD_WAIT_HTAVAIL 10000000
+#else
#define DHD_WAIT_HTAVAIL 10000
+#endif /* BCMQT */
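/*
 * Note (assumption): BCMQT appears to select emulation builds, where the
 * dongle runs orders of magnitude slower than silicon, hence the much larger
 * F2-ready and HT-avail timeouts above.
 */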
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
@@ -181,9 +194,7 @@
DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_sdio_mutex_lock_);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif // endif
#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
@@ -494,6 +505,11 @@
extern bool g_pm_control;
#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef DHD_FW_COREDUMP
+uint tcm_dump_enable;
+module_param(tcm_dump_enable, uint, 0);
+#endif /* DHD_FW_COREDUMP */
+
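/*
 * Usage sketch (assumption): tcm_dump_enable is registered with permission 0,
 * so it has no sysfs entry and can only be set at module load time, e.g.
 *   insmod bcmdhd.ko tcm_dump_enable=1
 * When set together with memdump_enabled, the dongle RAM is dumped early
 * during bus init (see the DHD_FW_COREDUMP hook added further below).
 */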
/* Tx/Rx bounds */
uint dhd_txbound;
uint dhd_rxbound;
@@ -571,9 +587,11 @@
/* Try doing readahead */
static bool dhd_readahead;
+#define TXCTL_CREDITS 2
+
/* To check if there's window offered */
#define DATAOK(bus) \
- (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) > TXCTL_CREDITS) && \
(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
/* To check if there's window offered for ctrl frame */
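/*
 * Sketch (illustrative, not part of the patch): DATAOK() treats the
 * (tx_max - tx_seq) difference as a modulo-256 credit window; requiring it
 * to exceed TXCTL_CREDITS presumably keeps a couple of credits in reserve so
 * control frames can still go out while data traffic fills the window.
 */
static inline bool data_window_open(uint8 tx_max, uint8 tx_seq)
{
	uint8 credits = (uint8)(tx_max - tx_seq);	/* wraps mod 256 */

	return (credits > TXCTL_CREDITS) && ((credits & 0x80) == 0);
}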
@@ -645,6 +663,10 @@
#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
+#define SDIO_FUNC_BLOCK_SIZE_SHIFT 16
+#define F2_BLOCK 2
+#define DEFAULT_SDIO_F2_BLKSIZE 512
+
#ifdef BCMSPI
#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND
@@ -686,6 +708,8 @@
#endif /* BCMSPI */
+extern uint sd_f2_blocksize;
+
#ifdef SDTEST
static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
@@ -698,6 +722,7 @@
#if defined(DHD_FW_COREDUMP)
static int dhdsdio_mem_dump(dhd_bus_t *bus);
+static int dhdsdio_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
static int dhdsdio_readshared_console(dhd_bus_t *bus);
@@ -891,7 +916,6 @@
}
#endif /* USE_OOB_GPIO1 */
-#ifndef BCMSPI
/*
* Query if FW is in SR mode
*/
@@ -901,6 +925,14 @@
bool cap = FALSE;
uint32 core_capext, addr, data;
+ /* Query for an SR core in the chip; otherwise check whether SR mode is enabled in FW */
+ if (si_findcoreidx(bus->sih, SR_CORE_ID, 0) != BADIDX) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
+ return true;
+ }
+ }
+
if (bus->sih->chip == BCM43430_CHIP_ID ||
bus->sih->chip == BCM43018_CHIP_ID) {
/* check if fw initialized sr engine */
@@ -920,7 +952,13 @@
(bus->sih->chip == BCM4358_CHIP_ID) ||
(BCM4349_CHIP(bus->sih->chip)) ||
(bus->sih->chip == BCM4350_CHIP_ID) ||
- (bus->sih->chip == BCM43012_CHIP_ID)) {
+#ifdef UNRELEASEDCHIP_FOR_ANDROID
+ (bus->sih->chip == BCM4362_CHIP_ID) ||
+#endif /* UNRELEASEDCHIP_FOR_ANDROID */
+ (bus->sih->chip == BCM43012_CHIP_ID) ||
+ (bus->sih->chip == BCM43014_CHIP_ID) ||
+ (bus->sih->chip == BCM43751_CHIP_ID) ||
+ (bus->sih->chip == BCM4373_CHIP_ID)) {
core_capext = TRUE;
} else {
core_capext = bcmsdh_reg_read(bus->sdh,
@@ -969,7 +1007,9 @@
uint8 val;
int err = 0;
- if (bus->sih->chip == BCM43012_CHIP_ID) {
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
@@ -993,10 +1033,23 @@
CHIPID(bus->sih->chip) == BCM43018_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM4339_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
+ CHIPID(bus->sih->chip) == CYW55500_CHIP_ID ||
+ CHIPID(bus->sih->chip) == CYW55560_CHIP_ID ||
+#ifdef UNRELEASEDCHIP_FOR_ANDROID
+ CHIPID(bus->sih->chip) == BCM4362_CHIP_ID ||
+#endif /* UNRELEASEDCHIP_FOR_ANDROID */
+ CHIPID(bus->sih->chip) == BCM43014_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43751_CHIP_ID ||
FALSE)
+#ifdef BCMSPI
+ dhdsdio_wkwlan(bus, FALSE);
+#else
dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
+#endif /* BCMSPI */
- if (bus->sih->chip == BCM43012_CHIP_ID) {
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err);
} else {
@@ -1009,7 +1062,6 @@
return 0;
}
-#endif /* BCMSPI */
/*
* FIX: Be sure KSO bit is enabled
@@ -1064,11 +1116,13 @@
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
- /* In case of 43012 chip, the chip could go down immediately after KSO bit is cleared.
- * So the further reads of KSO register could fail. Thereby just bailing out immediately
- * after clearing KSO bit, to avoid polling of KSO bit.
+ /* In case of 43012/555x0 chips, the chip could go down immediately after the KSO bit is
+ * cleared, so further reads of the KSO register could fail. Therefore, bail out
+ * immediately after clearing the KSO bit, to avoid polling it.
*/
- if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID)) {
+ if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID)) {
return err;
}
@@ -1373,12 +1427,10 @@
}
#ifdef OEM_ANDROID
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
dhd_os_send_hang_message(bus->dhd);
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
#endif /* OEM_ANDROID */
return BCME_ERROR;
} else {
@@ -1564,7 +1616,8 @@
}
bus->clkstate = CLK_NONE;
}
-#endif /* BCMSPI */
+#else /* BCMSPI */
+#endif /* !BCMSPI */
return BCME_OK;
}
@@ -1690,6 +1743,9 @@
bcmsdh_info_t *sdh = bus->sdh;
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
+#if defined(BCMSDIOH_STD)
+ uint32 sd3_tuning_disable = FALSE;
+#endif /* BCMSDIOH_STD */
DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
(sleep ? "SLEEP" : "WAKE"),
@@ -1763,6 +1819,11 @@
/* Change state */
bus->sleeping = TRUE;
+#if defined(BCMSDIOH_STD)
+ sd3_tuning_disable = TRUE;
+ err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
+ &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
+#endif /* BCMSDIOH_STD */
#if defined(SUPPORT_P2P_GO_PS)
wake_up(&bus->bus_sleep);
#endif /* LINUX && SUPPORT_P2P_GO_PS */
@@ -1816,6 +1877,11 @@
if (err == 0) {
/* Change state */
bus->sleeping = FALSE;
+#if defined(BCMSDIOH_STD)
+ sd3_tuning_disable = FALSE;
+ err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
+ &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
+#endif /* BCMSDIOH_STD */
}
}
@@ -1923,7 +1989,7 @@
bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
0, &func_blk_size, sizeof(int32), IOV_SET);
if (bcmerr != BCME_OK) {
- DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
+ DHD_ERROR(("%s: Set F%d Block size error\n", __FUNCTION__, function_num));
return BCME_ERROR;
}
}
@@ -1971,6 +2037,9 @@
int ret = BCME_ERROR;
osl_t *osh;
uint datalen, prec;
+#ifdef REVERSE_AIFSN
+ uint prio;
+#endif /* REVERSE_AIFSN */
#if defined(DHD_TX_DUMP)
uint8 *dump_data;
#endif /* DHD_TX_DUMP */
@@ -2017,6 +2086,17 @@
#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+#ifdef REVERSE_AIFSN
+ /* Update the precedence value if the AIFSN values are reversed relative to 802.11 */
+ if (bus->dhd->aifsn_reverse) {
+ prio = PKTPRIO(pkt) & PRIOMASK;
+ if (prio == PRIO_8021D_BE) {
+ prec = PRIO2PREC((PRIO_8021D_VI & PRIOMASK));
+ } else if (prio == PRIO_8021D_VI) {
+ prec = PRIO2PREC((PRIO_8021D_BE & PRIOMASK));
+ }
+ }
+#endif /* REVERSE_AIFSN */
/* Check for existing queue, current flow-control, pending event, or pending clock */
if (dhd_deferred_tx || bus->fcstate || pktq_n_pkts_tot(&bus->txq) || bus->dpc_sched ||
@@ -2509,6 +2589,10 @@
#ifdef DHD_LOSSLESS_ROAMING
uint8 *pktdata;
struct ether_header *eh;
+#ifdef BDC
+ struct bdc_header *bdc_header;
+ uint8 data_offset;
+#endif // endif
#endif /* DHD_LOSSLESS_ROAMING */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
@@ -2553,7 +2637,9 @@
pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
#ifdef BDC
/* Skip BDC header */
- pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset;
+ bdc_header = (struct bdc_header *)pktdata;
+ data_offset = bdc_header->dataOffset;
+ pktdata += BDC_HEADER_LEN + (data_offset << 2);
#endif // endif
eh = (struct ether_header *)pktdata;
if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
@@ -2562,6 +2648,11 @@
/* Restore to original priority for 802.1X packet */
if (prio == PRIO_8021D_NC) {
PKTSETPRIO(pkts[i], dhd->prio_8021x);
+#ifdef BDC
+ /* Restore to original priority in BDC header */
+ bdc_header->priority =
+ (dhd->prio_8021x & BDC_PRIORITY_MASK);
+#endif // endif
}
}
#endif /* DHD_LOSSLESS_ROAMING */
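/*
 * Sketch (illustrative): the fix above keeps a pointer to the BDC header so
 * the 802.1X priority can later be restored in it, and accounts for
 * dataOffset being expressed in 4-byte words (hence the << 2) when locating
 * the Ethernet header behind the BDC header.
 */
static inline struct ether_header *bdc_to_eth(uint8 *pktdata)
{
	struct bdc_header *h = (struct bdc_header *)pktdata;

	return (struct ether_header *)(pktdata + BDC_HEADER_LEN +
		((uint)h->dataOffset << 2));
}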
@@ -2765,7 +2856,8 @@
}
#ifdef DHD_FW_COREDUMP
/* Collect socram dump */
- if (bus->dhd->memdump_enabled) {
+ if ((bus->dhd->memdump_enabled) &&
+ (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)) {
/* collect core dump */
bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX;
dhd_os_sdunlock(bus->dhd);
@@ -3332,6 +3424,11 @@
int rv, i;
uint32 shaddr = 0;
+ /* This is a temporary WAR for now */
+#if defined(PLATFORM_IMX)
+ return BCME_OK;
+#endif /* defined(PLATFORM_IMX) */
+
if (bus->sih == NULL) {
if (bus->dhd && bus->dhd->dongle_reset) {
DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__));
@@ -3411,11 +3508,6 @@
sdpcm_shared_t *sh = &sh_info;
int retry = 10;
- /* This temporary WAR for now */
-#if defined(PLATFORM_IMX)
- return BCME_OK;
-#endif /* defined(PLATFORM_IMX) */
-
shaddr = bus->dongle_ram_base + bus->ramsize - 4;
i = 0;
@@ -3470,6 +3562,9 @@
uint32 n, idx, addr;
int rv;
+ if (!DHD_FWLOG_ON())
+ return 0;
+
/* Don't do anything until FWREADY updates console address */
if (bus->console_addr == 0)
return 0;
@@ -3750,35 +3845,46 @@
return dhdsdio_mem_dump(bus);
}
-static int
-dhdsdio_mem_dump(dhd_bus_t *bus)
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
{
- int ret = 0;
- int size; /* Full mem size */
- uint32 start = bus->dongle_ram_base; /* Start address */
- uint read_size = 0; /* Read size of each iteration */
- uint8 *buf = NULL, *databuf = NULL;
-
- /* Get full mem size */
- size = bus->ramsize;
- buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
- return -1;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
}
- dhd_os_sdlock(bus->dhd);
+ return dhdsdio_get_mem_dump(dhdp->bus);
+}
+
+static int
+dhdsdio_get_mem_dump(dhd_bus_t *bus)
+{
+ int ret = BCME_ERROR;
+ int size = bus->ramsize; /* Full mem size */
+ uint32 start = bus->dongle_ram_base; /* Start address */
+ uint read_size = 0; /* Read size of each iteration */
+ uint8 *p_buf = NULL, *databuf = NULL;
+
+ /* Get full mem size */
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
+ return BCME_ERROR;
+ }
+ if (!tcm_dump_enable) {
+ dhd_os_sdlock(bus->dhd);
+ }
BUS_WAKE(bus);
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Read mem content */
DHD_ERROR(("Dump dongle memory\n"));
- databuf = buf;
- while (size)
- {
+ databuf = p_buf;
+ while (size) {
read_size = MIN(MEMBLOCK, size);
- if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
- {
+ ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size);
+ if (ret) {
DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
ret = BCME_ERROR;
break;
@@ -3797,10 +3903,31 @@
dhd_os_sdunlock(bus->dhd);
- /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */
- if (!ret) {
- /* buf, actually soc_ram free handled in dhd_{free,clear} */
- dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+ return ret;
+}
+
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret = BCME_ERROR;
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = dhdsdio_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ /* schedule a work queue to perform actual memdump.
+ * dhd_mem_dump() performs the job
+ */
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+ /* soc_ram free handled in dhd_{free,clear} */
}
return ret;
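/*
 * Summary of the refactor above: dhdsdio_get_mem_dump() now only fills the
 * dump buffer obtained via dhd_get_fwdump_buf(), while dhdsdio_mem_dump()
 * schedules dhd_mem_dump() over dhdp->soc_ram; the new, non-static
 * dhd_bus_get_mem_dump() lets callers collect the RAM contents without
 * queueing that work.
 */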
@@ -4020,6 +4147,12 @@
sd1idle = bool_val;
break;
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_CHECKDIED):
+ bcmerror = dhdsdio_checkdied(bus, arg, len);
+ break;
+#endif /* DHD_DEBUG */
+
#ifdef DHD_BUS_MEM_ACCESS
case IOV_SVAL(IOV_MEMBYTES):
case IOV_GVAL(IOV_MEMBYTES):
@@ -4305,6 +4438,7 @@
dhd_txminmax = (uint)int_val;
break;
+#ifdef DHD_DEBUG
case IOV_GVAL(IOV_SERIALCONS):
int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
if (bcmerror != 0)
@@ -4316,6 +4450,7 @@
case IOV_SVAL(IOV_SERIALCONS):
dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
break;
+#endif /* DHD_DEBUG */
#endif /* DHD_DEBUG */
@@ -4383,8 +4518,10 @@
ASSERT(bus->dhd->osh);
/* ASSERT(bus->cl_devid); */
+ /* must release sdlock, since devreset also acquires it */
+ dhd_os_sdunlock(bus->dhd);
dhd_bus_devreset(bus->dhd, (uint8)bool_val);
-
+ dhd_os_sdlock(bus->dhd);
break;
/*
* softap firmware is updated through module parameter or android private command
@@ -4499,11 +4636,11 @@
#ifdef DHD_DEBUG
uint8 *nvram_ularray;
#endif /* DHD_DEBUG */
+
#define TOKENSIZE 4
/* Even if there are no vars are to be written, we still need to set the ramsize. */
varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
varaddr = bus->ramsize - varsize - TOKENSIZE;
-
varaddr += bus->dongle_ram_base;
if (bus->vars) {
@@ -4569,8 +4706,8 @@
MFREE(bus->dhd->osh, nvram_ularray, varsize);
#endif /* DHD_DEBUG */
-
MFREE(bus->dhd->osh, vbuffer, varsize + TOKENSIZE);
+
}
phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
@@ -4586,6 +4723,12 @@
return bcmerror;
}
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
static int
dhdsdio_download_state(dhd_bus_t *bus, bool enter)
{
@@ -4597,7 +4740,8 @@
if (!bus->sih)
return BCME_ERROR;
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
/* Host bootloader handshake TCM/REGS addresses init */
bcmerror = dhdsdio_dongle_host_get_handshake_address(bus, &bl_hs_addrs);
if (bcmerror) {
@@ -4672,9 +4816,10 @@
* [done at else] Populate the reset vector
* [done at else] Remove ARM halt
*/
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
- /* Skip ARM halt and reset in case of 55560 */
+ /* Skip ARM halt and reset in case of 555x0 */
/* Bootloader host pre handshake function */
if ((bcmerror = dhdsdio_dongle_host_pre_handshake
@@ -4692,7 +4837,6 @@
DHD_ERROR(("%s: Shared region not initialized\n",
__FUNCTION__));
}
-
#ifdef DHD_DEBUG
/* Console buffer read - First pass */
if ((bcmerror = dhdsdio_readconsole(bus)) < 0) {
@@ -4753,7 +4897,8 @@
}
} else {
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
#ifdef DHD_DEBUG
/* Console buffer read - Second pass */
if ((bcmerror = dhdsdio_readconsole(bus)) < 0) {
@@ -4761,7 +4906,6 @@
__FUNCTION__));
}
#endif /* DHD_DEBUG */
-
/* FW and NVRAM download done notification to bootloader */
if ((bcmerror = dhdsdio_dongle_host_post_handshake(bus,
&bl_hs_addrs))) {
@@ -4787,62 +4931,63 @@
}
}
- }
- /* cr4 has no socram, but tcm's */
- /* write vars */
- if ((bcmerror = dhdsdio_write_vars(bus))) {
- DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
- goto fail;
- }
+ /* cr4 has no socram, but tcm's */
+ /* write vars */
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
#ifdef BCMSDIOLITE
- if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
- DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
- bcmerror = BCME_ERROR;
- goto fail;
- }
+ if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
#else
- if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
- !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
- DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
- bcmerror = BCME_ERROR;
- goto fail;
- }
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
#endif // endif
- W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
- /* switch back to arm core again */
- if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
- DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
- bcmerror = BCME_ERROR;
- goto fail;
- }
- /* write address 0 with reset instruction */
- /* Except for chips eg. CYW55560, which runs bootloader */
- if (bus->sih->chip != CYW55560_CHIP_ID) {
- bcmerror = dhdsdio_membytes(bus, TRUE, 0,
- (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ /* write address 0 with reset instruction */
+ /* Except for chips, e.g. CYW555x0, which run a bootloader */
+ if (bus->sih->chip != CYW55500_CHIP_ID &&
+ bus->sih->chip != CYW55560_CHIP_ID) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
- if (bcmerror == BCME_OK) {
- uint32 tmp;
+ if (bcmerror == BCME_OK) {
+ uint32 tmp;
- /* verify write */
- bcmerror = dhdsdio_membytes(bus, FALSE, 0, (uint8 *)&tmp,
- sizeof(tmp));
+ /* verify write */
+ bcmerror = dhdsdio_membytes(bus, FALSE, 0, (uint8 *)&tmp,
+ sizeof(tmp));
- if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
- DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
- __FUNCTION__, bus->resetinstr));
- DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
- __FUNCTION__, tmp));
- bcmerror = BCME_SDIO_ERROR;
- goto fail;
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+ __FUNCTION__, bus->resetinstr));
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+ __FUNCTION__, tmp));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
}
}
}
/* now remove reset and halt and continue to run CR4 */
-
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
#ifdef DHD_DEBUG
/* Console buffer read - Final pass */
if ((bcmerror = dhdsdio_readconsole(bus)) < 0) {
@@ -4850,7 +4995,6 @@
__FUNCTION__));
}
#endif /* DHD_DEBUG */
-
/* Set write_vars done bit to let BL jump to mainline FW */
if ((bcmerror = dhdsdio_dongle_host_post_varswrite(bus, &bl_hs_addrs))) {
DHD_ERROR(("%s: error %d dongle_host_post_varswrite\n",
@@ -4876,7 +5020,8 @@
fail:
if (bcmerror) {
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
/* Read the shared structure to determine console address */
if (dhdsdio_readshared_console(bus) < 0) {
DHD_ERROR(("%s: Shared region not initialized\n",
@@ -5226,10 +5371,11 @@
if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s get\n",
+ __FUNCTION__, fnum, "sd_blocksize"));
} else {
- DHD_INFO(("%s: noted %s update, value now %d\n",
- __FUNCTION__, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: noted fn %d %s update, value now %d\n",
+ __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
@@ -5297,6 +5443,7 @@
if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
/* if Firmware already hangs disbale any interrupt */
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->hostintmask = 0;
bcmsdh_intr_disable(bus->sdh);
@@ -5316,6 +5463,7 @@
/* Change our idea of bus state */
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
@@ -5449,7 +5597,7 @@
if ((ret = dhdsdio_readconsole(bus)) < 0) {
DHD_ERROR(("%s: Console buffer read failed\n",
__FUNCTION__));
- }
+ }
#endif /* DHD_DEBUG */
}
@@ -5492,7 +5640,9 @@
saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- if (bus->sih->chip == BCM43012_CHIP_ID) {
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
(saveclk | SBSDIO_HT_AVAIL_REQ), &err);
} else {
@@ -5547,6 +5697,7 @@
bus->hostintmask |= I_XMTDATA_AVAIL;
}
#endif /* BCMSPI */
+ /* Tell the device which interrupts we are interested in */
W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
if (bus->sih->buscorerev < 15) {
@@ -5587,6 +5738,13 @@
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
}
+#ifdef DHD_FW_COREDUMP
+ if (bus->dhd->memdump_enabled && tcm_dump_enable) {
+ DHD_ERROR(("%s : Dumping the RAM", __FUNCTION__));
+ dhd_bus_mem_dump(bus->dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+
if (dhdsdio_sr_cap(bus)) {
dhdsdio_sr_init(bus);
/* Masking the chip active interrupt permanantly */
@@ -5598,6 +5756,17 @@
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
}
+#else /* BCMSPI */
+ {
+ if (dhdsdio_sr_cap(bus)) {
+ dhdsdio_sr_init(bus);
+ /* Masking the chip active interrupt permanently */
+ bus->hostintmask &= ~I_CHIPACTIVE;
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+ DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+ __FUNCTION__, bus->hostintmask));
+ }
+ }
#endif /* !BCMSPI */
/* If we didn't come up, turn off backplane clock */
@@ -5681,8 +5850,10 @@
fail:
/* If we can't reach the device, signal failure */
- if (err || bcmsdh_regfail(sdh))
+ if (err || bcmsdh_regfail(sdh)) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
+ }
}
static void
@@ -6898,7 +7069,11 @@
}
#endif /* SDTEST */
- if (PKTLEN(osh, pkt) == 0) {
+#if defined(BCMSPI)
+ if ((chan == SDPCM_EVENT_CHANNEL) && (bus->sdpcmrev >= 17 && bus->sdpcmrev <= 22)) {
+#else
+ if (PKTLEN(osh, pkt) == 0) {
+#endif /* BCMSPI */
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(bus->dhd->osh, pkt, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
@@ -7034,7 +7209,7 @@
if (hmb_data & HMB_DATA_FWHALT) {
DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
dhdsdio_checkdied(bus, NULL, 0);
- bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_ERROR(("Not doing bus down untill memdump done \n"));
}
/* Shouldn't be any others */
@@ -7104,6 +7279,7 @@
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
} else {
ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
@@ -7114,6 +7290,7 @@
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
@@ -7124,6 +7301,7 @@
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n",
__FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
@@ -7131,6 +7309,7 @@
if (err) {
DHD_ERROR(("%s: error writing DEVCTL: %d\n",
__FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
@@ -7348,6 +7527,7 @@
} else {
DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
__FUNCTION__));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->intstatus = 0;
}
@@ -8102,18 +8282,32 @@
return TRUE;
if (BCM4349_CHIP(chipid))
return TRUE;
+ if (chipid == BCM4373_CHIP_ID)
+ return TRUE;
if (chipid == BCM4364_CHIP_ID)
return TRUE;
if (chipid == BCM43012_CHIP_ID)
return TRUE;
- if (chipid == BCM4369_CHIP_ID)
+ if (chipid == BCM43014_CHIP_ID)
return TRUE;
- if (chipid == CYW55560_CHIP_ID) {
+ if (chipid == BCM4369_CHIP_ID)
+ return TRUE;
+#ifdef CHIPS_CUSTOMER_HW6
+ if (BCM4378_CHIP(chipid)) {
return TRUE;
}
+#endif /* CHIPS_CUSTOMER_HW6 */
+ if (chipid == BCM4362_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43751_CHIP_ID)
+ return TRUE;
+ if (chipid == CYW55500_CHIP_ID)
+ return TRUE;
+ if (chipid == CYW55560_CHIP_ID)
+ return TRUE;
return FALSE;
}
@@ -8125,14 +8319,12 @@
dhd_bus_t *bus;
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
} else {
DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
}
mutex_lock(&_dhd_sdio_mutex_lock_);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif // endif
/* Init global variables at run-time, not as part of the declaration.
@@ -8199,8 +8391,16 @@
}
if (osh == NULL) {
+#if defined(BCMSPI) && defined(BCMPCISPIHOST)
+ /* bcmsdh_probe() calls drvinfo.probe(), i.e. this function, with osh as NULL */
+ if (!(osh = osl_attach(sdh, DHD_BUS, TRUE))) {
+ DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+ goto forcereturn;
+ }
+#else
DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
goto forcereturn;
+#endif /* BCMSPI && defined(BCMPCISPIHOST) */
}
/* Allocate private bus interface state */
@@ -8269,9 +8469,9 @@
if (dhd_download_fw_on_driverload) {
if ((ret = dhd_bus_start(bus->dhd)) != 0) {
DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
-#if !defined(OEM_ANDROID)
+#if !defined(OEM_ANDROID) || defined(BCMQT)
if (ret == BCME_NOTUP)
-#endif /* !OEM_ANDROID */
+#endif /* !OEM_ANDROID || defined(BCMQT) */
goto fail;
}
}
@@ -8293,8 +8493,10 @@
*/
dhdsdio_bus_usr_cnt_inc(bus->dhd);
#endif /* BT_OVER_SDIO */
+
/* Ok, have the per-port tell the stack we're open for business */
- if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+ if (dhd_attach_net(bus->dhd, TRUE) != 0)
+ {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
goto fail;
}
@@ -8305,10 +8507,8 @@
#endif /* BCMHOST_XTAL_PU_TIME_MOD */
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#endif // endif
return bus;
@@ -8318,10 +8518,8 @@
forcereturn:
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#endif // endif
return NULL;
@@ -8337,8 +8535,10 @@
uint fn, numfn;
uint8 *cis[SDIOD_MAX_IOFUNCS];
int32 value;
+ int32 size;
int err = 0;
+ BCM_REFERENCE(value);
bus->alp_only = TRUE;
bus->sih = NULL;
@@ -8409,26 +8609,66 @@
value = F0_BLOCK_SIZE;
else
value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16);
+ /* Get block size from sd */
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),
+ &size, sizeof(int32), FALSE) != BCME_OK) {
+ size = 0;
+ DHD_ERROR(("%s: fail on fn %d %s get\n",
+ __FUNCTION__, fn, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
+ __FUNCTION__, fn, "sd_blocksize", size));
+ }
+ if (size != 0 && size < value) {
+ value = size;
+ }
+ value = fn << 16 | value;
if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value,
sizeof(value), TRUE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__,
- "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s set\n", __FUNCTION__,
+ fn, "sd_blocksize"));
}
+
+ /* The F2 blocksize value is taken in the following order of precedence:
+ * insmod argument
+ * build flag variable
+ * CIS tuple
+ * default parameter
+ */
+ if (fn == F2_BLOCK) {
+ if (!sd_f2_blocksize) {
+#ifdef CUSTOM_SDIO_F2_BLKSIZE
+ sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+#else
+ sd_f2_blocksize = ((uint16)value ? (uint16)value :
+ DEFAULT_SDIO_F2_BLKSIZE);
+#endif /* CUSTOM_SDIO_F2_BLKSIZE */
+ }
+
+ value = (fn << SDIO_FUNC_BLOCK_SIZE_SHIFT) | sd_f2_blocksize;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize",
+ NULL, 0, &value, sizeof(fn), TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: Set F2 Block size error\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+
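/*
 * Sketch (illustrative) of the F2 block-size precedence implemented above:
 * insmod argument > build-time CUSTOM_SDIO_F2_BLKSIZE > CIS tuple value >
 * DEFAULT_SDIO_F2_BLKSIZE, encoded as (func << SDIO_FUNC_BLOCK_SIZE_SHIFT) |
 * size for the "sd_blocksize" iovar.
 */
static uint16 pick_f2_blocksize(uint16 insmod_val, uint16 cis_val)
{
	if (insmod_val)
		return insmod_val;		/* module parameter wins */
#ifdef CUSTOM_SDIO_F2_BLKSIZE
	return CUSTOM_SDIO_F2_BLKSIZE;		/* build flag next */
#else
	return cis_val ? cis_val : DEFAULT_SDIO_F2_BLKSIZE;
#endif
}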
#ifdef DHD_DEBUG
if (DHD_INFO_ON()) {
dhd_dump_cis(fn, cis[fn]);
}
#endif /* DHD_DEBUG */
}
-#else
- BCM_REFERENCE(cis);
- BCM_REFERENCE(fn);
-#endif /* DHD_DEBUG */
while (fn-- > 0) {
ASSERT(cis[fn]);
MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
}
+#else
+ BCM_REFERENCE(cis);
+ BCM_REFERENCE(fn);
+#endif /* !BCMSDIOLITE */
if (err) {
DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
@@ -8506,15 +8746,45 @@
bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9);
break;
+ case BCM4373_CHIP_ID:
+ bus->dongle_ram_base = CR4_4373_RAM_BASE;
+ /* Update the F2 block size to 256 for 4373 to fix the Tx
+ * underflow issue during bi-directional traffic
+ */
+ {
+ uint fn = 2;
+ fn = fn << SDIO_FUNC_BLOCK_SIZE_SHIFT | F2_BLOCK_SIZE_256;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize",
+ NULL, 0, &fn, sizeof(fn), TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: Set F2 Block size error\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+ break;
case BCM4364_CHIP_ID:
bus->dongle_ram_base = CR4_4364_RAM_BASE;
break;
+ case BCM4362_CHIP_ID:
+ bus->dongle_ram_base = CR4_4362_RAM_BASE;
+ break;
+ case BCM43751_CHIP_ID:
+ bus->dongle_ram_base = CR4_43751_RAM_BASE;
+ break;
case BCM4369_CHIP_ID:
bus->dongle_ram_base = CR4_4369_RAM_BASE;
break;
+ case CYW55500_CHIP_ID:
+ bus->dongle_ram_base = CR4_55500_RAM_BASE;
+ break;
case CYW55560_CHIP_ID:
bus->dongle_ram_base = CR4_55560_RAM_BASE;
break;
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4378_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4378_RAM_BASE;
+ break;
+#endif /* CHIPS_CUSTOMER_HW6 */
default:
bus->dongle_ram_base = 0;
DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
@@ -8524,8 +8794,9 @@
/* 55560, Dedicated space for TCAM patching and TRX Hader at RAMBASE */
/* TCAM Patching - 2048[2K], TRX Header - 32Bytes */
-
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID) {
+ bus->orig_ramsize -= (CR4_55500_TCAM_SZ + CR4_55500_TRX_HDR_SZ);
+ } else if (bus->sih->chip == CYW55560_CHIP_ID) {
bus->orig_ramsize -= (CR4_55560_TCAM_SZ + CR4_55560_TRX_HDR_SZ);
}
@@ -8647,6 +8918,7 @@
bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
#endif /* !BCMSPI */
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->sleeping = FALSE;
bus->rxflow = FALSE;
@@ -8687,10 +8959,10 @@
if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s get\n", __FUNCTION__, fnum, "sd_blocksize"));
} else {
- DHD_INFO(("%s: Initial value for %s is %d\n",
- __FUNCTION__, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
+ __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
@@ -8738,6 +9010,16 @@
ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+#if defined(BCMSPI) && defined(GSPI_DWORD_MODE)
+ /* Enable the dwordmode in gSPI before first F2 transaction */
+ if (((bus->sih->chip == BCM4329_CHIP_ID) && (bus->sih->chiprev > 1)) ||
+ (bus->sih->chip == BCM43430_CHIP_ID)) {
+ bcmsdh_dwordmode(bus->sdh, TRUE);
+ bus->dwordmode = TRUE;
+ DHD_INFO(("DHD:SPI DWORD mode enabled\n"));
+ }
+#endif /* defined(BCMSPI) && defined(GSPI_DWORD_MODE) */
+
return ret;
}
@@ -8870,9 +9152,10 @@
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
}
if (KSO_ENAB(bus) && (dongle_isolation == FALSE)) {
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
/*
- * CYW55560 - As part of watchdog reset, ARM gets
+ * CYW555x0 - As part of watchdog reset, ARM gets
* reset and bootloader starts from fresh,
* So, pre wd reset sequcnce defined to make
* sure H2D regs are initialized to zero
@@ -8888,11 +9171,12 @@
si_watchdog(bus->sih, 4);
/*
- * CYW55560 - Once watchdog reset initiated,
+ * CYW555x0 - Once watchdog reset initiated,
* bootloader takes much time to be ready,
* [BL_READY bit set]
*/
- if (bus->sih->chip == CYW55560_CHIP_ID) {
+ if (bus->sih->chip == CYW55500_CHIP_ID ||
+ bus->sih->chip == CYW55560_CHIP_ID) {
if ((bcmerror =
dhdsdio_dongle_host_post_wd_reset_sequence(bus)))
{
@@ -8923,14 +9207,12 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
} else {
DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
}
mutex_lock(&_dhd_sdio_mutex_lock_);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif // endif
if (bus) {
@@ -8941,10 +9223,8 @@
}
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* LINUX */
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
@@ -8988,10 +9268,14 @@
}
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ /* stop all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
bus->dhd->busstate = DHD_BUS_SUSPEND;
if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
DHD_ERROR(("Tx Request is not ended\n"));
bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return -EBUSY;
}
@@ -9017,6 +9301,8 @@
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
if (ret) {
bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
@@ -9049,6 +9335,8 @@
DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
bus->dhd->busstate = DHD_BUS_DATA;
dhd_os_busbusy_wake(bus->dhd);
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return 0;
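/*
 * Note on the flow-control additions above: the Tx queues of all interfaces
 * are stopped before the bus is marked suspended and are resumed whenever the
 * suspend path bails out or the bus resumes, presumably so packets are not
 * queued toward a bus that cannot transmit.
 */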
@@ -9114,13 +9402,14 @@
DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
image = dhd_os_open_image1(bus->dhd, pfw_path);
- if (image == NULL)
+ if (image == NULL) {
+ DHD_ERROR(("%s: Failed to open fw file !\n", __FUNCTION__));
goto err;
+ }
/* Update the dongle image download block size depending on the F1 block size */
if (sd_f1_blocksize == 512)
memblock_size = MAX_MEMBLOCK;
-
memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
@@ -9157,7 +9446,7 @@
trx_chk = FALSE;
trx_hdr = (struct trx_header *)memptr;
if (trx_hdr->magic == TRX_MAGIC) {
- /* CYW55560, we need to write TRX header at RAMSTART */
+ /* For CYW555x0, we need to write the TRX header at RAMSTART */
offset -= sizeof(struct trx_header);
}
}
@@ -9385,7 +9674,7 @@
/* Out immediately if no image to download */
if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
- return 0;
+ return bcmerror;
}
/* Keep arm in reset */
@@ -9603,7 +9892,6 @@
/* Expect app to have torn down any connection before calling */
/* Stop the bus, disable F2 */
dhd_bus_stop(bus, FALSE);
-
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
/* Clean up any pending IRQ */
dhd_enable_oob_intr(bus, FALSE);
@@ -9615,11 +9903,13 @@
dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
bus->dhd->up = FALSE;
dhd_txglom_enable(dhdp, FALSE);
dhd_os_sdunlock(dhdp);
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
@@ -9630,22 +9920,25 @@
} else {
/* App must have restored power to device before calling */
- DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+ DHD_ERROR(("\n\n%s: == Power ON ==\n", __FUNCTION__));
if (bus->dhd->dongle_reset) {
/* Turn on WLAN */
dhd_os_sdlock(dhdp);
- /* Reset SD client */
+ /* Reset SD client -- required if devreset is called
+ * via 'dhd devreset' iovar
+ */
bcmsdh_reset(bus->sdh);
-
/* Attempt to re-attach & download */
if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
(uint32 *)(uintptr)si_enum_base(bus->cl_devid),
bus->cl_devid)) {
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
/* Attempt to download binary to the dongle */
if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.c
index 03178b3..31cfe7b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.c
@@ -1,7 +1,9 @@
/*
* DHD PROP_TXSTATUS Module.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -3601,7 +3603,6 @@
mode = WLFC_MODE_AFQ;
} else {
WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
- WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
}
ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.h
index 77edd61..5a223f5 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_stats.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_stats.h
index 0da95ef..faad142 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_stats.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -2,7 +2,9 @@
* Common stats definitions for clients of dongle
* ports
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
index 0972fe9..a6b5c0a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -1,7 +1,9 @@
/*
* Dongle WL Header definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.c
index 44f0910..3615cff 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.c
@@ -2,49 +2,30 @@
* IE/TLV fragmentation/defragmentation support for
* Broadcom 802.11bang Networking Device Driver
*
- * Copyright 2020 Broadcom
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
*
- * This program is the proprietary software of Broadcom and/or
- * its licensors, and may only be used, duplicated, modified or distributed
- * pursuant to the terms and conditions of a separate, written license
- * agreement executed between you and Broadcom (an "Authorized License").
- * Except as set forth in an Authorized License, Broadcom grants no license
- * (express or implied), right to use, or waiver of any kind with respect to
- * the Software, and Broadcom expressly reserves all rights in and to the
- * Software and all intellectual property rights therein. IF YOU HAVE NO
- * AUTHORIZED LICENSE, THEN YOU HAVE NO RIGHT TO USE THIS SOFTWARE IN ANY
- * WAY, AND SHOULD IMMEDIATELY NOTIFY BROADCOM AND DISCONTINUE ALL USE OF
- * THE SOFTWARE.
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
- * Except as expressly set forth in the Authorized License,
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
*
- * 1. This program, including its structure, sequence and organization,
- * constitutes the valuable trade secrets of Broadcom, and you shall use
- * all reasonable efforts to protect the confidentiality thereof, and to
- * use this information only in connection with your use of Broadcom
- * integrated circuit products.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
*
- * 2. TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED
- * "AS IS" AND WITH ALL FAULTS AND BROADCOM MAKES NO PROMISES,
- * REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR
- * OTHERWISE, WITH RESPECT TO THE SOFTWARE. BROADCOM SPECIFICALLY
- * DISCLAIMS ANY AND ALL IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
- * NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES,
- * ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. YOU ASSUME THE ENTIRE RISK ARISING
- * OUT OF USE OR PERFORMANCE OF THE SOFTWARE.
- *
- * 3. TO THE MAXIMUM EXTENT PERMITTED BY LAW, IN NO EVENT SHALL
- * BROADCOM OR ITS LICENSORS BE LIABLE FOR (i) CONSEQUENTIAL, INCIDENTAL,
- * SPECIAL, INDIRECT, OR EXEMPLARY DAMAGES WHATSOEVER ARISING OUT OF OR
- * IN ANY WAY RELATING TO YOUR USE OF OR INABILITY TO USE THE SOFTWARE EVEN
- * IF BROADCOM HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES; OR (ii)
- * ANY AMOUNT IN EXCESS OF THE AMOUNT ACTUALLY PAID FOR THE SOFTWARE ITSELF
- * OR U.S. $1, WHICHEVER IS GREATER. THESE LIMITATIONS SHALL APPLY
- * NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
* $Id$
*
- * <<Broadcom-WL-IPTag/Proprietary:>>
+ * <<Broadcom-WL-IPTag/Open:>>
*/
#include <bcmutils.h>
@@ -94,7 +75,7 @@
}
/* adjust buf_len to length after ie including it */
- buf_len -= ((const uint8 *)ie - (const uint8 *)buf);
+ buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf));
/* update length from fragments, okay if no next ie */
while ((ie = bcm_next_tlv(ie, &buf_len)) &&
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.h
index 8f0a9c2..aa5d7fc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/frag.h
@@ -2,49 +2,30 @@
* IE/TLV (de)fragmentation declarations/definitions for
* Broadcom 802.11abgn Networking Device Driver
*
- * Copyright 2020 Broadcom
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
*
- * This program is the proprietary software of Broadcom and/or
- * its licensors, and may only be used, duplicated, modified or distributed
- * pursuant to the terms and conditions of a separate, written license
- * agreement executed between you and Broadcom (an "Authorized License").
- * Except as set forth in an Authorized License, Broadcom grants no license
- * (express or implied), right to use, or waiver of any kind with respect to
- * the Software, and Broadcom expressly reserves all rights in and to the
- * Software and all intellectual property rights therein. IF YOU HAVE NO
- * AUTHORIZED LICENSE, THEN YOU HAVE NO RIGHT TO USE THIS SOFTWARE IN ANY
- * WAY, AND SHOULD IMMEDIATELY NOTIFY BROADCOM AND DISCONTINUE ALL USE OF
- * THE SOFTWARE.
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
- * Except as expressly set forth in the Authorized License,
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
*
- * 1. This program, including its structure, sequence and organization,
- * constitutes the valuable trade secrets of Broadcom, and you shall use
- * all reasonable efforts to protect the confidentiality thereof, and to
- * use this information only in connection with your use of Broadcom
- * integrated circuit products.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
*
- * 2. TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED
- * "AS IS" AND WITH ALL FAULTS AND BROADCOM MAKES NO PROMISES,
- * REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR
- * OTHERWISE, WITH RESPECT TO THE SOFTWARE. BROADCOM SPECIFICALLY
- * DISCLAIMS ANY AND ALL IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
- * NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES,
- * ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. YOU ASSUME THE ENTIRE RISK ARISING
- * OUT OF USE OR PERFORMANCE OF THE SOFTWARE.
- *
- * 3. TO THE MAXIMUM EXTENT PERMITTED BY LAW, IN NO EVENT SHALL
- * BROADCOM OR ITS LICENSORS BE LIABLE FOR (i) CONSEQUENTIAL, INCIDENTAL,
- * SPECIAL, INDIRECT, OR EXEMPLARY DAMAGES WHATSOEVER ARISING OUT OF OR
- * IN ANY WAY RELATING TO YOUR USE OF OR INABILITY TO USE THE SOFTWARE EVEN
- * IF BROADCOM HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES; OR (ii)
- * ANY AMOUNT IN EXCESS OF THE AMOUNT ACTUALLY PAID FOR THE SOFTWARE ITSELF
- * OR U.S. $1, WHICHEVER IS GREATER. THESE LIMITATIONS SHALL APPLY
- * NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
*
- * <<Broadcom-WL-IPTag/Proprietary:>>
+ * <<Broadcom-WL-IPTag/Open:>>
*
* $Id$
*
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktpool.c
index fe7a12d..f54c7c2 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktpool.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktpool.c
@@ -1,7 +1,9 @@
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktq.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktq.c
index 2f6a72c..c325a37 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktq.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hnd_pktq.c
@@ -1,7 +1,9 @@
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndlhl.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndlhl.c
index dc04ca3..f220e84 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndlhl.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndlhl.c
@@ -2,7 +2,9 @@
* Misc utility routines for accessing lhl specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -55,7 +57,11 @@
{
if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
/* Enable PMU sleep mode0 */
+#ifdef BCMQT
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_0);
+#else
LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_2);
+#endif // endif
/* Modify as per the
BCM43012/LHL#LHL-RecommendedsettingforvariousPMUSleepModes:
*/
@@ -338,7 +344,9 @@
PMU_REG(sih, pmuintctrl0, PMU_INTC_ALP_REQ, PMU_INTC_ALP_REQ);
PMU_REG(sih, pmuintmask0, RSRC_INTR_MASK_TIMER_INT_0, RSRC_INTR_MASK_TIMER_INT_0);
+#ifndef BCMQT
LHL_REG(sih, lhl_main_ctl_adr, LHL_FAST_WRITE_EN, LHL_FAST_WRITE_EN);
+#endif /* BCMQT */
PMU_REG(sih, pmucontrol_ext, PCTL_EXT_USE_LHL_TIMER, PCTL_EXT_USE_LHL_TIMER);
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndmem.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndmem.c
index 419068a..d6213f4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndmem.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndmem.c
@@ -1,7 +1,9 @@
/*
* Utility routines for configuring different memories in Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndpmu.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndpmu.c
index 0964063..7a4fd28 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndpmu.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -2,7 +2,9 @@
* Misc utility routines for accessing PMU corerev specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11.h
index 091511e..8030e49 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to 802.11
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -355,6 +357,7 @@
uint8 margin;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_SIZE (sizeof(dot11_tpc_rep_t))
#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */
BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
@@ -968,6 +971,9 @@
#define DOT11_ESP_INFO_LIST_AC_VI 2u /* access category of esp information list AC_VI */
#define DOT11_ESP_INFO_LIST_AC_VO 3u /* access category of esp information list AC_VO */
+#define DOT11_ESP_INFO_LIST_DF_MASK 0x18 /* Data Format Mask */
+#define DOT11_ESP_INFO_LIST_BAWS_MASK 0xE0 /* BA window size mask */
+
/* nom_msdu_size */
#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */
#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */
@@ -1080,7 +1086,6 @@
#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */
#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */
#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */
-#define FC_SUBTYPE_TIMING_ADV 6 /* timing advertisement */
#define FC_SUBTYPE_BEACON 8 /* beacon */
#define FC_SUBTYPE_ATIM 9 /* ATIM */
#define FC_SUBTYPE_DISASSOC 10 /* disassoc. */
@@ -1139,7 +1144,6 @@
#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. response */
#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */
#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */
-#define FC_TIMING_ADV FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_TIMING_ADV) /* timing advertisement */
#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */
#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */
#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */
@@ -1364,7 +1368,6 @@
#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */
#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */
#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */
-#define DOT11_SC_UNSUP_RSNIE_VER 44 /* Unsupported RSN IE version */
#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */
#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */
#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */
@@ -1390,6 +1393,7 @@
*/
#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */
+#define DOT11_SC_SAE_HASH_TO_ELEMENT 126 /* SAE hash-to-element */
/* Info Elts, length of INFORMATION portion of Info Elts */
#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */
@@ -1472,6 +1476,8 @@
#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */
#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */
#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */
+#define DOT11_MNG_NONTRANS_BSSID_CAP_ID 83 /* 11k nontransmitted BSSID capability */
+#define DOT11_MNG_MULTIPLE_BSSIDINDEX_ID 85 /* 11k multiple BSSID index */
#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */
#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */
#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */
@@ -1521,6 +1527,7 @@
#define DOT11_MNG_MESH_CSP_ID 222 /* d11 Mesh Channel Switch Parameter */
#define DOT11_MNG_FILS_IND_ID 240 /* 11ai FILS Indication element */
#define DOT11_MNG_FRAGMENT_ID 242 /* IE's fragment ID */
+#define DOT11_MNG_RSNX_ID 244 /* d11 management RSNXE id */
/* The following ID extensions should be defined >= 255
* i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension).
@@ -1540,6 +1547,15 @@
#define DOT11_MNG_SRPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SRPS_ID)
#define EXT_MNG_BSSCOLOR_CHANGE_ID 42u /* BSS Color Change Announcement */
#define DOT11_MNG_BSSCOLOR_CHANGE_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_BSSCOLOR_CHANGE_ID)
+#define EXT_MNG_HE_6GBAND_CAP_ID 59u /* HE 6 GHz Band Capabilities */
+#define DOT11_MNG_HE_6GBAND_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_6GBAND_CAP_ID)
+
+#define EXT_MNG_SAE_REJECTED_GROUPS_ID 92u /* WPA3 SAE rejected group element */
+#define DOT11_MNG_SAE_REJECTED_GROUPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SAE_REJECTED_GROUPS_ID)
+
+#define EXT_MNG_SAE_ANTI_CLOGGING_TOKEN_ID 93u /* WPA3 SAE anticlogging token container */
+#define DOT11_MNG_SAE_ANTI_CLOGGING_TOKEN_ID (DOT11_MNG_ID_EXT_ID + \
+ EXT_MNG_SAE_ANTI_CLOGGING_TOKEN_ID)
/* FILS and OCE ext ids */
#define FILS_EXTID_MNG_REQ_PARAMS 2u /* FILS Request Parameters element */
@@ -1552,6 +1568,9 @@
#define FILS_EXTID_MNG_HLP_CONTAINER_ID 5u /* FILS HLP Container element */
#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID +\
FILS_EXTID_MNG_HLP_CONTAINER_ID)
+#define FILS_EXTID_MNG_KEY_DELIVERY_ID 7u /* FILS Key Delivery element */
+#define DOT11_MNG_FILS_KEY_DELIVERY (DOT11_MNG_ID_EXT_ID +\
+ FILS_EXTID_MNG_KEY_DELIVERY_ID)
#define FILS_EXTID_MNG_WRAPPED_DATA_ID 8u /* FILS Wrapped Data element */
#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID +\
FILS_EXTID_MNG_WRAPPED_DATA_ID)
@@ -1577,6 +1596,11 @@
(_ie)->id_ext = _id; \
} while (0)
+/* Extended RSN Capabilities length in octets (1 octet) */
+#define DOT11_RSNX_CAP_LEN 1
+/* Extended RSN Capabilities */
+#define DOT11_RSNX_SAE_H2E 5
+
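DOT11_RSNX_SAE_H2E is a bit position inside the first Extended RSN Capabilities octet carried by the RSNXE (element id 244 above). A minimal sketch of how a parser might test it, assuming the driver's uint8 typedef; the helper name is illustrative and not taken from the patch:

static int rsnxe_supports_h2e(const uint8 *rsnx_data, uint8 rsnx_len)
{
	/* rsnx_data points at the octets following the id/len header */
	if (rsnx_len < DOT11_RSNX_CAP_LEN)
		return 0;
	return (rsnx_data[0] & (1u << DOT11_RSNX_SAE_H2E)) != 0;
}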
/* Rate Defines */
/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
@@ -1674,6 +1698,8 @@
#define DOT11_EXT_CAP_TIMBC 18
/* BSS Transition Management support bit position */
#define DOT11_EXT_CAP_BSSTRANS_MGMT 19
+/* Multiple BSSID support position */
+#define DOT11_EXT_CAP_MULTIBSSID 22
/* Direct Multicast Service */
#define DOT11_EXT_CAP_DMS 26
/* Interworking support bit position */
@@ -1694,13 +1720,13 @@
#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */
#define DOT11_EXT_CAP_FILS 72 /* FILS Capability */
/* TWT support */
-#define DOT11_EXT_CAP_TWT_REQUESTER 75
-#define DOT11_EXT_CAP_TWT_RESPONDER 76
+#define DOT11_EXT_CAP_TWT_REQUESTER 77
+#define DOT11_EXT_CAP_TWT_RESPONDER 78
/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
* Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path.
* It will cause ROM invalidation otherwise.
*/
-#define DOT11_EXT_CAP_MAX_IDX 76
+#define DOT11_EXT_CAP_MAX_IDX 79
#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */
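The DOT11_EXT_CAP_* values are bit offsets into the Extended Capabilities element data (LSB-first within each octet), so the corrected TWT positions 77/78 land in the tenth octet. A hedged sketch of the usual octet/bit lookup; the helper name is illustrative:

static int ext_cap_isset(const uint8 *ext_cap, uint32 ext_cap_len, uint32 bit)
{
	/* each DOT11_EXT_CAP_* value names one bit of the element data */
	if ((bit / 8u) >= ext_cap_len)
		return 0;
	return (ext_cap[bit / 8u] & (1u << (bit % 8u))) != 0;
}

/* e.g. ext_cap_isset(data, len, DOT11_EXT_CAP_TWT_REQUESTER) */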
@@ -3826,15 +3852,15 @@
uchar id;
uchar len;
uchar oui [3];
- uchar data [1]; /* Variable size data */
+ uchar data [1]; /* Variable size data */
} BWL_POST_PACKED_STRUCT;
typedef struct vndr_ie vndr_ie_t;
-#define VNDR_IE_HDR_LEN 2 /* id + len field */
-#define VNDR_IE_MIN_LEN 3 /* size of the oui field */
+#define VNDR_IE_HDR_LEN 2u /* id + len field */
+#define VNDR_IE_MIN_LEN 3u /* size of the oui field */
#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
-#define VNDR_IE_MAX_LEN 255 /* vendor IE max length, without ID and len */
+#define VNDR_IE_MAX_LEN 255u /* vendor IE max length, without ID and len */
/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
@@ -4267,24 +4293,6 @@
} \
} while (0)
-/* Map the mcs code to mcs bit map */
-#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
- ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
- (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
- (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
-
-#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
- ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
-
-/* Map the mcs bit map to mcs code */
-#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
- ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
- (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
- (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
-
-#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
- (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
-
/** VHT Capabilities Supported Channel Width */
typedef enum vht_cap_chan_width {
VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
@@ -4326,6 +4334,8 @@
VHT_OP_CHAN_WIDTH_80_80 = 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
} vht_op_chan_width_t;
+#define VHT_OP_INFO_LEN 3
+
/* AID length */
#define AID_IE_LEN 2
/**
@@ -4380,10 +4390,12 @@
#define WPA_OUI_LEN 3 /* WPA OUI length */
#define WPA_OUI_TYPE 1
#define WPA_VERSION 1 /* WPA version */
+#define WPA_VERSION_LEN 2 /* WPA version length */
#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
#define WPA2_VERSION 1 /* WPA2 version */
#define WPA2_VERSION_LEN 2 /* WPA2 version length */
+#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */
/* ************* WPS definitions. ************* */
#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
@@ -4391,23 +4403,25 @@
#define WPS_OUI_TYPE 4
/* ************* WFA definitions. ************* */
-#if defined(WL_LEGACY_P2P)
-#define MAC_OUI "\x00\x17\xF2" /* MACOSX OUI */
-#define MAC_OUI_TYPE_P2P 5
+#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
+#define WFA_OUI_TYPE_P2P 9
+
+#ifdef WL_LEGACY_P2P
+#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */
+#define APPLE_OUI_LEN 3
+#define APPLE_OUI_TYPE_P2P 5
#endif /* WL_LEGACY_P2P */
-#ifdef P2P_IE_OVRD
-#define WFA_OUI MAC_OUI
+#ifndef WL_LEGACY_P2P
+#define P2P_OUI WFA_OUI
+#define P2P_OUI_LEN WFA_OUI_LEN
+#define P2P_OUI_TYPE WFA_OUI_TYPE_P2P
#else
-#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
-#endif /* P2P_IE_OVRD */
-#define WFA_OUI_LEN 3 /* WFA OUI length */
-#ifdef P2P_IE_OVRD
-#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
-#else
-#define WFA_OUI_TYPE_TPC 8
-#define WFA_OUI_TYPE_P2P 9
-#endif // endif
+#define P2P_OUI APPLE_OUI
+#define P2P_OUI_LEN APPLE_OUI_LEN
+#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P
+#endif /* !WL_LEGACY_P2P */
#define WFA_OUI_TYPE_TPC 8
#ifdef WLTDLS
@@ -4422,24 +4436,31 @@
#define WFA_OUI_TYPE_MBO_OCE 0x16
/* RSN authenticated key management suite */
-#define RSN_AKM_NONE 0 /* None (IBSS) */
-#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
-#define RSN_AKM_PSK 2 /* Pre-shared Key */
-#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
-#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more
* Just kept here to avoid build issue in BISON/CARIBOU branch
*/
-#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
-#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
-#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
-#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
-#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
-#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */
-#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */
-#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */
-#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */
-#define RSN_AKM_OWE 18 /* RFC 8110 OWE */
+#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */
+#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */
+#define RSN_AKM_SUITEB_SHA256_1X 11 /* Suite B SHA256 */
+#define RSN_AKM_SUITEB_SHA384_1X 12 /* Suite B-192 SHA384 */
+#define RSN_AKM_FBT_SHA384_1X 13 /* FBT SHA384 */
+#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */
+#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */
+#define RSN_AKM_FBT_SHA256_FILS 16
+#define RSN_AKM_FBT_SHA384_FILS 17
+#define RSN_AKM_OWE 18 /* RFC 8110 OWE */
+#define RSN_AKM_FBT_SHA384_PSK 19
+#define RSN_AKM_PSK_SHA384 20
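Each RSN_AKM_* value is the type octet of a 4-byte AKM suite selector whose first three octets are the 00-0F-AC OUI (WPA2_OUI above). An illustrative sketch of composing such a selector; the helper name is not from the driver:

static void rsn_build_akm_suite(uint8 suite[4], uint8 akm_type)
{
	memcpy(suite, WPA2_OUI, WPA2_OUI_LEN);	/* 00-0F-AC */
	suite[3] = akm_type;			/* e.g. RSN_AKM_SAE_PSK */
}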
/* OSEN authenticated key management suite */
#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
@@ -4479,10 +4500,8 @@
#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
#define WCN_TYPE 4 /* WCN type */
-#ifdef BCMWAPI_WPI
#define SMS4_KEY_LEN 16
#define SMS4_WPI_CBC_MAC_LEN 16
-#endif // endif
/* 802.11r protocol definitions */
@@ -4570,13 +4589,11 @@
#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
-#ifdef BCMWAPI_WAI
#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */
#define WAPI_VERSION 1 /* WAPI version */
#define WAPI_VERSION_LEN 2 /* WAPI version length */
#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */
#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */
-#endif /* BCMWAPI_WAI */
/* ************* WMM Parameter definitions. ************* */
#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */
@@ -4953,9 +4970,6 @@
uint8 data[1];
} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t;
-#define FTM_TPK_LEN_SECURE_2_0 32
-#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14
-#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14
BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie {
uint8 id; /* DOT11_MNG_VS_ID */
uint8 len; /* length following */
@@ -4988,7 +5002,7 @@
#define FTM_TPK_LEN 16
#define FTM_RI_RR_BUF_LEN 32
#define FTM_TPK_RI_RR_LEN 13
-#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28
#define FTM_TPK_DIGEST_LEN 32
#define FTM_TPK_BUFFER_LEN 128
#define FTM_TPK_RI_PHY_LEN 7
@@ -5044,6 +5058,15 @@
(_ie)->ext_id = _id; \
} while (0)
+BWL_PRE_PACKED_STRUCT struct sae_token_container_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 data[1]; /* WPA3 SAE Anticlogging token */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct sae_token_container_ie sae_token_container_ie_t;
+
/* 802.11u interworking access network options */
#define IW_ANT_MASK 0x0f
#define IW_INTERNET_MASK 0x10
@@ -5239,15 +5262,6 @@
#define BCM_AIBSS_IE_TYPE 56
-/* WLWAVE */
-/* timing advertising frame */
-BWL_PRE_PACKED_STRUCT struct dot11_timing_adv {
- uint8 tsf[8]; /* Time Synchronization Function */
- uint16 capability; /* capability information */
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_timing_adv dot11_timing_adv_t;
-#define DOT11_TIMING_ADV_FIXED_LEN 10
-
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11ax.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11ax.h
new file mode 100644
index 0000000..b3d0679
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11ax.h
@@ -0,0 +1,1015 @@
+/*
+ * Basic types and constants relating to 802.11ax/HE STA
+ * This is a portion of 802.11ax definition. The rest are in 802.11.h.
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _802_11ax_h_
+#define _802_11ax_h_
+
+#include <typedefs.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* special STA-IDs (Section 27.11.1) */
+#define HE_STAID_BSS_BCAST 0
+#define HE_STAID_UNASSOCIATED_STA 2045
+#define HE_STAID_NO_USER 2046
+#define HE_STAID_MBSS_BCAST 2047
+#define HE_STAID_MASK 0x07FF
+#define HE_AID12_MASK 0x0FFF
+
+/* Deprecated */
+#define HE_STAID_RU_NODATA 2046
+
+/**
+ * HE Capabilities element (sec 9.4.2.218)
+ */
+
+/* HE MAC Capabilities Information field (figure 9-589ck) */
+#define HE_MAC_CAP_INFO_SIZE 6
+typedef uint8 he_mac_cap_t[HE_MAC_CAP_INFO_SIZE];
+
+/* bit position and field width */
+#define HE_MAC_HTC_HE_SUPPORT_IDX 0 /* HTC HE Support */
+#define HE_MAC_HTC_HE_SUPPORT_FSZ 1
+#define HE_MAC_TWT_REQ_SUPPORT_IDX 1 /* TWT Requestor Support */
+#define HE_MAC_TWT_REQ_SUPPORT_FSZ 1
+#define HE_MAC_TWT_RESP_SUPPORT_IDX 2 /* TWT Responder Support */
+#define HE_MAC_TWT_RESP_SUPPORT_FSZ 1
+#define HE_MAC_FRAG_SUPPORT_IDX 3 /* Fragmentation Support */
+#define HE_MAC_FRAG_SUPPORT_FSZ 2
+#define HE_MAC_MAX_MSDU_AMSDU_FRAGS_IDX 5 /* Max. Fragmented MSDUs/AMSDUs Exponent */
+#define HE_MAC_MAX_MSDU_AMSDU_FRAGS_FSZ 3
+#define HE_MAC_MIN_FRAG_SIZE_IDX 8 /* Min. Fragment Size */
+#define HE_MAC_MIN_FRAG_SIZE_FSZ 2
+#define HE_MAC_TRG_PAD_DUR_IDX 10 /* Trigger Frame MAC Pad Dur */
+#define HE_MAC_TRG_PAD_DUR_FSZ 2
+#define HE_MAC_MULTI_TID_RX_AGG_IDX 12 /* Multi TID RX Aggregation */
+#define HE_MAC_MULTI_TID_RX_AGG_FSZ 3
+#define HE_MAC_LINK_ADAPT_IDX 15 /* HE Link Adaptation */
+#define HE_MAC_LINK_ADAPT_FSZ 2
+#define HE_MAC_ALL_ACK_SUPPORT_IDX 17 /* All Ack Support */
+#define HE_MAC_ALL_ACK_SUPPORT_FSZ 1
+#define HE_MAC_TRS_SUPPORT_IDX 18 /* TRS Support */
+#define HE_MAC_TRS_SUPPORT_FSZ 1
+#define HE_MAC_A_BSR_IDX 19 /* A-BSR Support */
+#define HE_MAC_A_BSR_FSZ 1
+#define HE_MAC_BCAST_TWT_SUPPORT_IDX 20 /* Broadcast TWT Support */
+#define HE_MAC_BCAST_TWT_SUPPORT_FSZ 1
+#define HE_MAC_BA_32BITMAP_SUPPORT_IDX 21 /* 32-bit BA Bitmap Support */
+#define HE_MAC_BA_32BITMAP_SUPPORT_FSZ 1
+#define HE_MAC_MU_CASCADE_SUPPORT_IDX 22 /* MU Cascade Support */
+#define HE_MAC_MU_CASCADE_SUPPORT_FSZ 1
+#define HE_MAC_MULTI_TID_AGG_ACK_IDX 23 /* Ack Enabled Multi TID Agg. */
+#define HE_MAC_MULTI_TID_AGG_ACK_FSZ 1
+#define HE_MAC_RESVD1_IDX 24 /* Reserved Bit */
+#define HE_MAC_RESVD1_FSZ 1
+#define HE_MAC_OMI_ACONTROL_SUPPORT_IDX 25 /* OMI A-Control Support */
+#define HE_MAC_OMI_ACONTROL_SUPPORT_FSZ 1
+#define HE_MAC_OFDMA_RA_SUPPORT_IDX 26 /* OFDMA RA Support */
+#define HE_MAC_OFDMA_RA_SUPPORT_FSZ 1
+#define HE_MAC_MAX_AMPDU_LEN_EXP_EXT_IDX 27 /* Max AMPDU Length Exponent Extension */
+#define HE_MAC_MAX_AMPDU_LEN_EXP_EXT_FSZ 2
+#define HE_MAC_AMSDU_FRAG_SUPPORT_IDX 29 /* AMSDU Fragmentation Support */
+#define HE_MAC_AMSDU_FRAG_SUPPORT_FSZ 1
+#define HE_MAC_FLEX_TWT_SCHEDULE_IDX 30 /* Flexible TWT Schedule Support */
+#define HE_MAC_FLEX_TWT_SCHEDULE_FSZ 1
+#define HE_MAC_RX_MBSS_CTL_FRAME_IDX 31 /* Rx of Control frames of MBSS */
+#define HE_MAC_RX_MBSS_CTL_FRAME_FSZ 1
+#define HE_MAC_RX_AGG_BSRP_IDX 32 /* Support Rx of aggregated BSRP BQRP */
+#define HE_MAC_RX_AGG_BSRP_FSZ 1
+#define HE_MAC_QTP_SUPPORT_IDX 33 /* Support Quiet time period */
+#define HE_MAC_QTP_SUPPORT_FSZ 1
+#define HE_MAC_ABQR_SUPPORT_IDX 34 /* Support aggregated BQR */
+#define HE_MAC_ABQR_SUPPORT_FSZ 1
+#define HE_MAC_SRP_RSPNDR_IDX 35 /* SRP responder */
+#define HE_MAC_SRP_RSPNDR_FSZ 1
+#define HE_MAC_NDP_FDBCK_SUPPORT_IDX 36 /* NDP feedback report */
+#define HE_MAC_NDP_FDBCK_SUPPORT_FSZ 1
+#define HE_MAC_OPS_SUPPORT_IDX 37 /* OPS support */
+#define HE_MAC_OPS_SUPPORT_FSZ 1
+#define HE_MAC_AMSDU_IN_AMPDU_IDX 38 /* AMSDU in AMPDU support */
+#define HE_MAC_AMSDU_IN_AMPDU_FSZ 1
+#define HE_MAC_MULTI_TID_TX_AGG_IDX 39 /* Multi TID TX Aggregation */
+#define HE_MAC_MULTI_TID_TX_AGG_FSZ 3
+#define HE_MAC_SUBCH_SEL_TR_SUPPORT_IDX 42 /* HE Subchl Selective Trns Sup */
+#define HE_MAC_SUBCH_SEL_TR_SUPPORT_FSZ 1
+#define HE_MAC_UL_TONE_RU_SUPPORT_IDX 43 /* UL tone RU Support */
+#define HE_MAC_UL_TONE_RU_SUPPORT_FSZ 1
+#define HE_MAC_OMC_UL_MU_DIS_RX_IDX 44 /* OM Control ULMUData Dis RX Sup */
+#define HE_MAC_OMC_UL_MU_DIS_RX_FSZ 1
+#define HE_MAC_HE_DSMPS_SUPPORT_IDX 45 /* HE Dynamic SM Power Save Sup */
+#define HE_MAC_HE_DSMPS_SUPPORT_FSZ 1
+#define HE_MAC_PUNC_SOUND_SUPPORT_IDX 46 /* Punctured Sounding Sup */
+#define HE_MAC_PUNC_SOUND_SUPPORT_FSZ 1
+#define HE_MAC_NONAX_TFRX_SUPPORT_IDX 47 /* HT and VHT TriggerFrame Rx Sup */
+#define HE_MAC_NONAX_TFRX_SUPPORT_FSZ 1
+
+/* bit position and field width */
+#define HE_SU_PPDU_FORMAT_IDX 0
+#define HE_SU_PPDU_FORMAT_FSZ 1
+#define HE_SU_PPDU_BEAM_CHANGE_IDX 1
+#define HE_SU_PPDU_BEAM_CHANGE_FSZ 1
+#define HE_SU_PPDU_DL_UL_IDX 2
+#define HE_SU_PPDU_DL_UL_FSZ 1
+#define HE_SU_PPDU_MCS_IDX 3
+#define HE_SU_PPDU_MCS_FSZ 4
+#define HE_SU_PPDU_DCM_IDX 7
+#define HE_SU_PPDU_DCM_FSZ 1
+#define HE_SU_PPDU_BSS_COLOR_IDX 8
+#define HE_SU_PPDU_BSS_COLOR_FSZ 6
+#define HE_SU_PPDU_SR_IDX 15
+#define HE_SU_PPDU_SR_FSZ 4
+#define HE_SU_PPDU_BW_IDX 19
+#define HE_SU_PPDU_BW_FSZ 2
+#define HE_SU_PPDU_LTF_IDX 21
+#define HE_SU_PPDU_LTF_FSZ 2
+#define HE_SU_PPDU_NSTS_IDX 23
+#define HE_SU_PPDU_NSTS_FSZ 3
+#define HE_SU_PPDU_TXOP_IDX 26
+#define HE_SU_PPDU_TXOP_FSZ 7
+#define HE_SU_PPDU_CODING_IDX 33
+#define HE_SU_PPDU_CODING_FSZ 1
+#define HE_SU_PPDU_LDPC_IDX 34
+#define HE_SU_PPDU_LDPC_FSZ 1
+#define HE_SU_PPDU_STBC_IDX 35
+#define HE_SU_PPDU_STBC_FSZ 1
+#define HE_SU_PPDU_TXBF_IDX 36
+#define HE_SU_PPDU_TXBF_FSZ 1
+
+/* HT Control Field: (Table 9-9a) */
+#define HTC_HE_VARIANT 0x3F
+/* HT Control IDs: (Table 9-18a) */
+#define HTC_HE_CTLID_SHIFT 0x2
+#define HTC_HE_CTLID_TRS 0x0
+#define HTC_HE_CTLID_OMI 0x1
+#define HTC_HE_CTLID_HLA 0x2
+#define HTC_HE_CTLID_BSR 0x3
+#define HTC_HE_CTLID_UPH 0x4
+#define HTC_HE_CTLID_BQR 0x5
+#define HTC_HE_CTLID_CAS 0x6
+#define HTC_HE_CTLID_NONE 0xF
+
+#define HE_LTF_1_GI_1_6us (0)
+#define HE_LTF_2_GI_0_8us (1)
+#define HE_LTF_2_GI_1_6us (2)
+#define HE_LTF_4_GI_3_2us (3)
+
+/* max. # of spatial streams */
+#define HE_CAP_MCS_MAP_NSS_MAX 8
+
+/* HE PHY Capabilities Information field (figure 9-589cl) */
+#define HE_PHY_CAP_INFO_SIZE 11
+typedef uint8 he_phy_cap_t[HE_PHY_CAP_INFO_SIZE];
+
+/* PHY Capabilities for D3.0 */
+#define HE_PHY_RESVD1_IDX 0 /* Reserved */
+#define HE_PHY_RESVD1_FSZ 1
+#define HE_PHY_CH_WIDTH_SET_IDX 1 /* Channel Width Set */
+#define HE_PHY_CH_WIDTH_SET_FSZ 7
+#define HE_PHY_PREAMBLE_PUNCT_RX_IDX 8 /* Preamble Puncturing Rx */
+#define HE_PHY_PREAMBLE_PUNCT_RX_FSZ 4
+#define HE_PHY_DEVICE_CLASS_IDX 12 /* Device Class */
+#define HE_PHY_DEVICE_CLASS_FSZ 1
+#define HE_PHY_LDPC_PYLD_IDX 13 /* LDPC Coding In Payload */
+#define HE_PHY_LDPC_PYLD_FSZ 1
+#define HE_PHY_SU_PPDU_1x_LTF_0_8_GI_IDX 14 /* SU PPDU 1x LTF GI 0.8 us */
+#define HE_PHY_SU_PPDU_1x_LTF_0_8_GI_FSZ 1
+#define HE_PHY_MIDAMBLE_RX_MAX_NSTS_IDX 15 /* Midamble Tx/Rx Max NSTS */
+#define HE_PHY_MIDAMBLE_RX_MAX_NSTS_FSZ 2
+#define HE_PHY_NDP_4x_LTF_3_2_GI_RX_IDX 17 /* NDP with 4xLTF 3.2us GI Rx */
+#define HE_PHY_NDP_4x_LTF_3_2_GI_RX_FSZ 1
+#define HE_PHY_STBC_TX_LESS_EQ_80_IDX 18 /* STBC Tx <= 80 MHz */
+#define HE_PHY_STBC_TX_LESS_EQ_80_FSZ 1
+#define HE_PHY_STBC_RX_LESS_EQ_80_IDX 19 /* STBC Rx <= 80 MHz */
+#define HE_PHY_STBC_RX_LESS_EQ_80_FSZ 1
+#define HE_PHY_DOPPLER_TX_IDX 20 /* Doppler Tx */
+#define HE_PHY_DOPPLER_TX_FSZ 1
+#define HE_PHY_DOPPLER_RX_IDX 21 /* Doppler Rx */
+#define HE_PHY_DOPPLER_RX_FSZ 1
+#define HE_PHY_FULL_BW_UL_MU_IDX 22 /* Full bandwidth UL MU */
+#define HE_PHY_FULL_BW_UL_MU_FSZ 1
+#define HE_PHY_PART_BW_UL_MU_IDX 23 /* Partial bandwidth UL MU */
+#define HE_PHY_PART_BW_UL_MU_FSZ 1
+#define HE_PHY_DCM_MAX_CONST_TX_IDX 24 /* DCM Max constellation */
+#define HE_PHY_DCM_MAX_CONST_TX_FSZ 2
+#define HE_PHY_DCM_NSS_TX_IDX 26 /* DCM Encoding Tx */
+#define HE_PHY_DCM_NSS_TX_FSZ 1
+#define HE_PHY_DCM_MAX_CONST_RX_IDX 27 /* DCM Max constellation */
+#define HE_PHY_DCM_MAX_CONST_RX_FSZ 2
+#define HE_PHY_DCM_NSS_RX_IDX 29 /* DCM Encoding Rx */
+#define HE_PHY_DCM_NSS_RX_FSZ 1
+#define HE_PHY_RX_MUPPDU_NON_AP_STA_IDX 30 /* Rx HE MU PPDU from Non-AP */
+#define HE_PHY_RX_MUPPDU_NON_AP_STA_FSZ 1
+#define HE_PHY_SU_BEAMFORMER_IDX 31 /* SU Beamformer */
+#define HE_PHY_SU_BEAMFORMER_FSZ 1
+#define HE_PHY_SU_BEAMFORMEE_IDX 32 /* SU Beamformee */
+#define HE_PHY_SU_BEAMFORMEE_FSZ 1
+#define HE_PHY_MU_BEAMFORMER_IDX 33 /* MU Beamformer */
+#define HE_PHY_MU_BEAMFORMER_FSZ 1
+#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_IDX 34 /* Beamformee STS For <= 80MHz */
+#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_FSZ 3
+#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_IDX 37 /* Beamformee STS For >80 MHz */
+#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_FSZ 3
+#define HE_PHY_SOUND_DIM_BELOW80MHZ_IDX 40 /* Num. Sounding Dim.<= 80 MHz */
+#define HE_PHY_SOUND_DIM_BELOW80MHZ_FSZ 3
+#define HE_PHY_SOUND_DIM_ABOVE80MHZ_IDX 43 /* Num. Sounding Dim.> 80 MHz */
+#define HE_PHY_SOUND_DIM_ABOVE80MHZ_FSZ 3
+#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_IDX 46 /* Ng=16 For SU Feedback */
+#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_FSZ 1
+#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_IDX 47 /* Ng=16 For MU Feedback */
+#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_FSZ 1
+#define HE_PHY_SU_CODEBOOK_SUPPORT_IDX 48 /* Codebook Sz {4, 2} For SU */
+#define HE_PHY_SU_CODEBOOK_SUPPORT_FSZ 1
+#define HE_PHY_MU_CODEBOOK_SUPPORT_IDX 49 /* Codebook Size {7, 5} For MU */
+#define HE_PHY_MU_CODEBOOK_SUPPORT_FSZ 1
+#define HE_PHY_TRG_SU_BFM_FEEDBACK_IDX 50 /* Triggered SU TXBF Feedback */
+#define HE_PHY_TRG_SU_BFM_FEEDBACK_FSZ 1
+#define HE_PHY_TRG_MU_BFM_FEEDBACK_IDX 51 /* Triggered MU TXBF Feedback */
+#define HE_PHY_TRG_MU_BFM_FEEDBACK_FSZ 1
+#define HE_PHY_TRG_CQI_FEEDBACK_IDX 52 /* Triggered CQI Feedback */
+#define HE_PHY_TRG_CQI_FEEDBACK_FSZ 1
+#define HE_PHY_EXT_RANGE_SU_PYLD_IDX 53 /* HE ER SU PPDU Payload */
+#define HE_PHY_EXT_RANGE_SU_PYLD_FSZ 1
+#define HE_PHY_DL_MU_MIMO_PART_BW_IDX 54 /* DL MUMIMO On Partial BW */
+#define HE_PHY_DL_MU_MIMO_PART_BW_FSZ 1
+#define HE_PHY_PPE_THRESH_PRESENT_IDX 55 /* PPE Threshold Present */
+#define HE_PHY_PPE_THRESH_PRESENT_FSZ 1
+#define HE_PHY_SRP_SR_SUPPORT_IDX 56 /* SRP based SR Support */
+#define HE_PHY_SRP_SR_SUPPORT_FSZ 1
+#define HE_PHY_POWER_BOOST_FACTOR_IDX 57 /* Power Boost Factor Support */
+#define HE_PHY_POWER_BOOST_FACTOR_FSZ 1
+#define HE_PHY_LONG_LTF_SHORT_GI_SU_PPDU_IDX 58 /* HE SU - Long LTF Short GI */
+#define HE_PHY_LONG_LTF_SHORT_GI_SU_PPDU_FSZ 1
+#define HE_PHY_MAX_NC_IDX 59 /* Max Nc */
+#define HE_PHY_MAX_NC_FSZ 3
+#define HE_PHY_STBC_GT80_TX_IDX 62 /* STBC Tx > 80 MHz */
+#define HE_PHY_STBC_GT80_TX_FSZ 1
+#define HE_PHY_STBC_GT80_RX_IDX 63 /* STBC Rx > 80 MHz */
+#define HE_PHY_STBC_GT80_RX_FSZ 1
+#define HE_PHY_HE_ER_SU_PPDU_4X_RX_IDX 64 /* HEERSUPPDU With 4x HE-LTF & 0.8 GI */
+#define HE_PHY_HE_ER_SU_PPDU_4X_RX_FSZ 1
+#define HE_PHY_20_40_HE_PPDU_IDX 65 /* 20MHz In 40MHz HEPPDU In 2.4GHz Band */
+#define HE_PHY_20_40_HE_PPDU_FSZ 1
+#define HE_PHY_20_160_HE_PPDU_IDX 66 /* 20MHz In 160/80+80MHz HEPPDU */
+#define HE_PHY_20_160_HE_PPDU_FSZ 1
+#define HE_PHY_80_160_HE_PPDU_IDX 67 /* 80MHz In 160/80+80MHz HEPPDU */
+#define HE_PHY_80_160_HE_PPDU_FSZ 1
+#define HE_PHY_HE_ER_SU_PPDU_IDX 68 /* HEERSUPPDU With 1x HE-LTF & 0.8 GI */
+#define HE_PHY_HE_ER_SU_PPDU_FSZ 1
+#define HE_PHY_MIDAMBLE_TX_RX_2X_1X_HE_LTF_IDX 69 /* Midamble RX 2x & 1x HE LTF */
+#define HE_PHY_MIDAMBLE_TX_RX_2X_1X_HE_LTF_FSZ 1
+#define HE_PHY_DCM_MAX_BW_IDX 70 /* DCM Max BW */
+#define HE_PHY_DCM_MAX_BW_FSZ 2
+#define HE_PHY_SIGB_SYM_GT16_RX_SUPPORT_IDX 72 /* Greater than 16 HESIG-B OFDM Symb Sup */
+#define HE_PHY_SIGB_SYM_GT16_RX_SUPPORT_FSZ 1
+#define HE_PHY_NON_TRIG_CQI_FEEDBACK_IDX 73 /* Non-Triggered CQI Feedback */
+#define HE_PHY_NON_TRIG_CQI_FEEDBACK_FSZ 1
+#define HE_PHY_1024QAM_TX_IN_RU_LT242_IDX 74 /* Tx 1024-QAM < 242-tone RU Support */
+#define HE_PHY_1024QAM_TX_IN_RU_LT242_FSZ 1
+#define HE_PHY_1024QAM_RX_IN_RU_LT242_IDX 75 /* Rx 1024-QAM < 242-tone RU Support */
+#define HE_PHY_1024QAM_RX_IN_RU_LT242_FSZ 1
+#define HE_PHY_RX_HE_MU_COMPR_SIGB_IDX 76 /* RxFull BWSU HEMUPPDU Wt CompSIGB */
+#define HE_PHY_RX_HE_MU_COMPR_SIGB_FSZ 1
+#define HE_PHY_RX_HE_MU_NCOMP_SIGB_IDX 77 /* RxFull BWSU HEMUPPDU wt NCompSIGB */
+#define HE_PHY_RX_HE_MU_NCOMP_SIGB_FSZ 1
+#define HE_PHY_RESVD2_IDX 78 /* Reserved */
+#define HE_PHY_RESVD2_FSZ 10
+
+/* DCM */
+#define HE_PHY_CAP_DCM_NOT_SUPP 0x0
+#define HE_PHY_CAP_DCM_BPSK 0x1
+#define HE_PHY_CAP_DCM_QPSK 0x2
+#define HE_PHY_CAP_DCM_16_QAM 0x3
+#define HE_PHY_CAP_DCM_1SS 0x0
+#define HE_PHY_CAP_DCM_2SS 0x1
+
+/* HE Mac Capabilities values */
+
+/* b3-b4: Fragmentation Support field (table 9-262z) */
+#define HE_MAC_FRAG_NOSUPPORT 0 /* dynamic frag not supported */
+#define HE_MAC_FRAG_VHT_MPDU 1 /* Frag support for VHT single MPDU only */
+#define HE_MAC_FRAG_ONE_PER_AMPDU 2 /* 1 frag per MPDU in A-MPDU */
+#define HE_MAC_FRAG_MULTI_PER_AMPDU 3 /* 2+ frag per MPDU in A-MPDU */
+
+/* b8-b9: Minimum payload size of first fragment */
+/* no restriction on min. payload size */
+#define HE_MAC_MINFRAG_NO_RESTRICT 0
+/* minimum payload size of 128 Bytes */
+#define HE_MAC_MINFRAG_SIZE_128 1
+/* minimum payload size of 256 Bytes */
+#define HE_MAC_MINFRAG_SIZE_256 2
+/* minimum payload size of 512 Bytes */
+#define HE_MAC_MINFRAG_SIZE_512 3
+
+/* b15-b16: HE Link Adaptation */
+#define HE_MAC_SEND_NO_MFB 0 /* if STA does not provide HE MFB */
+#define HE_MAC_SEND_UNSOLICATED_MFB 2 /* if STA provides unsolicited HE MFB */
+#define HE_MAC_SEND_MFB_IN_RESPONSE 3 /* if STA can provide HE MFB in response to
+ * HE MRQ and if the STA provides unsolicited HE MFB.
+ */
+
+/* b27-b28: Max. AMPDU Length HE Exponent */
+/* Use Max AMPDU length exponent from VHT or HT */
+#define HE_MAC_MAX_AMPDU_EXP_ADOPT_VHT (0)
+/* Max. AMPDU length =
+ * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in VHT CAP is 7) or
+ * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in HT CAP is 7).
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_1 (1)
+/* Max. AMPDU length =
+ * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in VHT CAP is 7) or
+ * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in HT CAP is 7).
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_2 (2)
+
+/* HE PHY Capabilities values */
+/* b1-b7: Channel Width Support field */
+#define HE_PHY_CH_WIDTH_2G_40 0x01
+#define HE_PHY_CH_WIDTH_5G_80 0x02
+#define HE_PHY_CH_WIDTH_5G_160 0x04
+#define HE_PHY_CH_WIDTH_5G_80P80 0x08
+#define HE_PHY_CH_WIDTH_2G_40_RU 0x10
+#define HE_PHY_CH_WIDTH_5G_242_RU 0x20
+
+/* b8-b11: Preamble puncturing Rx */
+#define HE_PHY_PREAMBLE_PUNC_RX_0 0x1
+#define HE_PHY_PREAMBLE_PUNC_RX_1 0x2
+#define HE_PHY_PREAMBLE_PUNC_RX_2 0x4
+#define HE_PHY_PREAMBLE_PUNC_RX_3 0x8
+
+/* b24-b29: DCM Encoding at Tx and Rx */
+#define HE_PHY_TX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_TX_DCM_ENC_BPSK 0x01
+#define HE_PHY_TX_DCM_ENC_QPSK 0x02
+#define HE_PHY_TX_DCM_ENC_QAM 0x03
+
+#define HE_PHY_TX_DCM_1_SS 0x00
+#define HE_PHY_TX_DCM_2_SS 0x04
+
+#define HE_PHY_RX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_RX_DCM_ENC_BPSK 0x08
+#define HE_PHY_RX_DCM_ENC_QPSK 0x10
+#define HE_PHY_RX_DCM_ENC_QAM 0x18
+
+#define HE_PHY_RX_DCM_1_SS 0x00
+#define HE_PHY_RX_DCM_2_SS 0x20
+
+/* HE Duration based RTS Threshold IEEE Draft P802.11ax D1.0 Figure 9-589cr */
+#define HE_RTS_THRES_DISABLED 1023
+#define HE_RTS_THRES_ALL_FRAMES 0
+#define HE_RTS_THRES_MASK 0x03ff
+
+/* Tx Rx HE MCS Support field format : IEEE Draft P802.11ax D0.5 Table 9-589cm */
+#define HE_TX_RX_MCS_NSS_SUP_FIELD_MIN_SIZE 2 /* 2 bytes (16 bits) */
+
+/* Fixed portion of the support field */
+#define HE_MCS_NSS_MAX_NSS_M1_IDX 0
+#define HE_MCS_NSS_MAX_NSS_M1_SZ 3
+#define HE_MCS_NSS_MAX_MCS_IDX 3
+#define HE_MCS_NSS_MAX_MCS_SZ 3
+#define HE_MCS_NSS_TX_BW_BMP_IDX 6
+#define HE_MCS_NSS_TX_BW_BMP_SZ 5
+#define HE_MCS_NSS_RX_BW_BMP_IDX 11
+#define HE_MCS_NSS_RX_BW_BMP_SZ 5
+
+#define HE_CAP_MASK(idx, sz) (((1 << sz) - 1) << idx)
+
+/* Descriptor format IEEE Draft P802.11ax_D1.1 Figure 9-589cn */
+#define HE_MCS_DESC_IDX 0
+#define HE_MCS_DESC_SZ 4
+#define HE_NSS_DESC_IDX 4
+#define HE_NSS_DESC_SZ 3
+#define HE_LAST_DESC_IDX 7
+#define HE_LAST_DESC_SZ 1
+
+#define HE_GET_DESC_MCS(desc) ((*((const uint8 *)desc) &\
+ HE_CAP_MASK(HE_MCS_DESC_IDX, HE_MCS_DESC_SZ))\
+ >> HE_MCS_DESC_IDX)
+#define HE_GET_DESC_NSS(desc) ((*((const uint8 *)desc) &\
+ HE_CAP_MASK(HE_NSS_DESC_IDX, HE_NSS_DESC_SZ))\
+ >> HE_NSS_DESC_IDX)
+
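HE_GET_DESC_MCS and HE_GET_DESC_NSS extract the 4-bit MCS and 3-bit NSS subfields of one descriptor octet, with bit 7 flagging the last descriptor. A sketch of iterating a descriptor list under that layout; the function name is illustrative:

static void he_walk_desc_list(const uint8 *desc)
{
	for (;;) {
		uint8 mcs = HE_GET_DESC_MCS(desc);
		uint8 nss = HE_GET_DESC_NSS(desc);

		/* ... consume mcs/nss ... */
		(void)mcs;
		(void)nss;

		/* bit 7 (HE_LAST_DESC_IDX) marks the final descriptor */
		if (*desc & HE_CAP_MASK(HE_LAST_DESC_IDX, HE_LAST_DESC_SZ))
			break;
		desc++;
	}
}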
+/**
+* Bandwidth configuration indices used in the HE TX-RX MCS support field
+* IEEE Draft P802.11ax_D1.1 Section 9.4.2.218.4
+*/
+#define HE_BW20_CFG_IDX 0
+#define HE_BW40_CFG_IDX 1
+#define HE_BW80_CFG_IDX 2
+#define HE_BW80P80_CFG_IDX 3
+#define HE_BW160_CFG_IDX 4
+#define HE_MAX_BW_CFG 5
+
+#define HE_MCS_CODE_0_7 0u
+#define HE_MCS_CODE_0_9 1u
+#define HE_MCS_CODE_0_11 2u
+#define HE_MCS_CODE_NONE 3u
+#define HE_MCS_CODE_SIZE 2u /* num bits */
+#define HE_MCS_CODE_MASK 0x3u /* mask for 1-stream */
+
+/* Defines for The Max HE MCS For n SS subfield (where n = 1, ..., 8) */
+#define HE_MCS_MAP_NSS_MAX 8u /* Max number of streams possible */
+#define HE_MCS_NSS_SET_MASK 0xffffu /* Field is to be 16 bits long */
+#define HE_MCS_NSS_GET_SS_IDX(nss) (((nss)-1u) * HE_MCS_CODE_SIZE)
+#define HE_MCS_NSS_GET_MCS(nss, mcs_nss_map) \
+ (((mcs_nss_map) >> HE_MCS_NSS_GET_SS_IDX(nss)) & HE_MCS_CODE_MASK)
+#define HE_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \
+ do { \
+ (mcs_nss_map) &= (~(HE_MCS_CODE_MASK << HE_MCS_NSS_GET_SS_IDX(nss))); \
+ (mcs_nss_map) |= (((mcs_code) & HE_MCS_CODE_MASK) \
+ << HE_MCS_NSS_GET_SS_IDX(nss)); \
+ (mcs_nss_map) &= (HE_MCS_NSS_SET_MASK); \
+ } while (0)
+
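HE_MCS_NSS_GET_MCS/SET_MCS operate on a 16-bit per-bandwidth map with two bits per spatial stream. A sketch that advertises MCS 0-11 on two streams and leaves the remaining streams at HE_MCS_CODE_NONE; the function name is illustrative:

static uint16 he_build_mcs_nss_map(void)
{
	uint16 map = HE_MCS_NSS_SET_MASK;	/* all 8 streams start at HE_MCS_CODE_NONE */
	uint32 nss;

	for (nss = 1u; nss <= 2u; nss++)
		HE_MCS_NSS_SET_MCS(nss, HE_MCS_CODE_0_11, map);

	return map;	/* HE_MCS_NSS_GET_MCS(1, map) == HE_MCS_CODE_0_11 */
}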
+#define HE_BW80_ORDR_IDX 0u
+#define HE_BW160_ORDR_IDX 1u
+#define HE_BW80P80_ORDR_IDX 2u
+
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN 2u /* 2 bytes */
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_SZ (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 8u) /* 16 bits */
+
+/* Two unit-maps (TX+RX) */
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 2u)
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_SZ (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 8u) /* 32 bits */
+
+/* One TX-RX unit-map (80 MHz) */
+#define HE_MCS_NSS_SUP_FLD_MIN_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN)
+/* Three TX-RX unit-maps (80 MHz, 160MHz, 80+80MHz) */
+#define HE_MCS_NSS_SUP_FLD_MAX_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 3u)
+
+/* HE Capabilities element */
+BWL_PRE_PACKED_STRUCT struct he_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ he_mac_cap_t mac_cap; /* MAC Capabilities Information */
+ he_phy_cap_t phy_cap; /* PHY Capabilities Information */
+ /* he_tx_rx_mcs_nss_sup_t txx_rx_mcs_nss_sup; */ /* Tx Rx HE MCS NSS Support (variable) */
+ /* he_ppe_ths_t ppe_ths; */ /* PPE Thresholds (optional) */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_cap_ie he_cap_ie_t;
+
+/* Multiple BSSID element */
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_cap {
+ uint8 id; /* 83 */
+ uint8 len;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_cap nontrans_BSSID_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_index {
+ uint8 id; /* 85 */
+ uint8 len; /* 3 in beacon, 1 in probe response */
+ uint8 bssid_index; /* between 1 and 2^n - 1 */
+ uint8 dtim_period; /* only valid in beacon */
+ uint8 dtim_count; /* only valid in beacon */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_index multi_BSSID_index_t;
+
+BWL_PRE_PACKED_STRUCT struct fms_descriptor {
+ uint8 id; /* 86 */
+ uint8 len;
+ uint8 num_FMS_counters;
+ uint8 *FMS_counters;
+ uint8 *FMSID;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct fms_descriptor fms_descriptor_t;
+
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_profile_subie {
+ uint8 subie_id; /* 0 */
+ uint8 subie_len;
+ uint8 moreie[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_profile_subie nontrans_BSSID_profile_subie_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_ie {
+ uint8 id;
+ uint8 len;
+ uint8 maxBSSID_indicator;
+ nontrans_BSSID_profile_subie_t profile[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_ie multi_BSSID_ie_t;
+#define DOT11_MULTIPLE_BSSID_PROFILE_SUBID 0
+
+/* IEEE Draft P802.11ax D0.5 Table 9-262ab, Highest MCS Supported subfield encoding */
+#define HE_CAP_MCS_CODE_0_7 0
+#define HE_CAP_MCS_CODE_0_8 1
+#define HE_CAP_MCS_CODE_0_9 2
+#define HE_CAP_MCS_CODE_0_10 3
+#define HE_CAP_MCS_CODE_0_11 4
+#define HE_CAP_MCS_CODE_SIZE 3 /* num bits for 1-stream */
+#define HE_CAP_MCS_CODE_MASK 0x7 /* mask for 1-stream */
+
+/**
+ * IEEE Draft P802.11ax D0.5 Figure 9-589cm
+ * - Defines for TX & RX BW BITMAP
+ *
+ * (Size of TX BW bitmap = RX BW bitmap = 5 bits)
+ */
+#define HE_MCS_NSS_TX_BW_MASK 0x07c0
+#define HE_MCS_NSS_TX_BW_SHIFT 6
+
+#define HE_MCS_NSS_RX_BW_MASK 0xf800
+#define HE_MCS_NSS_RX_BW_SHIFT 11
+
+#define HE_CAP_MCS_MAP_NSS_MAX 8 /* Max number of streams possible */
+
+#define HE_MAX_RU_COUNT 4 /* Max number of RU allocation possible */
+
+#define HE_NSSM1_IDX 0 /* Offset of NSSM1 field */
+#define HE_NSSM1_LEN 3 /* length of NSSM1 field in bits */
+
+#define HE_RU_INDEX_MASK_IDX 3 /* Offset of RU index mask field */
+#define HE_RU_INDEX_MASK_LEN 4 /* length of RU Index mask field in bits */
+
+/* MU EDCA parameter set element */
+BWL_PRE_PACKED_STRUCT struct he_mu_ac_param_record {
+ uint8 aci_aifsn;
+ uint8 ecw_min_max;
+ uint8 muedca_timer;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_mu_ac_param_record he_mu_ac_param_record_t;
+
+BWL_PRE_PACKED_STRUCT struct he_muedca_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 mu_qos_info;
+ he_mu_ac_param_record_t param_ac[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_muedca_ie he_muedca_ie_t;
+
+#define HE_MU_EDCA_PARAM_UPD_CNT_IDX 0u /* EDCA Parameter Set Update Count */
+#define HE_MU_EDCA_PARAM_UPD_CNT_LEN 4u
+
+#define HE_MU_SIGA_SIGB_MCS_DPCU 0
+#define HE_MU_SIGA_SIGB_SYMS_DPCU 3u
+#define HE_MU_SIGA_GI_LTF_DPCU 3u
+
+/* For HE SU/RE SIG A : PLCP0 bit fields [32bit] */
+#define HE_SU_RE_SIGA_FORMAT_MASK 0x00000001
+#define HE_SU_RE_SIGA_RE_VAL 0x00000000
+#define HE_SU_RE_SIGA_SU_VAL 0x00000001
+#define HE_SU_RE_SIGA_FORMAT_SHIFT 0
+#define HE_SU_RE_SIGA_UL_DL_SHIFT 2
+#define HE_SU_RE_SIGA_MCS_MASK 0x00000078
+#define HE_SU_RE_SIGA_MCS_SHIFT 3
+#define HE_SU_RE_SIGA_DCM_MASK 0x00000080
+#define HE_SU_RE_SIGA_DCM_SHIFT 7
+#define HE_SU_RE_SIGA_BSS_COLOR_SHIFT 8 /* Bits 13:8 */
+#define HE_SU_RE_SIGA_BSS_COLOR_MASK 0x00003F00
+#define HE_SU_RE_SIGA_RSVD_PLCP0_VAL 0x00004000
+#define HE_SU_SIGA_BW_MASK 0x00180000
+#define HE_SU_SIGA_BW_SHIFT 19
+#define HE_RE_SIGA_TONE_MASK 0x00180000
+#define HE_RE_SIGA_TONE_SHIFT 19
+#define HE_SU_RE_SIGA_20MHZ_VAL 0x00000000
+#define HE_SU_RE_SIGA_40MHZ_VAL 0x00080000
+#define HE_SU_RE_SIGA_80MHZ_VAL 0x00100000
+#define HE_SU_RE_SIGA_160MHZ_VAL 0x00180000
+#define HE_SU_RE_SIGA_GI_LTF_MASK 0x00600000
+#define HE_SU_RE_SIGA_1xLTF_GI8us_VAL 0x00000000
+#define HE_SU_RE_SIGA_2xLTF_GI8us_VAL 0x00200000
+#define HE_SU_RE_SIGA_2xLTF_GI16us_VAL 0x00400000
+#define HE_SU_RE_SIGA_4xLTF_GI32us_VAL 0x00600000
+#define HE_SU_RE_SIGA_GI_LTF_SHIFT 21
+#define HE_SU_RE_SIGA_NSTS_MASK 0x03800000
+#define HE_SU_RE_SIGA_NSTS_SHIFT 23
+#define HE_SU_RE_SIGA_TXOP_PLCP0_MASK 0xFC000000
+#define HE_SU_RE_SIGA_TXOP_PLCP0_SHIFT 26
+
+/* For HE MU SIG A : PLCP0 bit fields [32bit] */
+#define HE_MU_SIGA_UL_DL_SHIFT 0
+#define HE_MU_SIGA_UL_TB_PPDU 0
+#define HE_MU_SIGA_SIGB_MCS_SHIFT 1
+#define HE_MU_SIGA_SIGB_DCM_SHIFT 4
+#define HE_MU_SIGA_SIGB_DCM_DISABLED 0
+#define HE_MU_SIGA_BW_SHIFT 15
+#define HE_MU_SIGA_BW_80_UNPUNCTURED 2
+#define HE_MU_SIGA_BW_SEC_20_PUNCTURED 4
+#define HE_MU_SIGA_BW_SEC_40_PUNCTURED 5
+#define HE_MU_SIGA_SIGB_SYMS_SHIFT 18
+#define HE_MU_SIGA_GI_LTF_SHIFT 23
+
+/* PLCP1 starts with B6 of HE SIG A 2 */
+
+/* For HE SU/RE SIG A : PLCP1 bit fields [16bit] */
+#define HE_SU_RE_SIGA_TXOP_PLCP1_MASK 0x0001
+#define HE_SU_RE_SIGA_TXOP_PLCP1_SHIFT 0
+#define HE_SU_RE_SIGA_CODING_MASK 0x0002
+#define HE_SU_RE_SIGA_CODING_SHIFT 1
+#define HE_SU_RE_SIGA_STBC_MASK 0x0008
+#define HE_SU_RE_SIGA_STBC_SHIFT 3
+#define HE_SU_RE_SIGA_BEAMFORM_MASK 0x0010
+#define HE_SU_RE_SIGA_BEAMFORM_SHIFT 4
+#define HE_SU_RE_SIGA_RSVD_PLCP1_VAL 0x0100
+
+/* For HE MU SIG A : PLCP1 bit fields [16bit] */
+#define HE_MU_SIGA_RSVD_SHIFT 1
+#define HE_MU_SIGA_LTF_SYMS_SHIFT 2
+
+/* PPE Threshold field (figure 9-589co) */
+#define HE_PPE_THRESH_NSS_RU_FSZ 3
+
+/* PPE Threshold Info field (figure 9-589cp) */
+/* ruc: RU Count; NSSnM1: NSSn - 1; RUmM1: RUm - 1 */
+/* bit offset in PPE Threshold field */
+#define HE_PPET16_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+ (HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6)
+
+#define HE_PPET8_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+ (HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6 + 3)
+
+/* Total PPE Threshold field byte length (Figure 9-589cq) */
+#define HE_PPE_THRESH_LEN(nss, ruc) \
+ (CEIL((HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((nss) * (ruc) * 6)), 8))
+
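HE_PPET16_BIT_OFFSET and HE_PPET8_BIT_OFFSET return bit offsets into the PPE Thresholds field (a 3-bit NSSM1 plus 4-bit RU Index Bitmask header, then 6 bits per stream/RU pair), and HE_PPE_THRESH_LEN rounds the total up to octets. For example, with two streams and an RU count of 2 the field is CEIL(7 + 2*2*6, 8) = 4 octets, and the PPET16 subfield for NSSnM1 = 1, RUmM1 = 1 starts at bit 25. A sketch converting such an offset into an octet/bit pair; the helper name is illustrative:

static void he_ppet16_locate(uint32 ruc, uint32 nss_m1, uint32 ru_m1,
	uint32 *byte_off, uint32 *bit_off)
{
	uint32 bit = HE_PPET16_BIT_OFFSET(ruc, nss_m1, ru_m1);

	*byte_off = bit / 8u;	/* octet within the PPE Thresholds field */
	*bit_off = bit % 8u;	/* bit within that octet */
}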
+/* RU Allocation Index encoding (table 9-262ae) */
+#define HE_RU_ALLOC_IDX_242 0 /* RU alloc: 242 tones */
+#define HE_RU_ALLOC_IDX_484 1 /* RU alloc: 484 tones - 40Mhz */
+#define HE_RU_ALLOC_IDX_996 2 /* RU alloc: 996 tones - 80Mhz */
+#define HE_RU_ALLOC_IDX_2x996 3 /* RU alloc: 2x996 tones - 80p80/160Mhz */
+
+/* Constellation Index encoding (table 9-262ac) */
+#define HE_CONST_IDX_BPSK 0
+#define HE_CONST_IDX_QPSK 1
+#define HE_CONST_IDX_16QAM 2
+#define HE_CONST_IDX_64QAM 3
+#define HE_CONST_IDX_256QAM 4
+#define HE_CONST_IDX_1024QAM 5
+#define HE_CONST_IDX_RSVD 6
+#define HE_CONST_IDX_NONE 7
+
+/* Min HE cap ie length when only 80Mhz is supported */
+#define HE_CAP_IE_MIN_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MIN_LEN)
+
+/* Max HE cap ie length considering MAX NSS and RU */
+#define HE_CAP_IE_MAX_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MAX_LEN + \
+ HE_PPE_THRESH_LEN(HE_CAP_MCS_MAP_NSS_MAX, HE_MAX_RU_COUNT))
+/**
+ * HE Operation IE (sec 9.4.2.219)
+ */
+/* HE Operation Parameters field (figure 9-589cr) */
+#define HE_OP_PARAMS_SIZE 3
+typedef uint8 he_op_parms_t[HE_OP_PARAMS_SIZE];
+
+#define HE_OP_BSS_COLOR_INFO 1
+typedef uint8 he_op_bsscolorinfo_t[HE_OP_BSS_COLOR_INFO];
+
+#define HE_BASIC_MCS_NSS_SIZE 2
+typedef uint8 he_basic_mcs_nss_set_t[HE_BASIC_MCS_NSS_SIZE];
+
+/* VHT_OP_INFO_LEN = 3 defined in 802.11.h file */
+typedef uint8 he_vht_opinfo_t[VHT_OP_INFO_LEN];
+
+#define HE_OP_MAX_BSSID_IND_LEN 1
+typedef uint8 he_max_bssid_ind_t[HE_OP_MAX_BSSID_IND_LEN];
+
+/* 6G Operation Information Element field (Figure 9-788k) */
+#define HE_6G_OP_INFO 5
+typedef uint8 he_6g_opinfo_t[HE_6G_OP_INFO];
+
+/* HE Operation Parameters for D3.0 */
+#define HE_OP_DEFAULT_PE_DUR_IDX 0 /* Default PE Duration */
+#define HE_OP_DEFAULT_PE_DUR_FSZ 3
+#define HE_OP_TWT_REQUIRED_IDX 3 /* TWT Required */
+#define HE_OP_TWT_REQUIRED_FSZ 1
+#define HE_OP_TXOP_DUR_RTS_THOLD_IDX 4 /* TXOP Duration RTS Threshold */
+#define HE_OP_TXOP_DUR_RTS_THOLD_FSZ 10
+#define HE_OP_VHT_OP_INFO_PRESENT_IDX 14 /* VHT Operation Information Present */
+#define HE_OP_VHT_OP_INFO_PRESENT_FSZ 1
+#define HE_OP_CO_LOCATED_BSS_IDX 15 /* Co-Located BSS */
+#define HE_OP_CO_LOCATED_BSS_FSZ 1
+#define HE_OP_ER_SU_DISABLE_IDX 16 /* ER SU Disable */
+#define HE_OP_ER_SU_DISABLE_FSZ 1
+#define HE_OP_6G_OP_INFO_PRESENT_IDX 17 /* 6G Operation Information Present */
+#define HE_OP_6G_OP_INFO_PRESENT_FSZ 1
+#define HE_OP_RESERVED_IDX 18 /* Reserved */
+#define HE_OP_RESERVED_FSZ 6
+
+/* BSS Color for D3.0 */
+#define HE_OP_BSS_COLOR_IDX 0 /* BSS Color */
+#define HE_OP_BSS_COLOR_FSZ 6
+#define HE_OP_PARTIAL_BSS_COLOR_IDX 6 /* Partial BSS color */
+#define HE_OP_PARTIAL_BSS_COLOR_FSZ 1
+#define HE_OP_BSS_COLOR_DIS_IDX 7 /* BSS Color Disabled */
+#define HE_OP_BSS_COLOR_DIS_FSZ 1
+
+/* 6 GHz Operation Information Element for D8.0 */
+#define HE_6G_OP_INFO_PRI_CHANNEL_IDX 0 /* Primary channel */
+#define HE_6G_OP_INFO_PRI_CHANNEL_FSZ 8
+#define HE_6G_OP_INFO_CONTROL_IDX 8 /* Control Field */
+#define HE_6G_OP_INFO_CONTROL_FSZ 8
+#define HE_6G_OP_INFO_FREQ_SEG0_IDX 16 /* Center Frequency segment0 */
+#define HE_6G_OP_INFO_FREQ_SEG0_FSZ 8
+#define HE_6G_OP_INFO_FREQ_SEG1_IDX 24 /* Center Frequency segment1 */
+#define HE_6G_OP_INFO_FREQ_SEG1_FSZ 8
+#define HE_6G_OP_INFO_MIN_RATE_IDX 32 /* Min Rate */
+#define HE_6G_OP_INFO_MIN_RATE_FSZ 8
+#define HE_6G_OP_INFO_CONTROL_IDX_CW_FSZ 2
+
+/* Control Field Format (Figure 9-788I) */
+#define HE_6G_CONTROL_CHANNEL_WIDTH_IDX 0 /* Channel Width */
+#define HE_6G_CONTROL_CHANNEL_WIDTH_FSZ 2
+#define HE_6G_CONTROL_DUP_BCN_IDX 2 /* Duplicate beacon */
+#define HE_6G_CONTROL_DUP_BCN_FSZ 1
+#define HE_6G_CONTROL_REG_INFO_IDX 3 /* Regulatory info */
+#define HE_6G_CONTROL_REG_INFO_FSZ 3
+
+/* HE Operation element */
+BWL_PRE_PACKED_STRUCT struct he_op_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ he_op_parms_t parms;
+ he_op_bsscolorinfo_t bsscolorinfo;
+ he_basic_mcs_nss_set_t mcs_nss_op; /* Basic HE MCS & NSS Set */
+ /* he_vht_opinfo_t vht_opinfo; */ /* VHT Operation Information element */
+ /* he_max_bssid_ind_t max_bssid_ind; */ /* Max Co-Hosted BSSID Indicator element */
+ /* he_6g_opinfo_t he_6g_opinfo; */ /* 6 GHz Operation Information element */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_op_ie he_op_ie_t;
+
+/* The Max HE MCS For n SS subfield (where n = 1, ..., 8) is encoded as follows:
+ * P802.11ax D1.1 P94L53 - P94L61:
+ */
+#define HE_OP_MCS_CODE_0_7 0
+#define HE_OP_MCS_CODE_0_8 1
+#define HE_OP_MCS_CODE_0_9 2
+#define HE_OP_MCS_CODE_0_10 3
+#define HE_OP_MCS_CODE_0_11 4
+#define HE_OP_MCS_CODE_NONE 7
+#define HE_OP_MCS_CODE_SIZE 3 /* num bits */
+#define HE_OP_MCS_CODE_MASK 0x7 /* mask for 1-stream */
+
+/* Defines for The Max HE MCS For n SS subfield (where n = 1, ..., 8) */
+#define HE_OP_MCS_NSS_SET_MASK 0x00ffffff /* Field is to be 24 bits long */
+#define HE_OP_MCS_NSS_GET_SS_IDX(nss) (((nss)-1) * HE_OP_MCS_CODE_SIZE)
+#define HE_OP_MCS_NSS_GET_MCS(nss, mcs_nss_map) \
+ (((mcs_nss_map) >> HE_OP_MCS_NSS_GET_SS_IDX(nss)) & HE_OP_MCS_CODE_MASK)
+#define HE_OP_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \
+ do { \
+ (mcs_nss_map) &= (~(HE_OP_MCS_CODE_MASK << HE_OP_MCS_NSS_GET_SS_IDX(nss))); \
+ (mcs_nss_map) |= (((mcs_code) & HE_OP_MCS_CODE_MASK) \
+ << HE_OP_MCS_NSS_GET_SS_IDX(nss)); \
+ (mcs_nss_map) &= (HE_OP_MCS_NSS_SET_MASK); \
+ } while (0)
+
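The HE_OP_MCS_NSS_* accessors use the 3-bit-per-stream encoding of the Basic HE MCS And NSS Set, where HE_OP_MCS_CODE_NONE marks an unsupported stream. A minimal, assumed usage sketch; the helper name is not from the driver:

static int he_op_nss_supported(uint32 mcs_nss_map, uint32 nss)
{
	/* a stream count is usable unless its code is HE_OP_MCS_CODE_NONE (7) */
	return HE_OP_MCS_NSS_GET_MCS(nss, mcs_nss_map) != HE_OP_MCS_CODE_NONE;
}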
+#define HE_OP_IE_MIN_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN)
+#define HE_OP_IE_MAX_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN + VHT_OP_INFO_LEN +\
+ HE_OP_MAX_BSSID_IND_LEN + HE_6G_OP_INFO)
+
+/* bit position and field width */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_IDX 0 /* New BSSColor info */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_FSZ 6
+
+/* HE Bsscolor change element */
+BWL_PRE_PACKED_STRUCT struct he_bsscolor_change_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 color_switch_cntdwn;
+ uint8 new_bsscolor_info;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_bsscolor_change_ie he_bsscolor_change_ie_t;
+
+/*
+ * HE 6 GHz Band Capabilities element (sec 9.4.2.263)
+ * Capabilities Information field format (figure 9-788aj)
+ */
+
+#define HE_6GBAND_CAP_IE_SIZE 2
+typedef uint8 he_6gband_cap_t[HE_6GBAND_CAP_IE_SIZE];
+
+/* HE 6 GHz Band Capabilities */
+#define HE_6GBAND_MPDU_STRT_SPACE_IDX 0 /* Minimum MPDU Start Spacing */
+#define HE_6GBAND_MPDU_STRT_SPACE_FSZ 3
+#define HE_6GBAND_MAX_AMPDU_LENEXP_IDX 3 /* Maximum A-MPDU Length Exponent */
+#define HE_6GBAND_MAX_AMPDU_LENEXP_FSZ 3
+#define HE_6GBAND_MAX_MPDU_LEN_IDX 6 /* Maximum MPDU Length */
+#define HE_6GBAND_MAX_MPDU_LEN_FSZ 2
+/* B8 is reserved */
+#define HE_6GBAND_SM_PWRSAVE_IDX 9 /* SM Power Save */
+#define HE_6GBAND_SM_PWRSAVE_FSZ 2
+#define HE_6GBAND_RD_RESP_IDX 11 /* RD Responder */
+#define HE_6GBAND_RD_RESP_FSZ 1
+#define HE_6GBAND_RXANT_PAT_IDX 12 /* Rx Antenna Pattern Consistency */
+#define HE_6GBAND_RXANT_PAT_FSZ 1
+#define HE_6GBAND_TXANT_PAT_IDX 13 /* Tx Antenna Pattern Consistency */
+#define HE_6GBAND_TXANT_PAT_FSZ 1
+/* B14-15 are reserved */
+
+BWL_PRE_PACKED_STRUCT struct he_6gband_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ he_6gband_cap_t he_6gband_cap;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_6gband_cap_ie he_6gband_cap_ie_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* HE Action Frame */
+#define HE_AF_CAT_OFF 0
+#define HE_AF_ACT_OFF 1
+
+/* TWT Setup */
+#define HE_AF_TWT_SETUP_TOKEN_OFF 2
+#define HE_AF_TWT_SETUP_TWT_IE_OFF 3
+
+/* TWT Teardown */
+#define HE_AF_TWT_TEARDOWN_FLOW_OFF 2
+
+/* TWT Information */
+#define HE_AF_TWT_INFO_OFF 2
+
+/* HE Action ID */
+#define HE_ACTION_TWT_SETUP 1
+#define HE_ACTION_TWT_TEARDOWN 2
+#define HE_ACTION_TWT_INFO 3
+
+/* HE Basic trigger frame common info fields */
+#define HE_TRIG_CMNINFO_SZ 8
+typedef uint8 he_trig_cmninfo_set_t[HE_TRIG_CMNINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_CMNINFO_FRMTYPE_INDX 0 /* Trigger frame type */
+#define HE_TRIG_CMNINFO_FRMTYPE_FSZ 4
+#define HE_TRIG_CMNINFO_LSIGLEN_INDX 4 /* L-sig length */
+#define HE_TRIG_CMNINFO_LSIGLEN_FSZ 12
+#define HE_TRIG_CMNINFO_CASCADEIND_INDX 16 /* Cascade indication */
+#define HE_TRIG_CMNINFO_CASCADEIND_FSZ 1
+#define HE_TRIG_CMNINFO_CSREQ_INDX 17 /* Carrier sense indication */
+#define HE_TRIG_CMNINFO_CSREQ_FSZ 1
+#define HE_TRIG_CMNINFO_BWINFO_INDX 18 /* Bw info */
+#define HE_TRIG_CMNINFO_BWINFO_FSZ 2
+#define HE_TRIG_CMNINFO_GI_LTF_INDX 20 /* Cp-LTF size */
+#define HE_TRIG_CMNINFO_GI_LTF_FSZ 2
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_INDX 22 /* HE-LTF mask enable */
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_FSZ 1
+#define HE_TRIG_CMNINFO_HELTF_SYM_INDX 23 /* HE-LTF symbols */
+#define HE_TRIG_CMNINFO_HELTF_SYM_FSZ 3
+#define HE_TRIG_CMNINFO_STBC_INDX 26 /* STBC support */
+#define HE_TRIG_CMNINFO_STBC_FSZ 1
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_INDX 27 /* LDPC extra symbol */
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_FSZ 1
+#define HE_TRIG_CMNINFO_AP_TXPWR_INDX 28 /* AP TX power */
+#define HE_TRIG_CMNINFO_AP_TXPWR_FSZ 6
+#define HE_TRIG_CMNINFO_AFACT_INDX 34 /* a-factor */
+#define HE_TRIG_CMNINFO_AFACT_FSZ 2
+#define HE_TRIG_CMNINFO_PEDISAMBIG_INDX 36 /* PE disambiguity */
+#define HE_TRIG_CMNINFO_PEDISAMBIG_FSZ 1
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_INDX 37 /* spatial re-use */
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_FSZ 16
+#define HE_TRIG_CMNINFO_DOPPLER_INDX 53 /* Doppler support */
+#define HE_TRIG_CMNINFO_DOPPLER_FSZ 1
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_INDX 54 /* rsvd bits from HE-SIGA */
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_FSZ 9
+#define HE_TRIG_CMNINFO_RSVD_INDX 63 /* reserved bit from HE-SIGA */
+#define HE_TRIG_CMNINFO_RSVD_FSZ 1
+
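Each _INDX/_FSZ pair names a bit offset and width inside the packed common-info block (he_trig_cmninfo_set_t), with bits taken LSB-first from each octet. A generic getter might look roughly like the sketch below; the helper name is illustrative and the driver's own bit-field routines may differ:

static uint32 he_getbits(const uint8 *buf, uint32 idx, uint32 fsz)
{
	uint32 val = 0;
	uint32 i;

	for (i = 0; i < fsz; i++) {
		uint32 bit = idx + i;

		if (buf[bit / 8u] & (1u << (bit % 8u)))
			val |= (1u << i);
	}
	return val;
}

/* e.g. he_getbits(cmninfo, HE_TRIG_CMNINFO_LSIGLEN_INDX, HE_TRIG_CMNINFO_LSIGLEN_FSZ) */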
+/* HE Basic trigger frame user info fields */
+#define HE_TRIG_USRINFO_SZ 5
+typedef uint8 he_trig_usrinfo_set_t[HE_TRIG_USRINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_USRINFO_AID_INDX 0 /* AID */
+#define HE_TRIG_USRINFO_AID_FSZ 12
+#define HE_TRIG_USRINFO_RU_ALLOC_INDX 12 /* RU allocation index */
+#define HE_TRIG_USRINFO_RU_ALLOC_FSZ 8
+#define HE_TRIG_USRINFO_CODING_INDX 20 /* coding type (BCC/LDPC) */
+#define HE_TRIG_USRINFO_CODING_FSZ 1
+#define HE_TRIG_USRINFO_MCS_INDX 21 /* MCS index value */
+#define HE_TRIG_USRINFO_MCS_FSZ 4
+#define HE_TRIG_USRINFO_DCM_INDX 25 /* Dual carrier modulation */
+#define HE_TRIG_USRINFO_DCM_FSZ 1
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_INDX 26 /* stream offset */
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_FSZ 3
+#define HE_TRIG_USRINFO_SSALLOC_NSS_INDX 29 /* number of spatial streams */
+#define HE_TRIG_USRINFO_SSALLOC_NSS_FSZ 3
+#define HE_TRIG_USRINFO_TARGET_RSSI_INDX 32 /* Target RSSI */
+#define HE_TRIG_USRINFO_TARGET_RSSI_FSZ 7
+#define HE_TRIG_USRINFO_RSVD_INDX 39 /* Reserved bit */
+#define HE_TRIG_USRINFO_RSVD_FSZ 1
+
+/* Different types of trigger frame */
+#define HE_TRIG_TYPE_BASIC_FRM 0 /* basic trigger frame */
+#define HE_TRIG_TYPE_BEAM_RPT_POLL_FRM 1 /* beamforming report poll frame */
+#define HE_TRIG_TYPE_MU_BAR_FRM 2 /* MU-BAR frame */
+#define HE_TRIG_TYPE_MU_RTS_FRM 3 /* MU-RTS frame */
+#define HE_TRIG_TYPE_BSR_FRM 4 /* Buffer status report poll */
+
+/* HE Timing related parameters (802.11ax D1.2 Table 28-9) */
+#define HE_T_LEG_STF 8
+#define HE_T_LEG_LTF 8
+#define HE_T_LEG_LSIG 4
+#define HE_T_RL_SIG 4
+#define HE_T_SIGA 8
+#define HE_T_STF 4 /* STF for SU / MU HE PPDUs */
+#define HE_T_TB_PPDU_STF 8 /* STF for HE trigger based PPDUs */
+#define HE_T_LEG_PREAMBLE (HE_T_LEG_STF + HE_T_LEG_LTF + HE_T_LEG_LSIG)
+#define HE_T_LEG_SYMB 4
+#define HE_RU_26_TONE 26
+#define HE_RU_52_TONE 52
+#define HE_RU_106_TONE 106
+#define HE_RU_242_TONE 242
+#define HE_RU_484_TONE 484
+#define HE_RU_996_TONE 996
+#define HE_MAX_26_TONE_RU_INDX 36
+#define HE_MAX_52_TONE_RU_INDX 52
+#define HE_MAX_106_TONE_RU_INDX 60
+#define HE_MAX_242_TONE_RU_INDX 64
+#define HE_MAX_484_TONE_RU_INDX 66
+
+/**
+ * Ref : (802.11ax D3.0 Figure 9-27 Page 85)
+ */
+#define HE_BAR_CONTROL_SZ 2
+typedef uint8 he_bar_control_set_t[HE_BAR_CONTROL_SZ];
+
+/* bit position and field width */
+#define HE_BAR_CONTROL_ACK_POLICY_INDX 0 /* BAR ack policy */
+#define HE_BAR_CONTROL_ACK_POLICY_FSZ 1
+#define HE_BAR_CONTROL_ACK_TYPE_INDX 1 /* BAR ack type */
+#define HE_BAR_CONTROL_ACK_TYPE_FSZ 4
+#define HE_BAR_CONTROL_RSVD_INDX 5 /* Reserved */
+#define HE_BAR_CONTROL_RSVD_FSZ 7
+#define HE_BAR_CONTROL_TID_INFO_INDX 12 /* BAR TID INFO */
+#define HE_BAR_CONTROL_TID_INFO_FSZ 4
+
+#define BAR_TYPE_BASIC 0
+#define BAR_TYPE_EXT_COMPRESSED 1
+#define BAR_TYPE_COMPRESSED 2
+#define BAR_TYPE_MULTI_TID 3
+
+/**
+ * Ref : 802.11-2016.pdf Page 674
+ * Figure 9-28 Block Ack Starting Sequence Control subfield
+ */
+#define HE_BAR_INFO_SZ 2
+typedef uint8 he_cba_bar_info_set_t[HE_BAR_INFO_SZ];
+
+/* bit position and field width */
+#define HE_CBA_BAR_INFO_FRAGNUM_INDX 0 /* Fragment Number */
+#define HE_CBA_BAR_INFO_FRAGNUM_FSZ 4
+#define HE_CBA_BAR_INFO_SEQNUM_INDX 4 /* Starting Sequence Number */
+#define HE_CBA_BAR_INFO_SEQNUM_FSZ 12
+
+/**
+ * ref: (802.11ax D1.2 Table 28-9 Page 285)
+ *
+ * - for calculation purpose - in multiples of 10 (*10)
+ */
+#define HE_T_LTF_1X 32
+#define HE_T_LTF_2X 64
+#define HE_T_LTF_4X 128
+#define HE_T_SYM1 136 /* OFDM symbol duration with base GI */
+#define HE_T_SYM2 144 /* OFDM symbol duration with double GI */
+#define HE_T_SYM4 160 /* OFDM symbol duration with quad GI */
+
+#define HE_N_LEG_SYM 3 /* bytes per legacy symbol */
+#define HE_N_TAIL 6 /* tail field bits for BCC */
+#define HE_N_SERVICE 16 /* bits in service field */
+#define HE_T_MAX_PE 16 /* max Packet extension duration */
+#endif /* _802_11ax_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11e.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11e.h
index de6b1c9..f8c3332 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11e.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11e.h
@@ -1,7 +1,9 @@
/*
* 802.11e protocol header file
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11s.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11s.h
index 13976a6..3ad38ba 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11s.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.11s.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to 802.11s Mesh
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.1d.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.1d.h
index 74b17c0..fac083c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.1d.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.1d.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to 802.1D
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.3.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.3.h
index bc676e7..27c73b0 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.3.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/802.3.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to 802.3
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/aidmp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/aidmp.h
index d24cee9..fac1f22 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/aidmp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -1,7 +1,9 @@
/*
* Broadcom AMBA Interconnect definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
index 27ad185..f997bdc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -1,7 +1,9 @@
/*
* BCM common config options
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
index 0b85696..9bb0154 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -35,7 +35,9 @@
* and instrumentation on top of the heap, without modifying the heap
* allocation implementation.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_ring.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_ring.h
index 0ccf7d9..9b0116e 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_ring.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcm_ring.h
@@ -6,7 +6,9 @@
*
* NOTE: A ring of size N, may only hold N-1 elements.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -101,7 +103,9 @@
* private L1 data cache.
* +----------------------------------------------------------------------------
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
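For context on the note above that a ring of size N may only hold N-1 elements: with read/write indices, the "empty" test (rd == wr) must stay distinguishable from the "full" test, so one slot is always kept unused. A minimal standalone sketch with illustrative names, not the driver's bcm_ring API:

#include <stdbool.h>

#define RING_SIZE 8            /* illustrative capacity; holds at most 7 items */

struct demo_ring {
    int buf[RING_SIZE];
    unsigned int rd;           /* consumer index */
    unsigned int wr;           /* producer index */
};

static bool demo_ring_empty(const struct demo_ring *r)
{
    return r->rd == r->wr;     /* empty: indices equal */
}

static bool demo_ring_full(const struct demo_ring *r)
{
    /* full: advancing wr would collide with rd; one slot stays unused */
    return ((r->wr + 1) % RING_SIZE) == r->rd;
}

static bool demo_ring_put(struct demo_ring *r, int v)
{
    if (demo_ring_full(r))
        return false;
    r->buf[r->wr] = v;
    r->wr = (r->wr + 1) % RING_SIZE;
    return true;
}

static bool demo_ring_get(struct demo_ring *r, int *v)
{
    if (demo_ring_empty(r))
        return false;
    *v = r->buf[r->rd];
    r->rd = (r->rd + 1) % RING_SIZE;
    return true;
}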
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmarp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmarp.h
index ddd9f3c..747bc78 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmarp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmarp.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to ARP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -50,6 +52,7 @@
#define ARP_DATA_LEN 28 /* ARP data length */
#define HTYPE_ETHERNET 1 /* htype for ethernet */
+#define HTYPE_WLAN 6 /* htype for IEEE 802 */
BWL_PRE_PACKED_STRUCT struct bcmarp {
uint16 htype; /* Header type (1 = ethernet) */
uint16 ptype; /* Protocol type (0x800 = IP) */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmbloom.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmbloom.h
index be54a5d..64b835d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmbloom.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmbloom.h
@@ -1,7 +1,9 @@
/*
* Bloom filter support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmcdc.h
index 936ada0..3059b73 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmcdc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -4,7 +4,9 @@
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdefs.h
index 8306d3a..a52b065 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdefs.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -1,7 +1,9 @@
/*
* Misc system wide definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -51,8 +53,8 @@
* Define these diagnostic macros to help suppress cast-qual warning
* until all the work can be done to fix the casting issues.
*/
-#if defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
+#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6)))
#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
@@ -63,6 +65,21 @@
#define GCC_DIAGNOSTIC_POP()
#endif /* Diagnostic macros not defined */
+/* Support clang for MACOSX compiler */
+#ifdef __clang__
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wformat-nonliteral\"")
+#define CLANG_DIAGNOSTIC_POP() \
+ _Pragma("clang diagnostic pop")
+#else
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
+#define CLANG_DIAGNOSTIC_POP()
+#endif // endif
/* Compile-time assert can be used in place of ASSERT if the expression evaluates
* to a constant at compile time.
*/
@@ -74,7 +91,7 @@
typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
}
#else
-#define STATIC_ASSERT(expr) compiletime_assert(expr, "Compile time condition failure");
+#define STATIC_ASSERT(expr) compiletime_assert(expr, "Compile time condition failure");
#endif /* __GNUC__ <= 4 && __GNUC_MINOR__ >= 4 */
/* Reclaiming text and data :
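To illustrate how the clang diagnostic wrappers added above are meant to be used, here is a standalone sketch; the wrapped helper function is hypothetical, and the macros are mirrored locally since in the driver they come from bcmdefs.h:

/* Minimal local mirror of the clang pragma wrappers added above. */
#ifdef __clang__
#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
    _Pragma("clang diagnostic push") \
    _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#define CLANG_DIAGNOSTIC_POP() \
    _Pragma("clang diagnostic pop")
#else
#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
#define CLANG_DIAGNOSTIC_POP()
#endif

/* Hypothetical helper that wants a non-const pointer. */
static void demo_consume(void *p) { (void)p; }

static void demo_cast_away_const(const char *msg)
{
    CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
    demo_consume((void *)msg);   /* cast drops const; warning suppressed only here */
    CLANG_DIAGNOSTIC_POP()
}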
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdevs.h
index f639144..c49209f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdevs.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -1,7 +1,9 @@
/*
* Broadcom device-specific manifest constants.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -55,6 +57,7 @@
#define VENDOR_BROADCOM_SDIO 0x00BF
/* DONGLE VID/PIDs */
+#define CY_DNGL_VID 0x04b4
#define BCM_DNGL_VID 0x0a5c
#define BCM_DNGL_BL_PID_4328 0xbd12
#define BCM_DNGL_BL_PID_4322 0xbd13
@@ -269,10 +272,27 @@
#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device */
#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4376_D11AC_ID 0x4435 /* 4376 802.11ac dualband device */
+#define BCM4376_D11AC2G_ID 0x4436 /* 4376 802.11ac 2.4G device */
+#define BCM4376_D11AC5G_ID 0x4437 /* 4376 802.11ac 5G device */
+
+#define BCM4378_D11AC_ID 0x4425 /* 4378 802.11ac dualband device */
+#define BCM4378_D11AC2G_ID 0x4426 /* 4378 802.11ac 2.4G device */
+#define BCM4378_D11AC5G_ID 0x4427 /* 4378 802.11ac 5G device */
+#endif /* CHIPS_CUSTOMER_HW6 */
+
#define BCM4361_D11AC_ID 0x441f /* 4361 802.11ac dualband device */
#define BCM4361_D11AC2G_ID 0x4420 /* 4361 802.11ac 2.4G device */
#define BCM4361_D11AC5G_ID 0x4421 /* 4361 802.11ac 5G device */
+#define BCM4362_D11AX_ID 0x4490 /* 4362 802.11ax dualband device */
+#define BCM4362_D11AX2G_ID 0x4491 /* 4362 802.11ax 2.4G device */
+#define BCM4362_D11AX5G_ID 0x4492 /* 4362 802.11ax 5G device */
+#define BCM43751_D11AX_ID 0x4490 /* 43751 802.11ax dualband device */
+#define BCM43751_D11AX2G_ID 0x4491 /* 43751 802.11ax 2.4G device */
+#define BCM43751_D11AX5G_ID 0x4492 /* 43751 802.11ax 5G device */
+
#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */
#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */
#define BCM4364_D11AC5G_ID 0x446b /* 4364 802.11ac 5G device */
@@ -294,6 +314,26 @@
#define BCM4375_D11AX2G_ID 0x4476 /* 4375 802.11ax 2.4G device */
#define BCM4375_D11AX5G_ID 0x4477 /* 4375 802.11ax 5G device */
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4377_D11AX_ID 0x4480 /* 4377 802.11ax dualband device */
+#define BCM4377_D11AX2G_ID 0x4481 /* 4377 802.11ax 2.4G device */
+#define BCM4377_D11AX5G_ID 0x4482 /* 4377 802.11ax 5G device */
+
+#define BCM4377_M_D11AX_ID 0x4488 /* 4377 802.11ax dualband device with multifunction */
+
+/* TBD change below values */
+#define BCM4367_D11AC_ID 0x4422
+#define BCM4367_D11AC2G_ID 0x4423
+#define BCM4367_D11AC5G_ID 0x4424
+#endif /* CHIPS_CUSTOMER_HW6 */
+
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4368_D11AC_ID 0x442f
+#define BCM4368_D11AC2G_ID 0x4430
+#define BCM4368_D11AC5G_ID 0x4431
+#define BCM4368_D11ACBT_ID 0x5f30
+#endif /* CHIPS_CUSTOMER_HW6 */
+
#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */
#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */
#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */
@@ -510,14 +550,33 @@
case BCM4359_CHIP_ID
#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4368_CHIP_ID 0x4368 /* 4368 chipcommon chipid */
+#define BCM4368_CHIP(chipid) (CHIPID(chipid) == BCM4368_CHIP_ID)
+#define BCM4368_CHIP_GRPID BCM4367_CHIP_ID: \
+ case BCM4368_CHIP_ID
+#endif /* CHIPS_CUSTOMER_HW6 */
+
#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */
#define BCM4357_CHIP_ID 0x4357 /* 4357 chipcommon chipid */
#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */
#define BCM4369_CHIP_ID 0x4369 /* 4369/ chipcommon chipid */
+#define BCM4373_CHIP_ID 0x4373 /* 4373/ chipcommon chipid */
#define BCM4375_CHIP_ID 0x4375 /* 4375/ chipcommon chipid */
#define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */
+#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */
+#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4369_CHIP_ID 0x4369 /* 4369/ chipcommon chipid */
+#define BCM4375_CHIP_ID 0x4375 /* 4375/ chipcommon chipid */
+#define BCM4376_CHIP_ID 0x4376 /* 4376 chipcommon chipid */
+#define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */
+#define BCM4378_CHIP_ID 0x4378 /* 4378 chipcommon chipid */
+#define BCM4387_CHIP_ID 0x4387 /* 4387 chipcommon chipid */
+#endif /* CHIPS_CUSTOMER_HW6 */
-#define CYW55560_CHIP_ID 0xD908 /* CYW55560 chipcommon chipid */
+#define CYW55500_CHIP_ID 0xD8CC /* CYW55500 chipcommon chipid */
+#define CYW55560_CHIP_ID 0xD908 /* CYW55560 chipcommon chipid */
#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \
(CHIPID(chipid) == BCM4357_CHIP_ID) || \
@@ -531,6 +590,24 @@
#define BCM4369_CHIP_GRPID BCM4369_CHIP_ID: \
case BCM4377_CHIP_ID
+#define BCM4362_CHIP(chipid) (CHIPID(chipid) == BCM4362_CHIP_ID)
+#define BCM4362_CHIP_GRPID BCM4362_CHIP_ID
+
+#ifdef CHIPS_CUSTOMER_HW6
+#define BCM4378_CHIP(chipid) ((CHIPID(chipid) == BCM4378_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4376_CHIP_ID))
+#define BCM4378_CHIP_GRPID BCM4378_CHIP_ID: \
+ case BCM4376_CHIP_ID
+
+/* BCM4367 */
+#define BCM4367_CHIP_ID 0x4367 /* 4367 chipcommon chipid */
+#define CASE_BCM4367_CHIP case BCM4367_CHIP_ID
+#define BCM4367_CHIP(chipid) (CHIPID(chipid) == BCM4367_CHIP_ID)
+
+#define BCM4387_CHIP(chipid) (CHIPID(chipid) == BCM4387_CHIP_ID)
+#define BCM4387_CHIP_GRPID BCM4387_CHIP_ID
+#endif /* CHIPS_CUSTOMER_HW6 */
+
#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */
#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */
#define BCM43664_CHIP_ID 43664 /* 4366E chipcommon chipid */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdhcp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdhcp.h
index b87145c..b2ee55e 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdhcp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmdhcp.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to DHCP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmendian.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmendian.h
index 62c8889..4f8754c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmendian.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -1,7 +1,9 @@
/*
* Byte order utilities
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -108,10 +110,10 @@
#define load64_ua(a) ltoh64_ua(a)
#define store64_ua(a, v) htol64_ua_store(v, a)
-#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
-#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
-#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
-#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+#define _LTOH16_UA(cp) (uint16)((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) (uint32)((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (uint16)(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (uint32)(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \
((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \
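A standalone sketch of what the unaligned little-endian load macros compute, and why the added uint16/uint32 casts matter: the byte operands are promoted to int before the shifts and ORs, so the cast pins the result back to the intended width. Standard <stdint.h> types are used here instead of the driver's typedefs:

#include <stdint.h>
#include <stdio.h>

/* Little-endian 16/32-bit loads from a possibly unaligned byte pointer,
 * mirroring _LTOH16_UA()/_LTOH32_UA() with explicit result widths.
 */
#define DEMO_LTOH16_UA(cp) ((uint16_t)((cp)[0] | ((cp)[1] << 8)))
#define DEMO_LTOH32_UA(cp) ((uint32_t)((cp)[0] | ((cp)[1] << 8) | \
                            ((cp)[2] << 16) | ((cp)[3] << 24)))

int main(void)
{
    const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };

    printf("16-bit LE load: 0x%04x\n", (unsigned)DEMO_LTOH16_UA(buf));        /* 0x5678 */
    printf("32-bit LE load: 0x%08lx\n", (unsigned long)DEMO_LTOH32_UA(buf));  /* 0x12345678 */
    return 0;
}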
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmeth.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmeth.h
index a25c5cb..5ec9aff 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmeth.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmeth.h
@@ -1,7 +1,9 @@
/*
* Broadcom Ethernettype protocol definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmevent.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmevent.h
index 61b4aad..36de297 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmevent.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmevent.h
@@ -3,7 +3,9 @@
*
* Dependencies: bcmeth.h
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -39,7 +41,6 @@
#define _BCMEVENT_H_
#include <typedefs.h>
-#include <bcmwifi_channels.h>
/* #include <ethernet.h> -- TODO: req., excluded to overwhelming coupling (break up ethernet.h) */
#include <bcmeth.h>
#if defined(DNGL_EVENT_SUPPORT)
@@ -284,7 +285,6 @@
#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN BAckoff event */
#define WLC_E_AGGR_EVENT 169 /* Aggregated event */
-#define WLC_E_AP_CHAN_CHANGE 170 /* AP channel change event propage to User */
#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */
#define WLC_E_SCAN_START 172 /* Deprecated */
#define WLC_E_SCAN 172 /* Scan event */
@@ -292,13 +292,28 @@
#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */
#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */
#define WLC_E_ADPS 176 /* ADPS event */
-#define WLC_E_EXT_AUTH_REQ 187 /* authentication request received */
-#define WLC_E_EXT_AUTH_FRAME_RX 188 /* authentication request received */
-#define WLC_E_MGMT_FRAME_TXSTATUS 189 /* mgmt frame Tx complete */
-#define WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE 190
-#define WLC_E_LAST 191 /* highest val + 1 for range checking */
-#if (WLC_E_LAST > 191)
-#error "WLC_E_LAST: Invalid value for last event; must be <= 191."
+#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */
+#define WLC_E_HWA 178 /* HWA events */
+#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */
+#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */
+#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */
+#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */
+#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */
+#define WLC_E_WA_LQM 184 /* link quality monitoring */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel
+ * dwell time complete
+ */
+#define WLC_E_WSEC 186 /* wsec keymgmt event */
+#define WLC_E_EXT_AUTH_REQ 187 /* authentication request received */
+#define WLC_E_EXT_AUTH_FRAME_RX 188 /* authentication request received */
+#define WLC_E_MGMT_FRAME_TXSTATUS 189 /* mgmt frame Tx complete */
+#define WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE 190
+#define WLC_E_IND_DOS_STATUS 191
+#define WLC_E_LDF_HOGGER 192 /* Detection Hogger Squasher -Cambium */
+#define WLC_E_DLTRO 193 /* DHCP lease time renew offload */
+#define WLC_E_LAST 194 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 194)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 193."
#endif /* WLC_E_LAST */
/* define an API for getting the string name of an event */
@@ -334,6 +349,9 @@
#endif /* BCMCCX */
#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
+#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */
+#define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */
+#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */
#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
/* 4-way handshake event type */
@@ -384,6 +402,11 @@
struct wl_event_sdb_data values[WL_MAX_BSSCFG];
} wl_event_sdb_trans_t;
+/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */
+#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */
+#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */
+#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */
+
/* roam reason codes */
#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */
#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */
@@ -403,6 +426,8 @@
#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */
#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */
#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */
+#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */
+#define WLC_E_REASON_LAST 16 /* NOTE: increment this as you add reasons above */
/* prune reason codes */
#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */
@@ -431,6 +456,8 @@
#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
#endif /* BCMCCX */
#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */
+#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */
+#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */
/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
#define WLC_E_SUP_OTHER 0 /* Other reason */
@@ -451,17 +478,15 @@
#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */
#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake M1 timeout */
#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */
+#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */
+#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */
+#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */
/* Ucode reason codes carried in the WLC_E_MACDBG event */
#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */
#define WLC_E_MACDBG_LIST_PSMX 1 /* Dump list update for PSMx registers */
#define WLC_E_MACDBG_REGALL 2 /* Dump all registers */
-/* reason codes for WLC_E_LINK_QUALITY event */
-#define WLC_E_LINK_QUALITY_NONE 0
-#define WLC_E_LINK_QUALITY_NO_ACK 1
-#define WLC_E_LINK_QUALITY_NO_RESULT 2
-
/* Event data for events that include frames received over the air */
/* WLC_E_PROBRESP_MSG
* WLC_E_P2P_PROBREQ_MSG
@@ -536,6 +561,22 @@
int32 noise;
} wl_event_data_rssi_t;
+#define WL_EVENT_WA_LQM_VER 0 /* initial version */
+
+#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */
+typedef struct { /* payload of subevent in xtlv */
+ int32 rssi;
+ int32 snr;
+ uint32 tx_rate;
+ uint32 rx_rate;
+} wl_event_wa_lqm_basic_t;
+
+typedef struct wl_event_wa_lqm {
+ uint16 ver; /* version */
+ uint16 len; /* total structure length */
+ uint8 subevent[]; /* sub-event data in bcm_xtlv_t format */
+} wl_event_wa_lqm_t;
+
/* WLC_E_IF flag */
#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
@@ -787,6 +828,8 @@
WL_NAN_EVENT_SLOT_START = 42, /* SLOT_START event */
WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */
WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */
+ WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */
+ WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */
WL_NAN_EVENT_INVALID /* delimiter for max value */
} nan_app_events_e;
@@ -812,7 +855,6 @@
#define WL_ULP_EVENT_VERSION 1
#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */
#define WL_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */
-#define WL_ULP_ENTRY 3 /* inform ulp entry to Host during warmboot */
typedef struct wl_ulp_event {
uint16 version;
@@ -825,6 +867,12 @@
uint8 pad[3]; /* 4-byte struct alignment */
} BWL_POST_PACKED_STRUCT wl_event_tko_t;
+/* DHCP lease time renew event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_dltro {
+ uint8 index; /* DLTRO connection index, 0 to max-1 */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} BWL_POST_PACKED_STRUCT wl_event_dltro_t;
+
typedef struct {
uint8 radar_type; /* one of RADAR_TYPE_XXX */
uint16 min_pw; /* minimum pulse-width (usec * 20) */
@@ -841,26 +889,6 @@
radar_detected_event_info_t radar_info[2];
} wl_event_radar_detect_data_t;
-typedef enum {
- WL_CHAN_REASON_CSA = 0,
- WL_CHAN_REASON_DFS_AP_MOVE_START = 1,
- WL_CHAN_REASON_DFS_AP_MOVE_RADAR_FOUND = 2,
- WL_CHAN_REASON_DFS_AP_MOVE_ABORTED = 3,
- WL_CHAN_REASON_DFS_AP_MOVE_SUCCESS = 4,
- WL_CHAN_REASON_DFS_AP_MOVE_STUNT = 5
-} wl_chan_change_reason_t;
-
-typedef struct wl_event_change_chan {
- uint16 version;
- uint16 length; /* excluding pad field bytes */
- wl_chan_change_reason_t reason; /* CSA or DFS_AP_MOVE */
- chanspec_t target_chanspec;
- uint16 pad; /* 4 byte alignment */
-} wl_event_change_chan_t;
-
-#define WL_CHAN_CHANGE_EVENT_VER_1 1 /* channel change event version */
-#define WL_CHAN_CHANGE_EVENT_LEN_VER_1 10
-
#define WL_EVENT_MODESW_VER_1 1
#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1
@@ -1071,6 +1099,14 @@
uint8 pad[2];
} wl_hc_temp_stats_v1_t;
+#define WL_PHY_HC_TEMP_STATS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ uint8 pad[2];
+} wl_hc_temp_stats_v2_t;
+
#define WL_PHY_HC_VCOCAL_STATS_VER (1)
typedef struct wl_hc_vcocal_stats {
uint16 version;
@@ -1101,10 +1137,25 @@
uint8 pad[3];
} wl_hc_txpwr_stats_v1_t;
+#define WL_PHY_HC_TXPWR_STATS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 chanspec;
+ int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */
+ uint8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */
+ uint8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */
+ uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */
+ int16 temp; /* Temperature */
+ uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */
+ int8 min_txpower; /* min tx power per ant */
+ uint8 pad[3];
+} wl_hc_txpwr_stats_v2_t;
+
typedef enum wl_mbo_event_type {
WL_MBO_E_CELLULAR_NW_SWITCH = 1,
+ WL_MBO_E_BTM_RCVD = 2,
/* ADD before this */
- WL_MBO_E_LAST = 2, /* highest val + 1 for range checking */
+ WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */
} wl_mbo_event_type_t;
/* WLC_E_MBO event structure version */
@@ -1136,11 +1187,45 @@
uint32 assoc_time_remain;
};
-/* WLC_E_ADPS event data */
-#define WL_EVENT_ADPS_VER_1 1
+/* WLC_E_MBO_BTM_RCVD event structure version */
+#define WL_BTM_EVENT_DATA_VER_1 1
+/* Specific btm event type data */
+struct wl_btm_event_type_data {
+ uint16 version;
+ uint16 len;
+ uint8 transition_reason; /* transition reason code */
+ uint8 pad[3]; /* pad */
+};
-/* WLC_E_ADPS event type */
-#define WL_E_TYPE_ADPS_BAD_AP 1
+/* WLC_E_PRUNE event structure version */
+#define WL_BSSID_PRUNE_EVT_VER_1 1
+/* MBO-OCE params */
+struct wl_bssid_prune_evt_info {
+ uint16 version;
+ uint16 len;
+ uint8 SSID[32];
+ uint32 time_remaining; /* Time remaining */
+ struct ether_addr BSSID;
+ uint8 SSID_len;
+ uint8 reason; /* Reason code */
+ int8 rssi_threshold; /* RSSI threshold */
+ uint8 pad[3]; /* pad */
+};
+
+/* WLC_E_HWA Event structure */
+typedef struct wl_event_hwa {
+ uint16 version; /* structure version */
+ uint16 length; /* length of structure */
+ uint32 sub_type; /* Sub event type */
+ uint8 data[0]; /* variable length data */
+} wl_event_hwa_t;
+
+#define WL_HWA_EVENT_VER 1
+
+typedef enum wl_event_hwa_subtype {
+ WL_HWA_EVENT_SUBTYPE_ERROR = 1,
+ WL_HWA_EVENT_SUBTYPE_LAST = 2
+} wl_event_hwa_subtype_t;
/* WLC_E_ADPS status */
enum {
@@ -1148,6 +1233,12 @@
WL_E_STATUS_ADPS_MAX
};
+/* WLC_E_ADPS event data */
+#define WL_EVENT_ADPS_VER_1 1
+
+/* WLC_E_ADPS event type */
+#define WL_E_TYPE_ADPS_BAD_AP 1
+
typedef struct wl_event_adps_bad_ap {
uint32 status;
uint32 reason;
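The new wl_event_hwa_t and wl_event_wa_lqm_t follow the usual version/length header plus trailing variable-length data pattern. A hedged sketch of how such an event buffer is sized and filled, using local mirror types only, not the firmware's wire format:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Local mirror of the version/length + trailing-data event layout. */
struct demo_event_hdr {
    uint16_t ver;
    uint16_t len;        /* total length, header included */
    uint8_t  data[];     /* variable-length payload */
};

static struct demo_event_hdr *demo_event_alloc(uint16_t ver,
                                               const void *payload,
                                               uint16_t payload_len)
{
    size_t total = sizeof(struct demo_event_hdr) + payload_len;
    struct demo_event_hdr *ev = malloc(total);

    if (!ev)
        return NULL;
    ev->ver = ver;
    ev->len = (uint16_t)total;
    memcpy(ev->data, payload, payload_len);
    return ev;
}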
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmicmp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmicmp.h
index f8cd4d4..d7539ba 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmicmp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmicmp.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to ICMP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmiov.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmiov.h
index fe89105..f4c2308 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmiov.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmiov.h
@@ -4,7 +4,9 @@
* To be used in firmware and host apps or dhd - reducing code size,
* duplication, and maintenance overhead.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmip.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmip.h
index 057ce20..3fe93ad 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmip.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmip.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to IP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -247,4 +249,9 @@
((uint32)addr & 0x0000ff00) >> 8, \
((uint32)addr & 0x000000ff)
+#define IPCOPY(s, d) \
+do { \
+ ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+ ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+} while (0)
#endif /* _bcmip_h_ */
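The IPCOPY() helper added above copies a 4-byte IPv4 address as two 16-bit halfword stores, which assumes both pointers are at least 2-byte aligned. An equivalent standalone sketch with standard types and hypothetical buffers:

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Equivalent of IPCOPY(): copy a 4-byte IPv4 address as two uint16 stores. */
static void demo_ipcopy(const void *src, void *dst)
{
    ((uint16_t *)dst)[0] = ((const uint16_t *)src)[0];
    ((uint16_t *)dst)[1] = ((const uint16_t *)src)[1];
}

int main(void)
{
    uint16_t src_buf[2], dst_buf[2];            /* uint16 storage guarantees alignment */
    const uint8_t addr[4] = { 192, 168, 1, 20 };

    memcpy(src_buf, addr, sizeof(addr));
    demo_ipcopy(src_buf, dst_buf);
    assert(memcmp(dst_buf, addr, sizeof(addr)) == 0);
    return 0;
}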
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmipv6.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmipv6.h
index 0395aa9..5e0ef8e 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmipv6.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmipv6.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to Neighbor Discovery Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
index 3e33866..366d686 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
@@ -4,7 +4,9 @@
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -69,6 +71,11 @@
#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32
#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32
+#define D2HRING_EDL_HDR_SIZE 48u
+#define D2HRING_EDL_ITEMSIZE 2048u
+#define D2HRING_EDL_MAX_ITEM 256u
+#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u)
+
#define D2HRING_CTRL_CMPLT_MAX_ITEM 64
enum {
@@ -429,6 +436,7 @@
uint8 flags;
} pcie_dma_xfer_params_t;
+#define BCMPCIE_FLOW_RING_INTF_HP2P 0x1
/** Complete msgbuf hdr for flow ring update from host to dongle */
typedef struct tx_flowring_create_request {
cmn_msg_hdr_t msg;
@@ -809,8 +817,10 @@
uint16 info_data_len;
/* sequence number */
uint16 seqnum;
+ /* destination */
+ uint8 dest;
/* rsvd */
- uint32 rsvd;
+ uint8 rsvd[3];
/* XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} info_buf_resp_t;
@@ -956,7 +966,7 @@
/** user defined rate */
uint8 rate;
- uint8 rsvd2;
+ uint8 exp_time;
};
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmnvram.h
index 1f5e331..78b0741 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmnvram.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmnvram.h
@@ -1,7 +1,9 @@
/*
* NVRAM variable manipulation
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcie.h
index 8126300..06e7861 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcie.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcie.h
@@ -3,7 +3,9 @@
* Software-specific definitions shared between device and host side
* Explains the shared area between host and dongle
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -32,7 +34,7 @@
#ifndef _bcmpcie_h_
#define _bcmpcie_h_
-#include <bcmutils.h>
+#include <typedefs.h>
#define ADDR_64(x) (x.addr)
#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr))
@@ -87,6 +89,7 @@
#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */
#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */
#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */
+#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */
#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */
@@ -161,6 +164,15 @@
firmware support added to reuse
timesync to update PKT txstatus
*/
+/* Support Enhanced Debug Lane */
+#define PCIE_SHARED2_EDL_RING 0x00001000
+
+/* Timestamp in packet */
+#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000
+
+/* HP2P feature */
+#define PCIE_SHARED2_HP2P 0x00010000u
+
/**
* Message rings convey messages between host and device. They are unidirectional, and are located
* in host memory.
@@ -195,6 +207,9 @@
#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4
#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5
#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6
+#define BCMPCIE_D2H_RING_TYPE_EDL 0x7
+#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8
+#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9
/**
* H2D and D2H, WR and RD index, are maintained in the following arrays:
@@ -263,8 +278,6 @@
#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \
((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS)
-#define HOFFLOAD_MODULES_ENAB(shmem) (0)
-
/**
* This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used
* for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated
@@ -382,6 +395,7 @@
/* location in host memory for offloaded modules */
sh_addr_t hoffload_addr;
+ uint32 debug_info_addr; /* Address of debug_info area */
} pciedev_shared_t;
/* Device F/W provides the following access function:
@@ -409,6 +423,10 @@
#define HOSTCAP_HSCB 0x02000000
/* Host support for extended device trap debug buffer */
#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000
+/* Host support for enhanced debug lane */
+#define HOSTCAP_EDL_RING 0x10000000
+#define HOSTCAP_PKT_TIMESTAMP 0x20000000
+#define HOSTCAP_PKT_HP2P 0x40000000
/* extended trap debug buffer allocation sizes. Note that this buffer can be used for
* other trap related purposes also.
@@ -450,6 +468,7 @@
#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
#define D2H_DEV_IDMA_INITED 0x00000010
#define D2H_DEV_FWHALT 0x10000000
+#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000
#define D2H_DEV_EXT_TRAP_DATA 0x20000000
#define D2H_DEV_TRAP_IN_TRAP 0x40000000
#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
index b455246..5202018 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -1,7 +1,9 @@
/*
* Broadcom PCI-SPI Host Controller Register Definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmperf.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmperf.h
index bfb7b5d..65fff9f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmperf.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -1,7 +1,9 @@
/*
* Performance counters software interface.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
index 1d0ee1c..b5815c7 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -2,7 +2,9 @@
* Definitions for API from sdio common code (bcmsdh) to individual
* host controller drivers.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh.h
index 28f99e3..66bd89c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -3,7 +3,9 @@
* export functions to client drivers
* abstract OS and BUS specific details of SDIO
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
index 01e337c..1da39f2 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -1,7 +1,9 @@
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -59,7 +61,7 @@
/* private bus modes */
#define SDIOH_MODE_SD4 2
#define CLIENT_INTR 0x100 /* Get rid of this! */
-#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 64
struct sdioh_info {
osl_t *osh; /* osh handler */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
index da0a271..37d968b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -2,7 +2,9 @@
* Broadcom SDIO/PCMCIA
* Software-specific definitions shared between device and host side
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -295,6 +297,7 @@
uint32 msgtrace_addr;
uint32 fwid;
uint32 device_fatal_logbuf_start;
+ uint32 debug_info_addr; /* Address of debug_info area */
} sdpcm_shared_t;
/* Device F/W provides the following access function:
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
index 86f1095..52883dd 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -1,7 +1,9 @@
/*
* SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -64,6 +66,7 @@
uint32 caps; /* cached value of capabilities reg */
uint bar0; /* BAR0 for PCI Device */
osl_t *osh; /* osh handler */
+ void *bcmsdh; /* handler to upper layer stack (bcmsdh) */
void *controller; /* Pointer to SPI Controller's private data struct */
uint lockcount; /* nest count of sdspi_lock() calls */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
index 24417f6..7485343 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -1,7 +1,9 @@
/*
* 'Standard' SDIO HOST CONTROLLER driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -81,8 +83,13 @@
#define SDIOH_CMD7_EXP_STATUS 0x00001E00
#define RETRIES_LARGE 100000
+#ifdef BCMQT
+extern void sdstd_os_yield(sdioh_info_t *sd);
+#define RETRIES_SMALL 10000
+#else
#define sdstd_os_yield(sd) do {} while (0)
#define RETRIES_SMALL 100
+#endif // endif
#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
#define USE_MULTIBLOCK 0x4
@@ -108,13 +115,13 @@
#endif // endif
struct sdioh_info {
- uint cfg_bar; /* pci cfg address for bar */
- uint32 caps; /* cached value of capabilities reg */
- uint32 curr_caps; /* max current capabilities reg */
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
- osl_t *osh; /* osh handler */
- volatile char *mem_space; /* pci device memory va */
- uint lockcount; /* nest count of sdstd_lock() calls */
+ osl_t *osh; /* osh handler */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
bool client_intr_enabled; /* interrupt connnected flag */
bool intr_handler_valid; /* client driver interrupt handler valid */
sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
@@ -160,12 +167,12 @@
ulong adma2_dscr_start_phys;
uint alloced_adma2_dscr_size;
- int r_cnt; /* rx count */
- int t_cnt; /* tx_count */
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
bool got_hcint; /* local interrupt flag */
uint16 last_intrstatus; /* to cache intrstatus */
- int host_UHSISupported; /* whether UHSI is supported for HC. */
- int card_UHSI_voltage_Supported; /* whether UHSI is supported for
+ int host_UHSISupported; /* whether UHSI is supported for HC. */
+ int card_UHSI_voltage_Supported; /* whether UHSI is supported for
* Card in terms of Voltage [1.8 or 3.3].
*/
int global_UHSI_Supp; /* type of UHSI support in both host and card.
@@ -173,9 +180,10 @@
* HOST_SDR_12_25: SDR12 and SDR25 supported
* HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
*/
- volatile int sd3_dat_state; /* data transfer state used for retuning check */
- volatile int sd3_tun_state; /* tuning state used for retuning check */
- bool sd3_tuning_reqd; /* tuning requirement parameter */
+ volatile int sd3_dat_state; /* data transfer state used for retuning check */
+ volatile int sd3_tun_state; /* tuning state used for retuning check */
+ bool sd3_tuning_reqd; /* tuning requirement parameter */
+ bool sd3_tuning_disable; /* tuning disable due to bus sleeping */
uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
#ifdef BCMSDIOH_TXGLOM
glom_buf_t glom_info; /* pkt information used for glomming */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspi.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspi.h
index 7a94323..5146a7f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspi.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -1,7 +1,9 @@
/*
* Broadcom SPI Low-Level Hardware Driver API
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
index 5119116..74b9abc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
@@ -1,7 +1,9 @@
/*
* SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -82,6 +84,7 @@
void *bar0; /* BAR0 for PCI Device */
#endif /* !BCMSPI_ANDROID */
osl_t *osh; /* osh handler */
+ void *bcmsdh; /* handler to upper layer stack (bcmsdh) */
void *controller; /* Pointer to SPI Controller's private data struct */
uint lockcount; /* nest count of spi_lock() calls */
bool client_intr_enabled; /* interrupt connnected flag */
@@ -105,6 +108,7 @@
bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
/* Must be on for sd_multiblock to be effective */
bool use_client_ints; /* If this is false, make sure to restore */
+ bool got_hcint; /* Host Controller interrupt. */
/* polling hack in wl_linux.c:wl_timer() */
int adapter_slot; /* Maybe dealing with multiple slots/controllers */
int sd_mode; /* SD1/SD4/SPI */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
index 9af8bf1..e6d90cd 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
@@ -1,7 +1,9 @@
/*
* SROM format definition.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
index efeb71f..e42663b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
@@ -1,7 +1,9 @@
/*
* Table that encodes the srom formats for PCI/PCIe NICs.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmstdlib_s.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmstdlib_s.h
new file mode 100644
index 0000000..582612e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmstdlib_s.h
@@ -0,0 +1,47 @@
+/*
+ * Broadcom Secure Standard Library.
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _bcmstdlib_s_h_
+#define _bcmstdlib_s_h_
+
+#ifndef BWL_NO_INTERNAL_STDLIB_SUPPORT
+#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \
+ defined(__STDC_WANT_LIB_EXT1__))
+extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n);
+extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n);
+extern int memset_s(void *dest, size_t destsz, int c, size_t n);
+#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
+#if !defined(FREEBSD) && !defined(BCM_USE_PLATFORM_STRLCPY)
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif // endif
+extern size_t strlcat_s(char *dest, const char *src, size_t size);
+#endif /* !BWL_NO_INTERNAL_STDLIB_SUPPORT */
+#endif /* _bcmstdlib_s_h_ */
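An illustrative approximation of the memcpy_s() contract declared in this new header; the driver's real implementation lives elsewhere in the source (and, like C11 Annex K, may additionally clear the destination on failure), so treat this only as a sketch of the bounds check:

#include <stddef.h>
#include <string.h>
#include <errno.h>

/* Sketch of a bounds-checked copy: fail instead of overflowing destsz. */
static int demo_memcpy_s(void *dest, size_t destsz, const void *src, size_t n)
{
    if (dest == NULL || src == NULL)
        return EINVAL;
    if (n > destsz)
        return ERANGE;          /* would overflow the destination */
    memcpy(dest, src, n);
    return 0;                   /* success */
}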
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtcp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtcp.h
index 92379d7..610f878 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtcp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtcp.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to TCP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtlv.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtlv.h
index b14d292..83d4c91 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtlv.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmtlv.h
@@ -1,7 +1,9 @@
/*
* TLV and XTLV support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -83,10 +85,16 @@
#define BCM_TLV_EXT_SIZE(_tlv_ext) (BCM_TLV_EXT_HDR_SIZE + (_tlv_ext)->len)
/* find the next tlv */
-bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, int *buflen);
+bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen);
+
+/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
+
+/* move buffer/buflen past the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
/* find the tlv for a given id */
-bcm_tlv_t *bcm_parse_tlvs(const void *buf, int buflen, uint key);
+bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key);
/*
* Traverse tlvs and return pointer to the first tlv that
@@ -101,8 +109,8 @@
const bcm_tlv_t *bcm_parse_ordered_tlvs(const void *buf, int buflen, uint key);
/* find a tlv with DOT11_MNG_PROPR_ID as id, and the given oui and type */
- bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, int tlvs_len, const char *voui,
- uint8 *type, int type_len);
+ bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui,
+ uint8 *type, uint type_len);
/* write tlv at dst and return next tlv ptr */
uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
@@ -139,6 +147,8 @@
#define BCM_XTLV_OPTION_ALIGN32 0x0001 /* 32bit alignment of type.len.data */
#define BCM_XTLV_OPTION_IDU8 0x0002 /* shorter id */
#define BCM_XTLV_OPTION_LENU8 0x0004 /* shorted length */
+#define BCM_XTLV_OPTION_IDBE 0x0008 /* big endian format id */
+#define BCM_XTLV_OPTION_LENBE 0x0010 /* big endian format length */
typedef uint16 bcm_xtlv_opts_t;
/* header size. depends on options. Macros names ending w/ _EX are where
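A minimal sketch of the id/len/data walk these prototypes support, using a local mirror of the 1-byte-id, 1-byte-length TLV layout rather than the driver's bcm_tlv_t; it also shows why an unsigned buflen is natural here, since the remaining length only shrinks toward zero:

#include <stdint.h>
#include <stddef.h>

struct demo_tlv {
    uint8_t id;
    uint8_t len;
    uint8_t data[];
};

/* Return the first TLV with the requested id, or NULL if none fits in buflen. */
static const struct demo_tlv *demo_parse_tlvs(const uint8_t *buf, unsigned int buflen,
                                              uint8_t key)
{
    while (buflen >= sizeof(struct demo_tlv)) {
        const struct demo_tlv *elt = (const struct demo_tlv *)buf;
        unsigned int elt_size = sizeof(struct demo_tlv) + elt->len;

        if (elt_size > buflen)
            break;              /* truncated element: stop rather than over-read */
        if (elt->id == key)
            return elt;
        buf += elt_size;
        buflen -= elt_size;
    }
    return NULL;
}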
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmudp.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmudp.h
index 64c79ec..5c4a10c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmudp.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmudp.h
@@ -1,7 +1,9 @@
/*
* Fundamental constants relating to UDP Protocol
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmutils.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmutils.h
index 387ba20..cacf4fd 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmutils.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -1,7 +1,9 @@
/*
* Misc useful os-independent macros and functions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -163,12 +165,22 @@
#define DSCP_AF21 0x12
#define DSCP_AF22 0x14
#define DSCP_AF23 0x16
+/* CS2: OAM (RFC2474) */
+#define DSCP_CS2 0x10
/* AF3x: Multimedia Streaming (RFC2597) */
#define DSCP_AF31 0x1A
#define DSCP_AF32 0x1C
#define DSCP_AF33 0x1E
+/* CS3: Broadcast Video (RFC2474) */
+#define DSCP_CS3 0x18
+/* VA: VOICE-ADMIT (RFC5865) */
+#define DSCP_VA 0x2C
/* EF: Telephony (RFC3246) */
#define DSCP_EF 0x2E
+/* CS6: Network Control (RFC2474) */
+#define DSCP_CS6 0x30
+/* CS7: Network Control (RFC2474) */
+#define DSCP_CS7 0x38
extern uint pktsetprio(void *pkt, bool update_vtag);
extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
@@ -414,7 +426,8 @@
#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */
#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */
#define BCME_BANDLOCKED -67 /* interface is restricted to a band */
-#define BCME_LAST BCME_BANDLOCKED
+#define BCME_DNGL_DEVRESET -68 /* dongle re-attach during DEVRESET */
+#define BCME_LAST BCME_DNGL_DEVRESET
#define BCME_NOTENABLED BCME_DISABLED
@@ -498,6 +511,7 @@
"DAC buf RC Cal failed", \
"VCO Cal failed", \
"band locked", \
+ "Dongle Devreset", \
}
#ifndef ABS
@@ -576,7 +590,7 @@
#define SIZE_OF(type, field) sizeof(((type *)0)->field)
#ifndef ARRAYSIZE
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
+#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0]))
#endif // endif
#ifndef ARRAYLAST /* returns pointer to last array element */
@@ -659,9 +673,9 @@
return ((*a >> pos) & MSK); \
}
-DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
-DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
-DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003U) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000FU) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FFU) /* setbit8() and getbit8() */
/* basic mux operation - can be optimized on several architectures */
#define MUX(pred, true, false) ((pred) ? (true) : (false))
@@ -715,17 +729,17 @@
(ea).octet[5]
#if !defined(SIMPLE_MAC_PRINT)
#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#define MAC2STRDBG(ea) CONST_ETHERP_TO_MACF(ea)
#else
#define MACDBG "%02x:xx:xx:xx:x%x:%02x"
-#define MAC2STRDBG(ea) (ea)[0], ((ea)[4] & 0xf), (ea)[5]
+#define MAC2STRDBG(ea) ((uint8*)(ea))[0], (((uint8*)(ea))[4] & 0xf), ((uint8*)(ea))[5]
#endif /* SIMPLE_MAC_PRINT */
#define MACOUIDBG "%02x:%x:%02x"
-#define MACOUI2STRDBG(ea) (ea)[0], (ea)[1] & 0xf, (ea)[2]
+#define MACOUI2STRDBG(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1] & 0xf, ((uint8*)(ea))[2]
#define MACOUI "%02x:%02x:%02x"
-#define MACOUI2STR(ea) (ea)[0], (ea)[1], (ea)[2]
+#define MACOUI2STR(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1], ((uint8*)(ea))[2]
/* bcm_format_flags() bit description structure */
typedef struct bcm_bit_desc {
@@ -834,7 +848,8 @@
char *buf, uint32 bufsize);
extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
-extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+ __attribute__ ((format (__printf__, 2, 0)));
/* power conversion */
extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
@@ -842,6 +857,8 @@
extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len);
unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+extern bool replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
+ unsigned int *datalen);
/* trace any object allocation / free, with / without features (flags) set to the object */
@@ -914,7 +931,7 @@
while (u32arg) {
shifts++; u32arg >>= 1;
}
- return (32U - shifts);
+ return (32 - shifts);
}
/* the format of current TCM layout during boot
@@ -928,11 +945,11 @@
/* The HOST need to provided 64 bytes (512 bits) entropy for the bcm SW RNG */
#define BCM_ENTROPY_MAGIC_SIZE 4u
#define BCM_ENTROPY_COUNT_SIZE 4u
-#define BCM_ENTROPY_SEED_NBYTES 64u
-#define BCM_ENTROPY_NONCE_NBYTES 16u
-#define BCM_ENTROPY_HOST_NBYTES (BCM_ENTROPY_SEED_NBYTES + BCM_ENTROPY_NONCE_NBYTES)
-#define BCM_ENTROPY_HOST_MAXSIZE \
- (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_HOST_NBYTES)
+#define BCM_ENTROPY_MIN_NBYTES 64u
+#define BCM_ENTROPY_MAX_NBYTES 512u
+#define BCM_ENTROPY_HOST_NBYTES 128u
+#define BCM_ENTROPY_HOST_MAXSIZE \
+ (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES)
/* Keep BCM MAX_RAND NUMBERS definition for the current dongle image. It will be
* removed after the dongle image is updated to use the bcm RNG.
@@ -945,15 +962,10 @@
#define BCM_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu
typedef struct bcm_rand_metadata {
- uint32 signature; /* host fills it in, FW verfies before reading rand */
uint32 count; /* number of random numbers in bytes */
+ uint32 signature; /* host fills it in, FW verifies before reading rand */
} bcm_rand_metadata_t;
-typedef struct bcm_host_whitelist_metadata {
- uint32 signature; /* host fills it in, FW verfies before reading Whitelist region */
- uint32 count; /* size of whitelist region in bytes */
-} bcm_host_whitelist_metadata_t;
-
#ifdef BCMDRIVER
/*
* Assembly instructions: Count Leading Zeros
@@ -1266,6 +1278,7 @@
#endif // endif
#ifdef SHOW_LOGTRACE
#define TRACE_LOG_BUF_MAX_SIZE 1700
+#define RTT_LOG_BUF_MAX_SIZE 1700
#define BUF_NOT_AVAILABLE 0
#define NEXT_BUF_NOT_AVAIL 1
#define NEXT_BUF_AVAIL 2
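
A minimal sketch of the host-side fill implied by the bcm_rand_metadata_t reordering above (count now precedes signature); fill_rand_metadata() is an illustrative name and the surrounding buffer handling is omitted:

    #include <typedefs.h>
    #include <bcmutils.h>

    static void fill_rand_metadata(bcm_rand_metadata_t *md, uint32 nbytes)
    {
        md->count = nbytes;                       /* number of random bytes that follow */
        md->signature = BCM_NVRAM_RNG_SIGNATURE;  /* FW verifies this before reading */
    }

With BCM_ENTROPY_MAX_NBYTES now 512u, BCM_ENTROPY_HOST_MAXSIZE budgets for the largest seed the host may supply rather than the previous fixed 64+16 byte layout.
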
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
index aeaf5a0..9ba30d6 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
@@ -1,7 +1,9 @@
/*
* Definitions for nl80211 vendor command/event access to host driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -35,9 +37,17 @@
#define OUI_GOOGLE 0x001A11
enum wl_vendor_subcmd {
- BRCM_VENDOR_SCMD_UNSPEC,
- BRCM_VENDOR_SCMD_PRIV_STR,
- BRCM_VENDOR_SCMD_BCM_STR
+ BRCM_VENDOR_SCMD_UNSPEC = 0,
+ BRCM_VENDOR_SCMD_PRIV_STR = 1,
+ BRCM_VENDOR_SCMD_BCM_STR = 2,
+ BRCM_VENDOR_SCMD_BCM_PSK = 3,
+ BRCM_VENDOR_SCMD_SET_PMK = 4,
+ BRCM_VENDOR_SCMD_GET_FEATURES = 5,
+ BRCM_VENDOR_SCMD_FRAMEBURST = 6,
+ BRCM_VENDOR_SCMD_MPC = 7,
+ BRCM_VENDOR_SCMD_BAND = 8,
+ BRCM_VENDOR_SCMD_MAX = 9
+
};
struct bcm_nlmsg_hdr {
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dbus.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dbus.h
index 45f47fc..3b6a7c2 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dbus.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -2,7 +2,9 @@
* Dongle BUS interface Abstraction layer
* target serial buses like USB, SDIO, SPI, etc.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhd_daemon.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhd_daemon.h
index af53c16..90ad408 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhd_daemon.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhd_daemon.h
@@ -1,7 +1,9 @@
/*
* Header file for DHD daemon to handle timeouts
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -40,7 +42,11 @@
#define NO_TRAP 0
#define DO_TRAP 1
-#define BCM_NL_USER 31
+/* Keep common BCM netlink macros here */
+#define BCM_NL_USER 31
+#define BCM_NL_OXYGEN 30
+#define BCM_NL_TS 29
+/* ====== !! ADD NEW NL socket related defines here !! ====== */
typedef enum notify_dhd_daemon_reason {
REASON_COMMAND_TO,
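
A hedged sketch of how a kernel-side user of one of the new netlink unit numbers above might create its socket on a v4.19 kernel; only the BCM_NL_TS constant comes from this header, while the cfg contents and the dhd_ts_nl_init() name are illustrative:

    #include <linux/netlink.h>
    #include <net/net_namespace.h>
    #include <dhd_daemon.h>

    static struct sock *dhd_ts_nl_init(void)
    {
        struct netlink_kernel_cfg cfg = {
            .input = NULL,  /* receive callback would go here */
        };

        /* unit numbers 29..31 are now reserved by the list above */
        return netlink_kernel_create(&init_net, BCM_NL_TS, &cfg);
    }
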
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhdioctl.h
index f2d8307..6b0582a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhdioctl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -5,7 +5,9 @@
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -60,6 +62,33 @@
DMA_XFER_FAILED
} dma_xfer_status_t;
+typedef enum d11_lpbk_type {
+ M2M_DMA_LPBK = 0,
+ D11_LPBK = 1,
+ BMC_LPBK = 2,
+ M2M_NON_DMA_LPBK = 3,
+ D11_HOST_MEM_LPBK = 4,
+ BMC_HOST_MEM_LPBK = 5,
+ MAX_LPBK = 6
+} dma_xfer_type_t;
+
+typedef struct dmaxfer_info {
+ uint16 version;
+ uint16 length;
+ dma_xfer_status_t status;
+ dma_xfer_type_t type;
+ uint src_delay;
+ uint dest_delay;
+ uint should_wait;
+ uint core_num;
+ int error_code;
+ uint32 num_bytes;
+ uint64 time_taken;
+ uint64 tput;
+} dma_xfer_info_t;
+
+#define DHD_DMAXFER_VERSION 0x1
+
typedef struct tput_test {
uint16 version;
uint16 length;
@@ -148,7 +177,7 @@
#define DHD_ERROR_MEM_VAL 0x8000000
#define DHD_DNGL_IOVAR_SET_VAL 0x10000000 /**< logs the setting of dongle iovars */
#define DHD_LPBKDTDUMP_VAL 0x20000000
-#define DHD_EVENT_MEM_VAL 0x40000000
+#define DHD_PRSRV_MEM_VAL 0x40000000
#define DHD_IOVAR_MEM_VAL 0x80000000
#ifdef SDTEST
@@ -230,4 +259,12 @@
int buf_size; /* buffer size per chunk */
} bt_mem_req_t;
+/* max dest supported */
+#define DEBUG_BUF_DEST_MAX 4
+
+/* debug buf dest stat */
+typedef struct debug_buf_dest_stat {
+ uint32 stat[DEBUG_BUF_DEST_MAX];
+} debug_buf_dest_stat_t;
+
#endif /* _dhdioctl_h_ */
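
A minimal sketch of filling the new dma_xfer_info_t for a loopback request, using only the fields and constants defined above; how the structure is handed to the driver, and the fill_dmaxfer() name, are not part of this header:

    #include <string.h>
    #include <typedefs.h>
    #include <dhdioctl.h>

    static void fill_dmaxfer(dma_xfer_info_t *info, uint32 num_bytes)
    {
        memset(info, 0, sizeof(*info));
        info->version   = DHD_DMAXFER_VERSION;
        info->length    = (uint16)sizeof(*info);
        info->type      = M2M_DMA_LPBK;   /* one of d11_lpbk_type above */
        info->num_bytes = num_bytes;
        /* status, error_code, time_taken and tput are reported back by the driver */
    }
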
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dnglevent.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dnglevent.h
index 0cb7e49..46defe2 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dnglevent.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/dnglevent.h
@@ -1,7 +1,9 @@
/*
* Broadcom Event protocol definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/eapol.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/eapol.h
index 2f6a7da..440a7fa 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/eapol.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/eapol.h
@@ -5,7 +5,9 @@
* IEEE Std 802.1X-2001
* IEEE 802.1X RADIUS Usage Guidelines
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -62,30 +64,30 @@
unsigned short length; /* Length of body */
} eapol_hdr_t;
-#define EAPOL_HDR_LEN 4
+#define EAPOL_HDR_LEN 4u
/* EAPOL version */
-#define WPA2_EAPOL_VERSION 2
-#define WPA_EAPOL_VERSION 1
-#define LEAP_EAPOL_VERSION 1
-#define SES_EAPOL_VERSION 1
+#define WPA2_EAPOL_VERSION 2u
+#define WPA_EAPOL_VERSION 1u
+#define LEAP_EAPOL_VERSION 1u
+#define SES_EAPOL_VERSION 1u
/* EAPOL types */
#define EAP_PACKET 0
-#define EAPOL_START 1
-#define EAPOL_LOGOFF 2
-#define EAPOL_KEY 3
-#define EAPOL_ASF 4
+#define EAPOL_START 1u
+#define EAPOL_LOGOFF 2u
+#define EAPOL_KEY 3u
+#define EAPOL_ASF 4u
/* EAPOL-Key types */
-#define EAPOL_RC4_KEY 1
-#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
-#define EAPOL_WPA_KEY 254 /* WPA */
+#define EAPOL_RC4_KEY 1u
+#define EAPOL_WPA2_KEY 2u /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254u /* WPA */
/* RC4 EAPOL-Key header field sizes */
-#define EAPOL_KEY_REPLAY_LEN 8
-#define EAPOL_KEY_IV_LEN 16
-#define EAPOL_KEY_SIG_LEN 16
+#define EAPOL_KEY_REPLAY_LEN 8u
+#define EAPOL_KEY_IV_LEN 16u
+#define EAPOL_KEY_SIG_LEN 16u
/* RC4 EAPOL-Key */
typedef BWL_PRE_PACKED_STRUCT struct {
@@ -98,28 +100,48 @@
unsigned char key[1]; /* Key (optional) */
} BWL_POST_PACKED_STRUCT eapol_key_header_t;
-#define EAPOL_KEY_HEADER_LEN 44
+#define EAPOL_KEY_HEADER_LEN 44u
/* RC4 EAPOL-Key flags */
-#define EAPOL_KEY_FLAGS_MASK 0x80
-#define EAPOL_KEY_BROADCAST 0
-#define EAPOL_KEY_UNICAST 0x80
+#define EAPOL_KEY_FLAGS_MASK 0x80u
+#define EAPOL_KEY_BROADCAST 0u
+#define EAPOL_KEY_UNICAST 0x80u
/* RC4 EAPOL-Key index */
-#define EAPOL_KEY_INDEX_MASK 0x7f
+#define EAPOL_KEY_INDEX_MASK 0x7fu
/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
#define EAPOL_AKW_BLOCK_LEN 8
-#define EAPOL_WPA_KEY_REPLAY_LEN 8
-#define EAPOL_WPA_KEY_NONCE_LEN 32
-#define EAPOL_WPA_KEY_IV_LEN 16
-#define EAPOL_WPA_KEY_RSC_LEN 8
-#define EAPOL_WPA_KEY_ID_LEN 8
-#define EAPOL_WPA_KEY_MIC_LEN 16
+#define EAPOL_WPA_KEY_REPLAY_LEN 8u
+#define EAPOL_WPA_KEY_NONCE_LEN 32u
+#define EAPOL_WPA_KEY_IV_LEN 16u
+#define EAPOL_WPA_KEY_RSC_LEN 8u
+#define EAPOL_WPA_KEY_ID_LEN 8u
#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN)
-#define EAPOL_WPA_MAX_KEY_SIZE 32
+#define EAPOL_WPA_MAX_KEY_SIZE 32u
+#define EAPOL_WPA_KEY_MAX_MIC_LEN 32u
+#define EAPOL_WPA_ENCR_KEY_MAX_LEN 64u
+#define EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN 32u
-/* WPA EAPOL-Key */
+#define EAPOL_WPA_PMK_MAX_LEN 64u
+#define EAPOL_WPA_PMK_SHA384_LEN 48u
+#define EAPOL_WPA_PMK_DEFAULT_LEN 32u
+#define EAPOL_WPA_KCK_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u
+#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_LEN 95u /* deprecated */
+#endif // endif
+
+/* MIC length for SHA1 and HMAC-SHA-256 based key derivation/integrity algorithm */
+#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */
+
+/* MIC length for HMAC-SHA-384 based key derivation/integrity algorithm */
+#define EAPOL_WPA_KEY_MIC_LEN_SHA384 24u
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+/* WPA EAPOL-Key : deprecated */
typedef BWL_PRE_PACKED_STRUCT struct {
unsigned char type; /* Key Descriptor Type */
unsigned short key_info; /* Key Information (unaligned) */
@@ -133,11 +155,44 @@
unsigned short data_len; /* Key Data Length */
unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+#else
+/* WPA EAPOL-Key : new structure to consider dynamic MIC length */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_v2_t;
-#define EAPOL_WPA_KEY_LEN 95
+typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t;
+#endif /* EAPOL_KEY_HDR_VER_V2 */
+
+#define EAPOL_WPA_KEY_DATA_LEN_SIZE 2u
+
+#ifdef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) (sizeof(eapol_wpa_key_header_v2_t) \
+ + mic_len + EAPOL_WPA_KEY_DATA_LEN_SIZE)
+
+/* WPA EAPOL-Key header macros to reach out mic/data_len/data field */
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t))
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) \
+ ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t) + mic_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) \
+ ((uint8 *)pos + EAPOL_WPA_KEY_HDR_SIZE(mic_len))
+#else
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) EAPOL_WPA_KEY_LEN
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)&pos->mic)
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) ((uint8 *)&pos->data_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) ((uint8 *)&pos->data)
+#endif /* EAPOL_KEY_HDR_VER_V2 */
/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
#define WPA_KEY_DESC_OSEN 0x0
+#define WPA_KEY_DESC_V0 0x0
#define WPA_KEY_DESC_V1 0x01
#define WPA_KEY_DESC_V2 0x02
#define WPA_KEY_DESC_V3 0x03
@@ -179,6 +234,7 @@
#define WPA2_KEY_DATA_SUBTYPE_MAC 3
#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
#define WPA2_KEY_DATA_SUBTYPE_IGTK 9
+#define WPA2_KEY_DATA_SUBTYPE_TRANS_DISABLE 32
/* GTK encapsulation */
typedef BWL_PRE_PACKED_STRUCT struct {
@@ -212,6 +268,15 @@
#define WPA2_KEY_DATA_PAD 0xdd
+/* Transition Disable Bitmap field encapsulation */
+#define TRANS_BITMAP_FIELD_LEN 1
+
+/* Transition Disable Bitmap field index values */
+#define TRANS_DISABLE_WPA3_PSK 0
+#define TRANS_DISABLE_SAE_PK 1
+#define TRANS_DISABLE_WPA3_ENT 2
+#define TRANS_DISABLE_WIFI_ENHANCE_OPEN 3
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
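
A minimal sketch of how the new accessor macros above locate the variable-position fields of an EAPOL-Key frame; locate_key_fields() is illustrative, 'body' is assumed to point at the Key Descriptor Type octet, and mic_len is 16 (EAPOL_WPA_KEY_MIC_LEN) or 24 (EAPOL_WPA_KEY_MIC_LEN_SHA384) depending on the AKM:

    #include <typedefs.h>
    #include <eapol.h>

    static void locate_key_fields(uint8 *body, uint mic_len)
    {
        eapol_wpa_key_header_t *key = (eapol_wpa_key_header_t *)body;
        uint8 *mic      = EAPOL_WPA_KEY_HDR_MIC_PTR(key);
        uint8 *data_len = EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(key, mic_len);
        uint8 *data     = EAPOL_WPA_KEY_HDR_DATA_PTR(key, mic_len);

        (void)mic; (void)data_len; (void)data;  /* consumed by the caller's MIC/key-data handling */
    }

The same code compiles against both branches: without EAPOL_KEY_HDR_VER_V2 the macros fall back to the fixed-offset v1 layout, and with it they add sizeof(eapol_wpa_key_header_v2_t) plus the caller-supplied MIC length.
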
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/epivers.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/epivers.h
index 326ca00..0ad4819 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/epivers.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -15,26 +15,26 @@
#define EPI_MINOR_VERSION 10
-#define EPI_RC_NUMBER 1
+#define EPI_RC_NUMBER 52
-#define EPI_INCREMENTAL_NUMBER 23
+#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 100, 10, 1, 23
+#define EPI_VERSION 100, 10, 52, 0
-#define EPI_VERSION_NUM 0x640a0117
+#define EPI_VERSION_NUM 0x640a3400
-#define EPI_VERSION_DEV 100.10.1
+#define EPI_VERSION_DEV 100.10.52
/* Driver Version String, ASCII, 32 chars max */
#ifdef BCMINTERNAL
-#define EPI_VERSION_STR "100.10.1.23 (af45420 BCMINT)"
+#define EPI_VERSION_STR "100.10.52 (1d305b2 BCMINT)"
#else
#ifdef WLTEST
-#define EPI_VERSION_STR "100.10.1.23 (af45420 WLTEST)"
+#define EPI_VERSION_STR "100.10.52 (1d305b2 WLTEST)"
#else
-#define EPI_VERSION_STR "100.10.1.23 (af45420)"
+#define EPI_VERSION_STR "100.10.52 (1d305b2)"
#endif
#endif /* BCMINTERNAL */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/etd.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/etd.h
index 027abdb..daa69bc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/etd.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/etd.h
@@ -1,7 +1,9 @@
/*
* Extended Trap data component interface file.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -78,6 +80,10 @@
TAG_TRAP_LOG_DATA = 20,
TAG_TRAP_CODE = 21, /* The trap type */
TAG_TRAP_HMAP = 22, /* HMAP violation Address and Info */
+ TAG_TRAP_PCIE_ERR_ATTN = 23, /* PCIE error attn log */
+ TAG_TRAP_AXI_ERROR = 24, /* AXI Error */
+ TAG_TRAP_AXI_HOST_INFO = 25, /* AXI Host log */
+ TAG_TRAP_AXI_SR_ERROR = 26, /* AXI SR error log */
TAG_TRAP_LAST /* This must be the last entry */
} hnd_ext_tag_trap_t;
@@ -106,6 +112,42 @@
uint32 itipoobdout;
} hnd_ext_trap_bp_err_t;
+#define HND_EXT_TRAP_AXISR_INFO_VER_1 1
+typedef struct hnd_ext_trap_axi_sr_err_v1
+{
+ uint8 version;
+ uint8 pad[3];
+ uint32 error;
+ uint32 coreid;
+ uint32 baseaddr;
+ uint32 ioctrl;
+ uint32 iostatus;
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+
+ /* axi_sr_issue_debug */
+ uint32 sr_pwr_control;
+ uint32 sr_corereset_wrapper_main;
+ uint32 sr_corereset_wrapper_aux;
+ uint32 sr_main_gci_status_0;
+ uint32 sr_aux_gci_status_0;
+ uint32 sr_dig_gci_status_0;
+} hnd_ext_trap_axi_sr_err_v1_t;
+
#define HND_EXT_TRAP_PSMWD_INFO_VER 1
typedef struct hnd_ext_trap_psmwd_v1 {
uint16 xtag;
@@ -226,11 +268,64 @@
uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
} hnd_ext_trap_wlc_mem_err_v2_t;
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3
+
+typedef struct hnd_ext_trap_wlc_mem_err_v3 {
+ uint8 version;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v3_t;
+
typedef struct hnd_ext_trap_pcie_mem_err {
uint16 d2h_queue_len;
uint16 d2h_req_queue_len;
} hnd_ext_trap_pcie_mem_err_t;
+#define MAX_DMAFIFO_ENTRIES_V1 1
+#define MAX_DMAFIFO_DESC_ENTRIES_V1 2
+#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe
+#define HND_EXT_TRAP_AXIERROR_VERSION_1 1
+
+/* Structure to collect debug info of descriptor entry for dma channel on encountering AXI Error */
+/* The three structures below are dependent; any change bumps the version of all three */
+
+typedef struct hnd_ext_trap_desc_entry_v1 {
+ uint32 ctrl1; /* descriptor entry at din < misc control bits > */
+ uint32 ctrl2; /* descriptor entry at din <buffer count and address extension> */
+ uint32 addrlo; /* descriptor entry at din <address of data buffer, bits 31:0> */
+ uint32 addrhi; /* descriptor entry at din <address of data buffer, bits 63:32> */
+} dma_dentry_v1_t;
+
+/* Structure to collect debug info about a dma channel on encountering AXI Error */
+typedef struct hnd_ext_trap_dma_fifo_v1 {
+ uint8 valid; /* number of valid desc entries filled; non-zero = fifo entry valid */
+ uint8 direction; /* TX=1, RX=2, currently only using TX */
+ uint16 index; /* Index of the DMA channel in system */
+ uint32 dpa; /* Expected Address of Descriptor table from software state */
+ uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */
+ uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */
+ uint16 din; /* rxin / txin */
+ uint16 dout; /* rxout / txout */
+ dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor Entries */
+} dma_fifo_v1_t;
+
+typedef struct hnd_ext_trap_axi_error_v1 {
+ uint8 version; /* version = 1 */
+ uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */
+ uint16 length; /* length of whole structure */
+ uint32 signature; /* indicate that its filled with AXI Error data */
+ uint32 axi_errorlog_status; /* errlog_status from slave wrapper */
+ uint32 axi_errorlog_core; /* errlog_core from slave wrapper */
+ uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */
+ uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */
+ uint32 axi_errorlog_id; /* errlog_id from slave wrapper */
+ dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1];
+} hnd_ext_trap_axi_error_v1_t;
+
#define HND_EXT_TRAP_MACSUSP_INFO_VER 1
typedef struct hnd_ext_trap_macsusp {
uint16 xtag;
@@ -282,6 +377,7 @@
uint16 PAD;
} hnd_ext_trap_macenab_t;
+#define HND_EXT_TRAP_PHY_INFO_VER_1 (1)
typedef struct hnd_ext_trap_phydbg {
uint16 err;
uint16 RxFeStatus;
@@ -326,7 +422,7 @@
uint16 core_mask;
} reg_dump_config_t;
-#define HND_EXT_TRAP_PHY_INFO_VER 2
+#define HND_EXT_TRAP_PHY_INFO_VER 2
typedef struct hnd_ext_trap_phydbg_v2 {
uint8 version;
uint8 len;
@@ -361,6 +457,43 @@
uint32 additional_regs[1];
} hnd_ext_trap_phydbg_v2_t;
+#define HND_EXT_TRAP_PHY_INFO_VER_3 (3)
+typedef struct hnd_ext_trap_phydbg_v3 {
+ uint8 version;
+ uint8 len;
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+ uint16 TxVhtSigA11;
+ uint16 TxVhtSigA20;
+ uint16 TxVhtSigA21;
+ uint16 txPktLength;
+ uint16 txPsdulengthCtr;
+ uint16 gpioClkControl;
+ uint16 gpioSel;
+ uint16 pktprocdebug;
+ uint32 gpioOut[3];
+ uint16 HESigURateFlagStatus;
+ uint16 HESigUsRateFlagStatus;
+ uint32 additional_regs[1];
+} hnd_ext_trap_phydbg_v3_t;
+
/* Phy TxErr Dump Structure */
#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1
#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2
@@ -440,6 +573,22 @@
uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */
} hnd_ext_trap_macphytxerr_v2_t;
+#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u)
+#define MAX_AER_HDR_LOG_REGS (4u)
+typedef struct hnd_ext_trap_pcie_err_attn_v1 {
+ uint8 version;
+ uint8 pad[3];
+ uint32 err_hdr_logreg1;
+ uint32 err_hdr_logreg2;
+ uint32 err_hdr_logreg3;
+ uint32 err_hdr_logreg4;
+ uint32 err_code_logreg;
+ uint32 err_type;
+ uint32 err_code_state;
+ uint32 last_err_attn_ts;
+ uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS];
+} hnd_ext_trap_pcie_err_attn_v1_t;
+
#define MAX_EVENTLOG_BUFFERS 48
typedef struct eventlog_trapdata_info {
uint32 num_elements;
@@ -455,7 +604,7 @@
#if defined(ETD) && !defined(WLETD)
#define ETD_SW_FLAG_MEM 0x00000001
-void etd_init(osl_t *osh);
+int etd_init(osl_t *osh);
int etd_register_trap_ext_callback(void *cb, void *arg);
int (etd_register_trap_ext_callback_late)(void *cb, void *arg);
uint32 *etd_get_trap_ext_data(void);
@@ -464,6 +613,13 @@
void etd_notify_trap_ext_callback(trap_t *tr);
reg_dump_config_t *etd_get_reg_dump_config_tbl(void);
uint etd_get_reg_dump_config_len(void);
+
+extern bool _etd_enab;
+
+ #define ETD_ENAB(pub) (_etd_enab)
+
+#else
+#define ETD_ENAB(pub) (0)
#endif /* WLETD */
#endif /* !LANGUAGE_ASSEMBLY */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/ethernet.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/ethernet.h
index fbb9946..6408ffb 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/ethernet.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/ethernet.h
@@ -1,7 +1,9 @@
/*
* From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log.h
index bd3ad88..b1aee3a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log.h
@@ -1,7 +1,9 @@
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_payload.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_payload.h
index d8c88b2..a33064f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_payload.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_payload.h
@@ -4,7 +4,9 @@
* This file describes the payloads of event log entries that are data buffers
* rather than formatted string entries. The contents are generally XTLVs.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -682,4 +684,118 @@
uint16 chanspec_list[WL_MSCH_NUMCHANNELS];
} msch_register_params_t;
+typedef struct {
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of frames for which the PLCP header parity check failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of frames with no valid delimiter detected by the ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 goodfcs; /**< Good fcs counters */
+ uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */
+} phy_periodic_counters_v1_t;
+
+typedef struct phycal_log_cmn {
+ uint16 chanspec; /* Current phy chanspec */
+ uint8 last_cal_reason; /* Last Cal Reason */
+ uint8 pad1; /* Padding byte to align with word */
+ uint last_cal_time; /* Last cal time in sec */
+} phycal_log_cmn_t;
+
+typedef struct phycal_log_core {
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */
+ uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */
+ uint16 bphy_txd; /* contain di & dq */
+
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ int32 rxs; /* FDIQ Slope coefficient */
+
+ uint8 baseidx; /* TPC Base index */
+ uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */
+ uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */
+ uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */
+ uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */
+ uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */
+ uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */
+ uint8 pad; /* Padding byte to align with word */
+} phycal_log_core_t;
+
+#define PHYCAL_LOG_VER1 (1u)
+
+typedef struct phycal_log_v1 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the entire structure */
+ phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
+ /* This will be a variable length based on the numcores field defined above */
+ phycal_log_core_t phycal_log_core[1];
+} phycal_log_v1_t;
+
+typedef struct phy_periodic_log_cmn {
+ uint16 chanspec; /* Current phy chanspec */
+ uint16 vbatmeas; /* Measured VBAT sense value */
+ uint16 featureflag; /* Currently active feature flags */
+ int8 chiptemp; /* Chip temperature */
+ int8 femtemp; /* Fem temperature */
+
+ uint32 nrate; /* Current Tx nrate */
+
+ uint8 cal_phase_id; /* Current Multi phase cal ID */
+ uint8 rxchain; /* Rx Chain */
+ uint8 txchain; /* Tx Chain */
+ uint8 ofdm_desense; /* OFDM desense */
+
+ uint8 bphy_desense; /* BPHY desense */
+ uint8 pll_lockstatus; /* PLL Lock status */
+ uint8 pad1; /* Padding byte to align with word */
+ uint8 pad2; /* Padding byte to align with word */
+
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+
+} phy_periodic_log_cmn_t;
+
+typedef struct phy_periodic_log_core {
+ uint8 baseindxval; /* TPC Base index */
+ int8 tgt_pwr; /* Programmed Target power */
+ int8 estpwradj; /* Current Est Power Adjust value */
+ int8 crsmin_pwr; /* CRS Min/Noise power */
+ int8 rssi_per_ant; /* RSSI Per antenna */
+ int8 snr_per_ant; /* SNR Per antenna */
+ int8 pad1; /* Padding byte to align with word */
+ int8 pad2; /* Padding byte to align with word */
+} phy_periodic_log_core_t;
+
+#define PHY_PERIODIC_LOG_VER1 (1u)
+
+typedef struct phy_periodic_log_v1 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the entire structure */
+ phy_periodic_log_cmn_t phy_perilog_cmn;
+ phy_periodic_counters_v1_t counters_peri_log;
+ /* This will be a variable length based on the numcores field defined above */
+ phy_periodic_log_core_t phy_perilog_core[1];
+} phy_periodic_log_v1_t;
+
#endif /* _EVENT_LOG_PAYLOAD_H_ */
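
A minimal sketch of sizing the variable-length records defined above; phy_periodic_log_len() is an illustrative helper and assumes numcores >= 1, since the base typedef already carries one phy_periodic_log_core_t:

    #include <typedefs.h>
    #include <event_log_payload.h>

    static uint16 phy_periodic_log_len(uint8 numcores)
    {
        return (uint16)(sizeof(phy_periodic_log_v1_t) +
                (uint)(numcores - 1u) * sizeof(phy_periodic_log_core_t));
    }

The same pattern applies to phycal_log_v1_t, whose trailing phycal_log_core[1] array is likewise dimensioned by its numcores field.
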
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_set.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_set.h
index cc93653..7541a81 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_set.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_set.h
@@ -1,7 +1,9 @@
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -36,50 +38,72 @@
* this to an appropriate number in their makefiles to reduce
* ROM invalidation
*/
-#define NUM_EVENT_LOG_SETS 11
+#define NUM_EVENT_LOG_SETS (24)
#endif // endif
-/* Legacy implementation does not have these sets. So make them 0. */
-#if (NUM_EVENT_LOG_SETS <= 8)
-#define NUM_EVENT_LOG_DBG_SETS 0
-#elif (NUM_EVENT_LOG_SETS == 9)
-#define NUM_EVENT_LOG_DBG_SETS 1
-#else
-#define NUM_EVENT_LOG_DBG_SETS 2
-#endif // endif
+/* Set assignments */
+#define EVENT_LOG_SET_BUS (0u)
+#define EVENT_LOG_SET_WL (1u)
+#define EVENT_LOG_SET_PSM (2u)
+#define EVENT_LOG_SET_ERROR (3u)
-/* Debug log sets start from this log set and are always the last few ones */
-/* Note that these log sets are not reserved for debug builds. They can be used
- * for other purpose as well. If used for other purpose, the debug log set
- * allocation code will check if there is a free one available out of
- * NUM_EVENT_LOG_DBG_SETS starting from EVENT_LOG_DBG_START_SET
+/* MSCH logging */
+#define EVENT_LOG_SET_MSCH_PROFILER (4u)
+
+#define EVENT_LOG_SET_5 (5u)
+#define EVENT_LOG_SET_ECOUNTERS (EVENT_LOG_SET_5)
+#define EVENT_LOG_SET_6 (6u)
+#define EVENT_LOG_SET_7 (7u)
+
+#define EVENT_LOG_SET_8 (8u)
+#define EVENT_LOG_SET_PRSRV (EVENT_LOG_SET_8)
+
+#define EVENT_LOG_SET_9 (9u)
+/* General purpose preserve chatty.
+ * EVENT_LOG_SET_PRSRV_CHATTY log set should not be used by FW as it is
+ * used by customer host. FW should use EVENT_LOG_SET_GP_PRSRV_CHATTY
+ * for general purpose preserve chatty logs.
*/
-#define EVENT_LOG_DBG_START_SET (NUM_EVENT_LOG_SETS - NUM_EVENT_LOG_DBG_SETS)
+#define EVENT_LOG_SET_GP_PRSRV_CHATTY (EVENT_LOG_SET_9)
+#define EVENT_LOG_SET_PRSRV_CHATTY (EVENT_LOG_SET_6)
-/* Define new event log sets here */
-#define EVENT_LOG_SET_BUS 0
-#define EVENT_LOG_SET_WL 1
-#define EVENT_LOG_SET_PSM 2
-#define EVENT_LOG_SET_ERROR 3
-#define EVENT_LOG_SET_MEM_API 4
-/* Share the set with MEM_API for now to limit ROM invalidation.
- * The above set is used in dingo only
- * On trunk, MSCH should move to a different set.
+/* BUS preserve */
+#define EVENT_LOG_SET_PRSRV_BUS (10u)
+
+/* WL preserve */
+#define EVENT_LOG_SET_PRSRV_WL (11u)
+
+/* Slotted BSS set */
+#define EVENT_LOG_SET_WL_SLOTTED_BSS (12u)
+
+/* PHY entity logging */
+#define EVENT_LOG_SET_PHY (13u)
+
+/* PHY preserve */
+#define EVENT_LOG_SET_PRSRV_PHY (14u)
+
+/* RTE entity */
+#define EVENT_LOG_SET_RTE (15u)
+
+/* Malloc and free logging */
+#define EVENT_LOG_SET_MEM_API (16u)
+
+/* Console buffer */
+#define EVENT_LOG_SET_RTE_CONS_BUF (17u)
+
+/* three log sets for general debug purposes */
+#define EVENT_LOG_SET_GENERAL_DBG_1 (18u)
+#define EVENT_LOG_SET_GENERAL_DBG_2 (19u)
+#define EVENT_LOG_SET_GENERAL_DBG_3 (20u)
+
+/* Log sets for capturing power-related logs. Note that these sets
+ * are to be used across the entire system and not just WL.
*/
-#define EVENT_LOG_SET_MSCH_PROFILER 4
-#define EVENT_LOG_SET_ECOUNTERS 5 /* Host to instantiate this for ecounters. */
-#define EVENT_LOG_SET_6 6 /* Instantiated by host for channel switch logs */
-#define EVENT_LOG_SET_7 7 /* Instantiated by host for AMPDU stats */
+#define EVENT_LOG_SET_POWER_1 (21u)
+#define EVENT_LOG_SET_POWER_2 (22u)
-/* The following ones could be used for debug builds. Always the last few ones */
-#define EVENT_LOG_SET_8 8
-#define EVENT_LOG_SET_9 9
-
-#define EVENT_LOG_SET_PRSRV 7 /* The logtag set flushed only on error. Share with 7 to avoid
- * abandons.
- */
-
-#define EVENT_LOG_SET_PRSRV_BUS 10
+/* Spare log sets for future assignments */
+#define EVENT_LOG_SET_23 (23u)
/* send delayed logs when >= 50% of buffer is full */
#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_tag.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_tag.h
index bb2c333..0e33635 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_tag.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_log_tag.h
@@ -1,7 +1,9 @@
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -240,6 +242,12 @@
#define EVENT_LOG_TAG_FILS_DBG 219
#define EVENT_LOG_TAG_FILS_INFO 220
#define EVENT_LOG_TAG_FILS_ERROR 221
+#define EVENT_LOG_TAG_HWA_TXPOST 222
+#define EVENT_LOG_TAG_HWA_TXDMA 223
+/* Arbitrator callback log tags */
+#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224
+#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225
+#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226
/* Debug tags for making debug builds */
#define EVENT_LOG_TAG_DBG1 251
@@ -288,10 +296,42 @@
#define EVENT_LOG_TAG_SAE_ERROR 287
#define EVENT_LOG_TAG_SAE_INFO 288
-/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */
-#define EVENT_LOG_TAG_MAX 288
-#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255
-/* Note: New event should be added/reserved in trunk before adding it to branches */
+/* rxsig module logging */
+#define EVENT_LOG_TAG_RXSIG_ERROR 289
+#define EVENT_LOG_TAG_RXSIG_DEBUG 290
+#define EVENT_LOG_TAG_RXSIG_INFO 291
+
+/* HE TWT HEB EVENT_LOG_TAG */
+#define EVENT_LOG_TAG_WL_HE_INFO 292
+#define EVENT_LOG_TAG_WL_HE_TRACE 293
+#define EVENT_LOG_TAG_WL_HE_WARN 294
+#define EVENT_LOG_TAG_WL_HE_ERROR 295
+#define EVENT_LOG_TAG_WL_TWT_INFO 296
+#define EVENT_LOG_TAG_WL_TWT_TRACE 297
+#define EVENT_LOG_TAG_WL_TWT_WARN 298
+#define EVENT_LOG_TAG_WL_TWT_ERROR 299
+#define EVENT_LOG_TAG_WL_HEB_ERROR 300
+#define EVENT_LOG_TAG_WL_HEB_TRACE 301
+
+/* RRM EVENT_LOG_TAG */
+#define EVENT_LOG_TAG_RRM_DBG 302
+#define EVENT_LOG_TAG_RRM_INFO 303
+#define EVENT_LOG_TAG_RRM_ERR 304
+
+/* scan core */
+#define EVENT_LOG_TAG_SC 305
+
+#define EVENT_LOG_TAG_ESP_DBG 306
+#define EVENT_LOG_TAG_ESP_INFO 307
+#define EVENT_LOG_TAG_ESP_ERR 308
+
+/* SDC */
+#define EVENT_LOG_TAG_SDC_DBG 309
+#define EVENT_LOG_TAG_SDC_INFO 310
+#define EVENT_LOG_TAG_SDC_ERR 311
+
+/* EVENT_LOG_TAG_MAX = Set to the same value as the last tag, not last tag + 1 */
+#define EVENT_LOG_TAG_MAX 312
typedef enum wl_el_set_type_def {
EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */
@@ -344,6 +384,9 @@
uint32 t; /* Type cheat */
} event_log_hdr_t;
+/* for internal use - legacy max. tag */
+#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255
+
/*
* The position of the extended header in the event log stream will be as follows:
* <event log payload><ARM cycle count timestamp><extended header><regular header>
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_trace.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_trace.h
index 5bd2077..3495ecb 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_trace.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/event_trace.h
@@ -1,7 +1,9 @@
/*
* Trace log blocks sent over HBUS
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/fils.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/fils.h
index dc93068..c5cf876 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/fils.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/fils.h
@@ -1,6 +1,8 @@
/*
* Fundamental types and constants relating to FILS AUTHENTICATION
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -86,8 +88,67 @@
uint8 tbtt_offset;
uint8 bssid[ETHER_ADDR_LEN];
uint32 short_ssid;
+ uint8 bss_params; /* BSS parameters subfield */
+ uint8 mhz_psd; /* 20 MHz PSD subfield */
} BWL_POST_PACKED_STRUCT tbtt_info_field_t;
+/* TBTT Information Length subfield value */
+typedef enum {
+ TBTTINFO_LEN_1 = 1, /* Neighbor AP TBTT Offset */
+ TBTTINFO_LEN_2 = 2, /* Neighbor AP TBTT Offset and BSS Parameters */
+ TBTTINFO_LEN_3 = 3, /* Reserved */
+ TBTTINFO_LEN_4 = 4, /* Reserved */
+ TBTTINFO_LEN_5 = 5, /* Neighbor AP TBTT Offset and the Short-SSID */
+ TBTTINFO_LEN_6 = 6, /* Neighbor AP TBTT Offset, Short-SSID subfield, BSS Parameters */
+ TBTTINFO_LEN_7 = 7, /* Neighbor AP TBTT Offset, BSSID subfields */
+ TBTTINFO_LEN_8 = 8, /* Neighbor AP TBTT Offset, BSSID subfield, BSS Parameters */
+ TBTTINFO_LEN_9 = 9, /* TBTT Offset, BSSID, BSS Parameters, 20 MHz PSD */
+ TBTTINFO_LEN_10 = 10, /* Reserved */
+ TBTTINFO_LEN_11 = 11, /* Neighbor AP TBTT Offset, BSSID subfield, short-SSID subfield */
+ TBTTINFO_LEN_12 = 12, /* TBTT Offset, BSSID subfield, short-SSID subfield, BSS params */
+ TBTTINFO_LEN_13 = 13, /* TBTT Offset, BSSID, short-SSID subfield, BSS params, 20MHz */
+ /* 14 - 255 reserved */
+} tbttinfo_len_t;
+
+/* TBTT Information field contents */
+enum {
+ NEIGHBOR_AP_TBTT_SUBFIELD_SHIFT = 0, /* Neighbor AP TBTT Offset */
+ BSSID_SUBFIELD_SHIFT = 1, /* BSSID (optional) */
+ SHORT_SSID_SUBFIELD_SHIFT = 2, /* Short-SSID (optional) */
+ BSS_PARAMETERS_SUBFIELD_SHIFT = 3, /* BSS parameters */
+ MHZ_SUBFIELD_SHIFT = 4 /* 20 MHz PSD */
+};
+
+/* Length in Octets */
+#define NEIGHBOR_AP_TBTT_SUBFIELD_LEN 1
+#define BSSID_SUBFIELD_LEN 6
+#define SHORT_SSID_SUBFIELD_LEN 4
+#define BSS_PARAMETERS_SUBFIELD_LEN 1
+#define MHZ_SUBFIELD_LEN 1
+
+#define NEIGHBOR_AP_TBTT_SUBFIELD (1 << NEIGHBOR_AP_TBTT_SUBFIELD_SHIFT)
+#define BSSID_SUBFIELD (1 << BSSID_SUBFIELD_SHIFT)
+#define SHORT_SSID_SUBFIELD (1 << SHORT_SSID_SUBFIELD_SHIFT)
+#define BSS_PARAMETERS_SUBFIELD (1 << BSS_PARAMETERS_SUBFIELD_SHIFT)
+#define MHZ_SUBFIELD (1 << MHZ_SUBFIELD_SHIFT)
+
+/* BSS parameters subfield 802.11ax D8.0 */
+#define TBTT_INFO_BSS_PARAMS_OCT_IDX 0 /* OCT Recommended subfield */
+#define TBTT_INFO_BSS_PARAMS_OCT_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_SSSID_IDX 1 /* Same SSID subfield */
+#define TBTT_INFO_BSS_PARAMS_SSSID_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_MBSSID_IDX 2 /* Multiple BSSID subfield */
+#define TBTT_INFO_BSS_PARAMS_MBSSID_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_TBSSID_IDX 3 /* Transmitted BSSID subfield */
+#define TBTT_INFO_BSS_PARAMS_TBSSID_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_ESS_COLOCATED_IDX 4 /* ESS With 2.4/5 GHz Co-Located AP */
+#define TBTT_INFO_BSS_PARAMS_ESS_COLOCATED_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_UNSOLPB_IDX 5 /* Unsolicited Probe Responses Active */
+#define TBTT_INFO_BSS_PARAMS_UNSOLPB_FSZ 1
+#define TBTT_INFO_BSS_PARAMS_COLOC_AP_IDX 6 /* Co-Located AP subfield */
+#define TBTT_INFO_BSS_PARAMS_COLOC_AP_FSZ 1
+/* Bit 7 is reserved */
+
#define TBTT_INFO_FIELD_HDR_LEN (sizeof(tbtt_info_field_t))
/* 11ai D11.0 9.4.2.171.1 Neighbor AP Information field */
@@ -136,6 +197,17 @@
uint8 fils_nonce[FILS_NONCE_LENGTH];
} BWL_POST_PACKED_STRUCT fils_nonce_element_t;
+/* 11ai 9.4.2.186 FILS Key Delivery element */
+#define FILS_KEY_RSC_LENGTH 8u
+
+typedef BWL_PRE_PACKED_STRUCT struct fils_key_delivery_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 key_rsc[FILS_KEY_RSC_LENGTH];
+ uint8 kde_list[]; /* Key Data Elements */
+} BWL_POST_PACKED_STRUCT fils_key_delivery_element_t;
+
/* 8.4.2.175 FILS Session element */
#define FILS_SESSION_LENGTH 8u
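
A minimal sketch of how the per-subfield length defines above combine; the 'contents' bitmap built from the *_SUBFIELD flags is a driver-internal convention assumed here, and tbtt_info_len() is an illustrative name:

    #include <typedefs.h>
    #include <fils.h>

    static uint tbtt_info_len(uint contents)
    {
        uint len = NEIGHBOR_AP_TBTT_SUBFIELD_LEN;  /* TBTT Offset is always present */

        if (contents & BSSID_SUBFIELD)
            len += BSSID_SUBFIELD_LEN;
        if (contents & SHORT_SSID_SUBFIELD)
            len += SHORT_SSID_SUBFIELD_LEN;
        if (contents & BSS_PARAMETERS_SUBFIELD)
            len += BSS_PARAMETERS_SUBFIELD_LEN;
        if (contents & MHZ_SUBFIELD)
            len += MHZ_SUBFIELD_LEN;
        return len;
    }

For example, BSSID plus BSS Parameters yields 1 + 6 + 1 = 8 octets, matching TBTTINFO_LEN_8 in the enum above.
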
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
index 43d5503..ac61c90 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
@@ -1,7 +1,9 @@
/*
* HND arm trap handling.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_cons.h
index 3aaa78a..94af2fe 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_cons.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_cons.h
@@ -1,7 +1,9 @@
/*
* Console support for RTE - for host use only.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -37,6 +39,8 @@
#ifndef LOG_BUF_LEN
#if defined(BCM_BIG_LOG)
#define LOG_BUF_LEN (16 * 1024)
+#elif defined(BCMQT)
+#define LOG_BUF_LEN (16 * 1024)
#else
#define LOG_BUF_LEN 1024
#endif // endif
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_debug.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_debug.h
index 6434efd..d21715f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_debug.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_debug.h
@@ -1,7 +1,9 @@
/*
* HND Run Time Environment debug info area
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
index 620e63e..a437230 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
@@ -1,7 +1,9 @@
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
index 1efe449..88fab42 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
@@ -1,7 +1,9 @@
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_trap.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_trap.h
index be7aa7e..5482654 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_trap.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hnd_trap.h
@@ -1,7 +1,9 @@
/*
* HND Trap handling.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndchipc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndchipc.h
index 112ab65..e12545a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndchipc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndchipc.h
@@ -1,7 +1,9 @@
/*
* HND SiliconBackplane chipcommon support - OS independent.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndlhl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndlhl.h
index 5ed670e..f7f4611 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndlhl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndlhl.h
@@ -1,7 +1,9 @@
/*
* HND SiliconBackplane PMU support.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndmem.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndmem.h
index 36465c7..8b40e91 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndmem.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndmem.h
@@ -1,7 +1,9 @@
/*
* Utility routines for configuring different memories in Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndoobr.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndoobr.h
new file mode 100644
index 0000000..f277e5c
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndoobr.h
@@ -0,0 +1,77 @@
+/*
+ * HND OOBR interface header
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndoobr.h 772387 2018-07-17 00:58:05Z $
+ */
+
+#ifndef _hndoobr_h_
+#define _hndoobr_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+/* for 'srcpidx' of hnd_oobr_get_intr_config() */
+#define HND_CORE_MAIN_INTR 0
+#define HND_CORE_ALT_INTR 1
+
+uint32 hnd_oobr_get_intstatus(si_t *sih);
+int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx);
+int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum);
+void hnd_oobr_init(si_t *sih);
+
+#define OOBR_INVALID_PORT 0xFFu
+
+/* per core source/dest sel reg */
+#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */
+#define OOBR_INTR_NUM_MASK 0x7Fu
+#define OOBR_INTR_EN 0x80u
+/* per core config reg */
+#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u
+#define OOBR_CORECNF_OUTPUT_SHIFT 8u
+#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u
+#define OOBR_CORECNF_INPUT_SHIFT 16u
+
+typedef volatile struct hndoobr_percore_reg {
+ uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */
+ uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */
+ uint32 reserved[6];
+ uint32 config; /* 0x38 */
+ uint32 reserved1[17]; /* 0x3c to 0x7c */
+} hndoobr_percore_reg_t;
+
+/* capability reg */
+#define OOBR_CAP_CORECNT_MASK 0x1fu
+typedef volatile struct hndoobr_reg {
+ uint32 capability; /* 0x00 */
+ uint32 reserved[3];
+ uint32 intstatus[4]; /* 0x10 - 0x1c */
+ uint32 reserved1[56]; /* 0x20 - 0xfc */
+ hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */
+} hndoobr_reg_t;
+
+#endif /* _hndoobr_h_ */
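A minimal usage sketch for the new OOBR register map above (illustrative only, not from the original header); it assumes 'oobr' points at the mapped hndoobr_reg_t block and uses R_REG()/printf() from the OSL headers:

    static void oobr_dump_core_config(osl_t *osh, hndoobr_reg_t *oobr)
    {
        uint32 cap = R_REG(osh, &oobr->capability);
        uint corecnt = cap & OOBR_CAP_CORECNT_MASK;   /* bits [4:0] */
        uint i;

        for (i = 0; i < corecnt; i++) {
            uint32 cnf = R_REG(osh, &oobr->percore_reg[i].config);
            uint outputs = (cnf & OOBR_CORECNF_OUTPUT_MASK) >> OOBR_CORECNF_OUTPUT_SHIFT;
            uint inputs = (cnf & OOBR_CORECNF_INPUT_MASK) >> OOBR_CORECNF_INPUT_SHIFT;

            printf("oobr core %u: %u outputs, %u inputs\n", i, outputs, inputs);
        }
    }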
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndpmu.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndpmu.h
index 41818b7..d2e11b1 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndpmu.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -1,7 +1,9 @@
/*
* HND SiliconBackplane PMU support.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndsoc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndsoc.h
index 153e44c..c3942a8 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndsoc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -1,7 +1,9 @@
/*
* Broadcom HND chip & on-chip-interconnect-related definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -216,6 +218,7 @@
#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */
#define SYSMEM_CORE_ID 0x849 /* System memory core */
#define HUB_CORE_ID 0x84b /* Hub core ID */
+#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */
#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
#define EROM_CORE_ID 0x366 /* EROM core ID */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_osl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_osl.h
index 85f03c4..1817601 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_osl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -1,7 +1,9 @@
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -36,7 +38,6 @@
#include <linux/time64.h>
void do_gettimeofday(struct timeval *tv);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1) */
-
/* Linux Kernel: File Operations: start */
extern void * osl_os_open_image(char * filename);
extern int osl_os_get_image_block(char * buf, int len, void * image);
@@ -55,7 +56,10 @@
extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
extern void* osl_get_bus_handle(osl_t *osh);
#ifdef DHD_MAP_LOGGING
-extern void osl_dma_map_dump(void);
+extern void osl_dma_map_dump(osl_t *osh);
+#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh)
+#else
+#define OSL_DMA_MAP_DUMP(osh) do {} while (0)
#endif /* DHD_MAP_LOGGING */
/* Global ASSERT type */
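A short call-site sketch (hypothetical, not from the original sources): OSL_DMA_MAP_DUMP() can be invoked unconditionally because it compiles to a no-op when DHD_MAP_LOGGING is not defined:

    static void example_dma_error_path(osl_t *osh)
    {
        /* dumps the DMA map log only in DHD_MAP_LOGGING builds */
        OSL_DMA_MAP_DUMP(osh);
    }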
@@ -332,14 +336,15 @@
#include <linuxver.h> /* use current 2.4.x calling conventions */
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
extern uint64 osl_sysuptime_us(void);
#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
#define OSL_SYSUPTIME_US() osl_sysuptime_us()
-#else
-#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
-#error "OSL_SYSUPTIME_US() may need to be defined"
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+extern uint64 osl_localtime_ns(void);
+extern void osl_get_localtime(uint64 *sec, uint64 *usec);
+extern uint64 osl_systztime_us(void);
+#define OSL_LOCALTIME_NS() osl_localtime_ns()
+#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec))
+#define OSL_SYSTZTIME_US() osl_systztime_us()
#define printf(fmt, args...) printk(fmt , ## args)
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
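A sketch of the new timestamp helpers (assumed usage, not from the original header); uint64 comes from typedefs.h and printf maps to printk in this layer:

    static void example_log_time(void)
    {
        uint64 sec = 0, usec = 0;
        uint64 ns = OSL_LOCALTIME_NS();      /* local time in nanoseconds */

        OSL_GET_LOCALTIME(&sec, &usec);      /* split into s/us for logging */
        printf("event at %llu.%06llu (raw %llu ns)\n", sec, usec, ns);
    }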
@@ -580,4 +585,39 @@
extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
+typedef atomic_t osl_atomic_t;
+#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x)
+#define OSL_ATOMIC_INIT(osh, v) atomic_set(v, 0)
+#define OSL_ATOMIC_INC(osh, v) atomic_inc(v)
+#define OSL_ATOMIC_INC_RETURN(osh, v) atomic_inc_return(v)
+#define OSL_ATOMIC_DEC(osh, v) atomic_dec(v)
+#define OSL_ATOMIC_DEC_RETURN(osh, v) atomic_dec_return(v)
+#define OSL_ATOMIC_READ(osh, v) atomic_read(v)
+#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x)
+
+#ifndef atomic_set_mask
+#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v)
+#else
+#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v)
+#endif // endif
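Sketch of the OSL atomic wrappers in use (hypothetical refcount, not from the original header); 'osh' is unused by the Linux mappings but kept for portability:

    static osl_atomic_t example_refcnt;

    static void example_get(osl_t *osh)
    {
        OSL_ATOMIC_INC(osh, &example_refcnt);
    }

    static bool example_put(osl_t *osh)
    {
        /* returns TRUE when the last reference is dropped */
        return (OSL_ATOMIC_DEC_RETURN(osh, &example_refcnt) == 0);
    }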
+
+#include <linux/rbtree.h>
+
+typedef struct rb_node osl_rb_node_t;
+typedef struct rb_root osl_rb_root_t;
+
+#define OSL_RB_ENTRY(ptr, type, member) rb_entry(ptr, type, member)
+#define OSL_RB_INSERT_COLOR(root, node) rb_insert_color(root, node)
+#define OSL_RB_ERASE(node, root) rb_erase(node, root)
+#define OSL_RB_FIRST(root) rb_first(root)
+#define OSL_RB_LAST(root) rb_last(root)
+#define OSL_RB_LINK_NODE(node, parent, rb_link) \
+ rb_link_node(node, parent, rb_link)
+
+extern void *osl_spin_lock_init(osl_t *osh);
+extern void osl_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_spin_lock(void *lock);
+extern void osl_spin_unlock(void *lock, unsigned long flags);
#endif /* _linux_osl_h_ */
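Sketch of the osl_spin_lock_*() API declared above (assumed usage, not from the original header):

    static void example_locked_update(osl_t *osh)
    {
        void *lock = osl_spin_lock_init(osh);
        unsigned long flags;

        if (lock == NULL)
            return;
        flags = osl_spin_lock(lock);
        /* ... critical section ... */
        osl_spin_unlock(lock, flags);
        osl_spin_lock_deinit(osh, lock);
    }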
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_pkt.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_pkt.h
index 9baec52..1292b1a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_pkt.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linux_pkt.h
@@ -1,7 +1,9 @@
/*
* Linux Packet (skb) interface
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -121,12 +123,14 @@
#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#define PKTORPHAN(skb) skb_orphan(skb)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
#define PKTORPHAN(skb) osl_pkt_orphan_partial(skb)
extern void osl_pkt_orphan_partial(struct sk_buff *skb);
#else
#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;})
-#endif /* LINUX VERSION >= 3.6 */
+#endif /* LINUX VERSION >= 3.6 and 4.12 */
#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
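Sketch: on kernels >= 4.12 PKTORPHAN() now maps directly to skb_orphan(), detaching the skb from its owning socket before it is queued for transmit (hypothetical call site):

    static void example_tx_prep(struct sk_buff *skb)
    {
        PKTORPHAN(skb);   /* avoid TSQ throttling the sending socket */
    }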
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linuxver.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linuxver.h
index e619796..b2f045a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linuxver.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -2,7 +2,9 @@
* Linux-specific abstractions to gain some independence from linux kernel versions.
* Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -587,6 +589,8 @@
struct semaphore sema;
int terminated;
struct completion completed;
+ int flush_ind;
+ struct completion flushed;
spinlock_t spinlock;
int up_cnt;
} tsk_ctl_t;
@@ -647,9 +651,12 @@
{ \
sema_init(&((tsk_ctl)->sema), 0); \
init_completion(&((tsk_ctl)->completed)); \
+ init_completion(&((tsk_ctl)->flushed)); \
(tsk_ctl)->parent = owner; \
(tsk_ctl)->proc_name = name; \
(tsk_ctl)->terminated = FALSE; \
+ (tsk_ctl)->flush_ind = FALSE; \
+ (tsk_ctl)->up_cnt = 0; \
(tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
if (IS_ERR((tsk_ctl)->p_task)) { \
(tsk_ctl)->thr_pid = -1; \
@@ -663,15 +670,70 @@
}; \
}
+#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */
+
#define PROC_STOP(tsk_ctl) \
{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
(tsk_ctl)->terminated = TRUE; \
smp_wmb(); \
up(&((tsk_ctl)->sema)); \
- wait_for_completion(&((tsk_ctl)->completed)); \
- DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->parent = NULL; \
+ (tsk_ctl)->proc_name = NULL; \
(tsk_ctl)->thr_pid = -1; \
+ (tsk_ctl)->up_cnt = 0; \
+}
+
+#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
+{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ binary_sema_up(tsk_ctl); \
+ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->parent = NULL; \
+ (tsk_ctl)->proc_name = NULL; \
+ (tsk_ctl)->thr_pid = -1; \
+}
+
+/*
+* Flush is non-reentrant, so callers must make sure
+* there is no race condition.
+* For safer exit, wait_for_completion_timeout() is used
+* with a PROC_WAIT_TIMEOUT_MSEC (5 s) timeout.
+*/
+#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
+{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ (tsk_ctl)->flush_ind = TRUE; \
+ smp_wmb(); \
+ binary_sema_up(tsk_ctl); \
+ DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
}
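Sketch of a worker thread shaped to match the stop/flush macros above (assumed, not from the original sources): it must complete 'flushed' when flush_ind is set and 'completed' on exit, or the wait_for_completion_timeout() paths will time out:

    static int example_thread(void *data)
    {
        tsk_ctl_t *tsk = (tsk_ctl_t *)data;

        while (down_interruptible(&tsk->sema) == 0) {
            if (tsk->terminated)
                break;
            if (tsk->flush_ind) {
                /* drain any queued work here */
                tsk->flush_ind = FALSE;
                smp_wmb();
                complete(&tsk->flushed);
                continue;
            }
            /* normal work item handling */
        }
        complete_and_exit(&tsk->completed, 0);
        return 0;
    }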
/* ----------------------- */
@@ -819,6 +881,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
+int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
#endif /* _linuxver_h_ */
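Sketch: reading through kernel_read_compat() keeps the call site identical on both sides of the 4.14 kernel_read() signature change ('fp' assumed to be a valid struct file *; open/close and error handling omitted):

    static int example_read_header(struct file *fp, char *buf, unsigned long len)
    {
        return kernel_read_compat(fp, 0, buf, len);
    }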
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/lpflags.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/lpflags.h
index 72871bc..6f56f97 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/lpflags.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/lpflags.h
@@ -1,7 +1,9 @@
/*
* Chip related low power flags
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/mbo.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/mbo.h
index 38c701b..1d9a66a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/mbo.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/mbo.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to WFA MBO
* (Multiband Operation)
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -161,7 +163,8 @@
MBO_ASSOC_DISALLOWED_RC_MAX_STA_REACHED = 2,
MBO_ASSOC_DISALLOWED_RC_AIR_IFACE_OVERLOADED = 3,
MBO_ASSOC_DISALLOWED_RC_AUTH_SRVR_OVERLOADED = 4,
- MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5
+ MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5,
+ MBO_ASSOC_DISALLOWED_RC_INVALID = 0xffff
};
/* Cellular Data Conn Pref attribute */
@@ -279,6 +282,19 @@
uint8 cell_conn;
} BWL_POST_PACKED_STRUCT wifi_mbo_cell_cap_subelem_t;
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_pref_chan_subelem_s {
+ /* 0xDD */
+ uint8 sub_elem_id;
+ /* Length of the following fields in sub-element */
+ uint8 len;
+ /* WFA_OUI 50:6F:9A */
+ uint8 oui[WFA_OUI_LEN];
+ /* OUI_TYPE 0x02 */
+ uint8 oui_type;
+ /* variable length of channel preference data */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT wifi_mbo_pref_chan_subelem_t;
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
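Sketch filling the fixed fields of the new preference-channel subelement (hypothetical helper, not from the original header; WFA_OUI/WFA_OUI_LEN come from the surrounding 802.11 headers):

    static void example_fill_pref_chan(wifi_mbo_pref_chan_subelem_t *sub,
        const uint8 *pref, uint8 pref_len)
    {
        sub->sub_elem_id = 0xDD;                /* vendor-specific ID, per struct comment */
        sub->len = WFA_OUI_LEN + 1 + pref_len;  /* OUI + OUI type + preference data */
        memcpy(sub->oui, WFA_OUI, WFA_OUI_LEN); /* 50:6F:9A */
        sub->oui_type = 0x02;
        memcpy(sub->data, pref, pref_len);
    }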
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/miniopt.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/miniopt.h
index 1386e27..2db807b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/miniopt.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -1,7 +1,9 @@
/*
* Command line options parser.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msf.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msf.h
index 48c2710..9491806 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msf.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msf.h
@@ -1,7 +1,9 @@
/*
* Common interface to MSF (multi-segment format) definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msgtrace.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msgtrace.h
index 2b57474..1d1cfe0 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msgtrace.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -1,7 +1,9 @@
/*
* Trace messages sent over HBUS
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/nan.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/nan.h
index 99eec22..7c37f6d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/nan.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/nan.h
@@ -2,7 +2,9 @@
* Fundamental types and constants relating to WFA NAN
* (Neighbor Awareness Networking)
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl.h
index cf7c571..0d8c4a3 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl.h
@@ -1,7 +1,9 @@
/*
* OS Abstraction Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -38,7 +40,7 @@
TAIL_BYTES_TYPE_MIC = 3
};
-#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
+#define OSL_PKTTAG_SZ 48 /* standard linux pkttag size is 48 bytes */
/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
@@ -90,6 +92,22 @@
#define OSL_SYSUPTIME_SUPPORT TRUE
#endif /* OSL_SYSUPTIME */
+#ifndef OSL_GET_LOCALTIME
+#define OSL_GET_LOCALTIME(sec, usec) \
+ do { \
+ BCM_REFERENCE(sec); \
+ BCM_REFERENCE(usec); \
+ } while (0)
+#endif /* OSL_GET_LOCALTIME */
+
+#ifndef OSL_LOCALTIME_NS
+#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC)
+#endif /* OSL_LOCALTIME_NS */
+
+#ifndef OSL_SYSTZTIME_US
+#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US()
+#endif /* OSL_SYSTZTIME_US */
+
#ifndef OSL_SYS_HALT
#define OSL_SYS_HALT() do {} while (0)
#endif // endif
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_decl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_decl.h
index 0f41129..a36c7c4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_decl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_decl.h
@@ -1,7 +1,9 @@
/*
* osl forward declarations
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_ext.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_ext.h
index 096589f..97fa174 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_ext.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/osl_ext.h
@@ -2,7 +2,9 @@
* OS Abstraction Layer Extension - the APIs defined by the "extension" API
* are only supported by a subset of all operating systems.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/p2p.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/p2p.h
index 2fd541e..9e1d9a8 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/p2p.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/p2p.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -40,7 +42,6 @@
#include <packed_section_start.h>
/* WiFi P2P OUI values */
-#define P2P_OUI WFA_OUI /* WiFi P2P OUI */
#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */
#define P2P_IE_ID 0xdd /* P2P IE element ID */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_end.h
index 2bad094..c1654c8 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_end.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -15,7 +15,9 @@
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_start.h
index cd47c12..56b21fd 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_start.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -15,7 +15,9 @@
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcicfg.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcicfg.h
index 8c0b1f5..03d53f0 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcicfg.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -1,7 +1,9 @@
/*
* pcicfg.h: PCI configuration constants and structures.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -78,6 +80,7 @@
#define PCI_CAP_MSICAP_ID 0x05
#define PCI_CAP_VENDSPEC_ID 0x09
#define PCI_CAP_PCIECAP_ID 0x10
+#define PCI_CAP_MSIXCAP_ID 0x11
/* Data structure to define the Message Signalled Interrupt facility
* Valid for PCI and PCIE configurations
@@ -135,11 +138,13 @@
/* PCIE Extended configuration */
#define PCIE_ADV_CORR_ERR_MASK 0x114
+#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14
#define CORR_ERR_RE (1 << 0) /* Receiver */
-#define CORR_ERR_BT (1 << 6) /* Bad TLP */
+#define CORR_ERR_BT (1 << 6) /* Bad TLP */
#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */
#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */
+#define CORR_ERR_AE (1 << 13) /* Advisory Non-Fatal Error Mask */
#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
CORR_ERR_RR | CORR_ERR_RT)
@@ -247,6 +252,15 @@
#define PCI_PHY_DBG_CLKREG_2 0x1e18
#define PCI_PHY_DBG_CLKREG_3 0x1e1c
+/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */
+#define PCIE_BAR1COHERENTACCEN_BIT 8
+#define PCIE_BAR2COHERENTACCEN_BIT 9
+#define PCIE_SSRESET_STATUS_BIT 13
+#define PCIE_SSRESET_DISABLE_BIT 14
+#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15
+
+#define PCIE_BARCOHERENTACCEN_MASK 0x300
+
/* Bit settings for PCI_UC_ERR_STATUS register */
#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */
#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */
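Sketch: composing the new BAR coherent-access enable bits into a fetched subsystem-control value (how the register is read and written back is outside this header and assumed):

    static uint32 example_enable_coherent_access(uint32 subsys_ctrl)
    {
        /* bits 8 and 9, i.e. PCIE_BARCOHERENTACCEN_MASK (0x300) */
        subsys_ctrl |= (1u << PCIE_BAR1COHERENTACCEN_BIT) |
                       (1u << PCIE_BAR2COHERENTACCEN_BIT);
        return subsys_ctrl;
    }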
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcie_core.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcie_core.h
index c80330d..4a6ff73 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcie_core.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/pcie_core.h
@@ -1,7 +1,9 @@
/*
* BCM43XX PCIE core hardware definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -322,6 +324,7 @@
uint32 clk_ctl_st; /* 0xAE0 */
uint32 PAD[1]; /* 0xAE4 */
uint32 powerctl; /* 0xAE8 */
+ uint32 PAD[5]; /* 0xAEC-0xAFF */
} dar;
/* corerev > = 64 */
struct {
@@ -351,7 +354,7 @@
uint32 erraddr; /* 0xA64 */
uint32 mbox_int; /* 0xA68 */
uint32 fis_ctrl; /* 0xA6C */
- uint32 PAD[4]; /* 0xA70 - 0xA7C */
+ uint32 PAD[4]; /* 0xA70 - 0xA7C */
uint32 d2h_msg_reg0; /* 0xA80 */
uint32 d2h_msg_reg1; /* 0xA84 */
uint32 PAD[2]; /* 0xA88 - 0xA8C */
@@ -359,6 +362,31 @@
uint32 h2d_msg_reg1; /* 0xA94 */
} dar_64;
} u1;
+ uint32 PAD[64]; /* 0xB00-0xBFF */
+ /* Function Control/Status Registers for corerev >= 64 */
+ /* 0xC00 - 0xCFF */
+ struct {
+ uint32 control; /* 0xC00 */
+ uint32 iostatus; /* 0xC04 */
+ uint32 capability; /* 0xC08 */
+ uint32 PAD[1]; /* 0xC0C */
+ uint32 intstatus; /* 0xC10 */
+ uint32 intmask; /* 0xC14 */
+ uint32 pwr_intstatus; /* 0xC18 */
+ uint32 pwr_intmask; /* 0xC1C */
+ uint32 msi_vector; /* 0xC20 */
+ uint32 msi_intmask; /* 0xC24 */
+ uint32 msi_intstatus; /* 0xC28 */
+ uint32 msi_pend_cnt; /* 0xC2C */
+ uint32 mbox_intstatus; /* 0xC30 */
+ uint32 mbox_intmask; /* 0xC34 */
+ uint32 ltr_state; /* 0xC38 */
+ uint32 PAD[1]; /* 0xC3C */
+ uint32 intr_vector; /* 0xC40 */
+ uint32 intr_addrlow; /* 0xC44 */
+ uint32 intr_addrhigh; /* 0xC48 */
+ uint32 PAD[45]; /* 0xC4C-0xCFF */
+ } ftn_ctrl;
} sbpcieregs_t;
#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */
@@ -381,6 +409,12 @@
#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 0x1 : 0x800000) /* implicit M2M DMA mode */
#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */
+/* Function control (corerev > 64) */
+#define PCIE_CPLCA_ENABLE 0x01
+/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */
+#define PCIE_DLY_PERST_TO_COE 0x02
+/* when set, PERST is held asserted until SPROM-related register updates have completed */
+
#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
#define PCIE_SWPME_FN0 0x10000
@@ -765,6 +799,16 @@
/* Uc_Err reg offset in AER Cap */
#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */
+#define PCIE_EXTCAP_ERR_HEADER_LOG_0 28
+#define PCIE_EXTCAP_ERR_HEADER_LOG_1 32
+#define PCIE_EXTCAP_ERR_HEADER_LOG_2 36
+#define PCIE_EXTCAP_ERR_HEADER_LOG_3 40
+
+/* L1SS reg offset in L1SS Ext Cap */
+#define PCIE_EXTCAP_ID_L1SS 0x1e /* PCI Express L1 PM Substates Capability */
+#define PCIE_EXTCAP_L1SS_CAP_OFFSET 4 /* L1SSCap reg offset in L1SS Cap */
+#define PCIE_EXTCAP_L1SS_CONTROL_OFFSET 8 /* L1SSControl reg offset in L1SS Cap */
+#define PCIE_EXTCAP_L1SS_CONTROL2_OFFSET 0xc /* L1SSControl2 reg offset in L1SS Cap */
/* Linkcontrol reg offset in PCIE Cap */
#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */
@@ -789,6 +833,7 @@
#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */
#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */
+#define PCIE_ASPM_CTRL_MASK 3 /* bit 0 and 1 */
#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
@@ -797,6 +842,9 @@
#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */
#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */
+#define PCIE_EXT_L1SS_MASK 0xf /* Bits [3:0] of L1SSControl 0x248 */
+#define PCIE_EXT_L1SS_ENAB 0xf /* Bits [3:0] of L1SSControl 0x248 */
+
/* NumMsg and NumMsgEn in PCIE MSI Cap */
#define MSICAP_NUM_MSG_SHF 17
#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF)
@@ -879,6 +927,12 @@
#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14
#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18
#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C
+#define PCIECFGREG_PHY_LTSSM_HIST_0 0x1CEC
+#define PCIECFGREG_PHY_LTSSM_HIST_1 0x1CF0
+#define PCIECFGREG_PHY_LTSSM_HIST_2 0x1CF4
+#define PCIECFGREG_PHY_LTSSM_HIST_3 0x1CF8
+#define PCIECFGREG_TREFUP 0x1814
+#define PCIECFGREG_TREFUP_EXT 0x1818
/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */
@@ -906,6 +960,7 @@
#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */
#define PCIH2D_DB1_2 0x164
#define PCID2H_MailBox_2 0x168
+#define PCIE_CLK_CTRL 0x1E0
#define PCIE_PWR_CTRL 0x1E8
#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00)
@@ -913,15 +968,27 @@
#define IDMAControl(rev) (REV_GE_64(rev) ? 0x480 : 0x00)
#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48)
#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C)
+#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20)
+#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24)
+#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4)
+#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8)
+#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0)
+#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8)
+#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20)
+#define PCIDARH2D_DB0(rev) (REV_GE_64(rev) ? 0xA20 : 0xA28)
+#define PCIDARErrlog(rev) (REV_GE_64(rev) ? 0xA60 : 0xA40)
+#define PCIDARErrlog_Addr(rev) (REV_GE_64(rev) ? 0xA64 : 0xA44)
+#define PCIDARMailboxint(rev) (REV_GE_64(rev) ? 0xA68 : 0xA48)
#define PCIMSIVecAssign 0x58
/* HMAP Registers */
-#define PCI_HMAP_WINDOW_BASE 0x540 /* base of all HMAP window registers */
-#define PCI_HMAP_VIOLATION_ADDR_L 0x5C0
-#define PCI_HMAP_VIOLATION_ADDR_U 0x5C4
-#define PCI_HMAP_VIOLATION_INFO 0x5C8
-#define PCI_HMAP_WINDOW_CONFIG 0x5D0
+/* base of all HMAP window registers */
+#define PCI_HMAP_WINDOW_BASE(rev) (REV_GE_64(rev) ? 0x580u : 0x540u)
+#define PCI_HMAP_VIOLATION_ADDR_L(rev) (REV_GE_64(rev) ? 0x600u : 0x5C0u)
+#define PCI_HMAP_VIOLATION_ADDR_U(rev) (REV_GE_64(rev) ? 0x604u : 0x5C4u)
+#define PCI_HMAP_VIOLATION_INFO(rev) (REV_GE_64(rev) ? 0x608u : 0x5C8u)
+#define PCI_HMAP_WINDOW_CONFIG(rev) (REV_GE_64(rev) ? 0x610u : 0x5D0u)
#define PCI_HMAP_NWINDOWS_SHIFT 8
#define PCI_HMAP_NWINDOWS_MASK 0x0000ff00 /* bits 8:15 */
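Sketch: the rev-parameterized offsets above let one call site cover both the legacy and the corerev >= 64 register layouts (si_corereg()/si_coreidx() from siutils.h assumed):

    static uint32 example_read_mailbox_int(si_t *sih, uint corerev)
    {
        /* mask/val of 0 performs a read-only access and returns the value */
        return si_corereg(sih, si_coreidx(sih), PCIMailBoxInt(corerev), 0, 0);
    }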
@@ -992,6 +1059,11 @@
OFFSETOF(sbpcieregs_t, u1.dar_64.intstatus) : \
OFFSETOF(sbpcieregs_t, u1.dar.intstatus))
+#define DAR_FIS_CTRL(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.fis_ctrl)
+
+#define DAR_FIS_START_SHIFT 0u
+#define DAR_FIS_START_MASK (1u << DAR_FIS_START_SHIFT)
+
#define PCIE_PWR_REQ_PCIE (0x1 << 8)
/* SROM hardware region */
@@ -1134,12 +1206,19 @@
#define PCIE_DAR_MSG_H2D_REG1_OFFSET 0xA94
#define HS_POLL_PERIOD_US 10
+#ifdef BCMQT
+#define D2H_READY_WD_RESET_COUNT (84) /* ~84secs >~ BL ready time after wd rst */
+#define D2H_READY_WD_RESET_US 1000000 /* 1s */
+#define D2H_READY_TIMEOUT_US (1000000 * 60 * 3) /* 3 Mins >~ FW download time */
+#define D2H_VALDN_DONE_TIMEOUT_US (1000000 * 60 * 5) /* 5 Mins >~ Validation time */
+#define D2H_TRX_HDR_PARSE_DONE_TIMEOUT_US (1000000 * 60 * 1) /* 1 Min >~ TRX Parsing */
+#else
+#define D2H_READY_WD_RESET_COUNT (200) /* ~200ms >~ BL ready time after wd rst */
#define D2H_READY_WD_RESET_US 1000 /* 1ms */
#define D2H_READY_TIMEOUT_US (100000) /* 100ms >~ FW download time */
#define D2H_VALDN_DONE_TIMEOUT_US (250000) /* 250ms >~ Validation time */
#define D2H_TRX_HDR_PARSE_DONE_TIMEOUT_US (50000) /* 50ms >~ TRX Parsing */
-#define D2H_READY_WD_RESET_COUNT (200000) /* ~200ms >~ BL ready time after wd rst */
-#define D2H_READY_WD_RESET_DBG_PRINT_US (10000) /* 10ms - DEBUG print at evry 10ms */
+#endif // endif
typedef struct bl_hs_address {
volatile void *d2h;
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/rte_ioctl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/rte_ioctl.h
index 59253eb..76e24e7 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/rte_ioctl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/rte_ioctl.h
@@ -1,7 +1,9 @@
/*
* HND Run Time Environment ioctl.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbchipc.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbchipc.h
index 35bc1e6..731ef6c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbchipc.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -7,7 +7,9 @@
*
* $Id: sbchipc.h 701163 2017-05-23 22:21:03Z $
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -117,7 +119,10 @@
uint32 pmuintctrl1; /* 0x784 */
uint32 PAD[2];
uint32 extwakectrl[2]; /* 0x790 */
- uint32 PAD[10];
+ uint32 PAD[7];
+ uint32 fis_ctrl_status; /* 0x7b4 */
+ uint32 fis_min_res_mask; /* 0x7b8 */
+ uint32 PAD[1];
uint32 PrecisionTmrCtrlStatus; /* 0x7c0 */
} pmuregs_t;
@@ -417,7 +422,10 @@
uint32 pmuintctrl0; /* 0x780 */
uint32 PAD[3]; /* 0x784 - 0x78c */
uint32 extwakectrl[1]; /* 0x790 */
- uint32 PAD[27];
+ uint32 PAD[8];
+ uint32 fis_ctrl_status; /* 0x7b4 */
+ uint32 fis_min_res_mask; /* 0x7b8 */
+ uint32 PAD[17];
uint16 sromotp[512]; /* 0x800 */
#ifdef CCNFLASH_SUPPORT
/* Nand flash MLC controller registers (corerev >= 38) */
@@ -607,6 +615,8 @@
#define PMU_PLL_CONTROL_DATA 0x664
#define CC_SROM_CTRL 0x190
+#define CC_SROM_ADDRESS 0x194u
+#define CC_SROM_DATA 0x198u
#ifdef SROM16K_4364_ADDRSPACE
#define CC_SROM_OTP 0xa000 /* SROM/OTP address space */
#else
@@ -3129,42 +3139,6 @@
#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF
#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647
-/* 4367 related */
-#define RES4367_ABUCK 0
-#define RES4367_CBUCK 1
-#define RES4367_MISCLDO_PU 2
-#define RES4367_VBOOST 3
-#define RES4367_LDO3P3_PU 4
-#define RES4367_LAST_LPO_AVAIL 5
-#define RES4367_XTAL_PU 6
-#define RES4367_XTAL_STABLE 7
-#define RES4367_PWRSW_DIG 8
-#define RES4367_SR_DIG 9
-#define RES4367_SPARE10 10
-#define RES4367_PWRSW_AUX 11
-#define RES4367_SR_AUX 12
-#define RES4367_SPARE2 13
-#define RES4367_PWRSW_MAIN 14
-#define RES4367_SR_MAIN 15
-#define RES4367_ARMPLL_PWRUP 16
-#define RES4367_DIG_CORE_RDY 17
-#define RES4367_CORE_RDY_AUX 18
-#define RES4367_ALP_AVAIL 19
-#define RES4367_RADIO_AUX_PU 20
-#define RES4367_MINIPMU_AUX_PU 21
-#define RES4367_CORE_RDY_MAIN 22
-#define RES4367_RADIO_MAIN_PU 23
-#define RES4367_MINIPMU_MAIN_PU 24
-#define RES4367_PCIE_RET 25
-#define RES4367_COLD_START_WAIT 26
-#define RES4367_ARMPLL_HTAVAIL 27
-#define RES4367_HT_AVAIL 28
-#define RES4367_MACPHY_AUX_CLK_AVAIL 29
-#define RES4367_MACPHY_MAIN_CLK_AVAIL 30
-#define RES4367_RESERVED_31 31
-
-#define CST4367_SPROM_PRESENT (1 << 17)
-
/* 43430 PMU resources based on pmu_params.xls */
#define RES43430_LPLDO_PU 0
#define RES43430_BG_PU 1
@@ -3294,10 +3268,18 @@
#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+/* 55500, Dedicated space for TCAM_PATCH and TRX HDR area at RAMSTART */
+#define CR4_55500_RAM_START (0x3a0000)
+#define CR4_55500_TCAM_SZ (0x800)
+#define CR4_55500_TRX_HDR_SZ (0x2b4)
/* 55560, Dedicated sapce for TCAM_PATCH and TRX HDR area at RAMSTART */
#define CR4_55560_RAM_START (0x370000)
#define CR4_55560_TCAM_SZ (0x800)
+#if defined BCMTRXV4
+#define CR4_55560_TRX_HDR_SZ (0x2b4)
+#else
#define CR4_55560_TRX_HDR_SZ (0x20)
+#endif // endif
/* 4335 Chip specific ChipControl2 register bits */
#define CCTRL2_4335_AOSBLOCK (1 << 30)
@@ -3312,13 +3294,39 @@
#define CR4_4360_RAM_BASE (0x0)
#define CR4_43602_RAM_BASE (0x180000)
#define CA7_4365_RAM_BASE (0x200000)
+#define CR4_4373_RAM_BASE (0x160000)
+#define CST4373_JTAG_ENABLE(cs) (((cs) & (1 << 0)) != 0)
+#define CST4373_CHIPMODE_RSRC_INIT0(cs) (((cs) & (1 << 1)) != 0)
+#define CST4373_SDIO_PADVDDIO(cs) (((cs) & (1 << 5)) != 0)
+#define CST4373_USBHUB_BYPASS(cs) (((cs) & (1 << 9)) != 0)
+#define STRAP4373_CHIPMODE_RSRC_INIT1 0x1
+#define STRAP4373_VTRIM_EN 0x1
+#define STRAP4373_SFLASH_PRESENT 0x1
+#define OTP4373_SFLASH_BYTE_OFFSET 680
+#define OTP4373_SFLASH_MASK 0x3F
+#define OTP4373_SFLASH_PRESENT_MASK 0x1
+#define OTP4373_SFLASH_TYPE_MASK 0x2
+#define OTP4373_SFLASH_TYPE_SHIFT 0x1
+#define OTP4373_SFLASH_CLKDIV_MASK 0x3C
+#define OTP4373_SFLASH_CLKDIV_SHIFT 0x2
+#define SPROM4373_OTP_SELECT 0x00000010
+#define SPROM4373_OTP_PRESENT 0x00000020
+#define CC4373_SFLASH_CLKDIV_MASK 0x1F000000
+#define CC4373_SFLASH_CLKDIV_SHIFT 25
#define CR4_4347_RAM_BASE (0x170000)
#define CR4_4362_RAM_BASE (0x170000)
#define CR4_4369_RAM_BASE (0x170000)
#define CR4_4377_RAM_BASE (0x170000)
+#define CR4_43751_RAM_BASE (0x170000)
#define CA7_4367_RAM_BASE (0x200000)
#define CR4_4378_RAM_BASE (0x352000)
+#ifdef CHIPS_CUSTOMER_HW6
+#define CA7_4368_RAM_BASE (0x200000)
+#endif /* CHIPS_CUSTOMER_HW6 */
+/* TODO: Fix 55500 RAM BASE */
+#define CR4_55500_RAM_BASE (CR4_55500_RAM_START + CR4_55500_TCAM_SZ \
+ + CR4_55500_TRX_HDR_SZ)
#define CR4_55560_RAM_BASE (CR4_55560_RAM_START + CR4_55560_TCAM_SZ \
+ CR4_55560_TRX_HDR_SZ)
@@ -4624,16 +4632,28 @@
#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */
-#define SRPWR_DMN_ALL_MASK (0xF)
+
+#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */
+
+/* all power domain mask */
+#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih)
#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */
-#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT)
+#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT)
+
#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */
-#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT)
-#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */
-#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT)
+#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT)
+
+#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */
+#define SRPWR_DMN_ID_MASK (0xF)
/* PMU Precision Usec Timer */
#define PMU_PREC_USEC_TIMER_ENABLE 0x1
+/* FISCtrlStatus */
+#define PMU_CLEAR_FIS_DONE_SHIFT 1u
+#define PMU_CLEAR_FIS_DONE_MASK (1u << PMU_CLEAR_FIS_DONE_SHIFT)
+
#endif /* _SBCHIPC_H */
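Sketch: with SRPWR_DMN_ALL_MASK() now chip dependent (si_srpwr_domain_all_mask() is assumed to be declared in siutils.h), the request and status masks are computed at run time:

    static void example_srpwr_masks(si_t *sih, uint32 *req_mask, uint32 *sts_mask)
    {
        *req_mask = SRPWR_REQON_MASK(sih);    /* PowerOnRequest[11:8]-style field */
        *sts_mask = SRPWR_STATUS_MASK(sih);   /* ExtPwrStatus[19:16], read-only */
    }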
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbconfig.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbconfig.h
index 2ba56f9..285084b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbconfig.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -1,7 +1,9 @@
/*
* Broadcom SiliconBackplane hardware register definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbgci.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbgci.h
index 3a7e373..e52c41c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbgci.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbgci.h
@@ -1,7 +1,9 @@
/*
* SiliconBackplane GCI core hardware definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhndarm.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhndarm.h
index a0a5fb0..7f8787f 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhndarm.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhndarm.h
@@ -1,7 +1,9 @@
/*
* Broadcom SiliconBackplane ARM definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -30,198 +32,6 @@
#ifndef _sbhndarm_h_
#define _sbhndarm_h_
-#include <arminc.h>
-#include <sbconfig.h>
-
-/* register offsets */
-#define ARM7_CORECTL 0
-
-/* bits in corecontrol */
-#define ACC_FORCED_RST 0x1
-#define ACC_SERRINT 0x2
-#define ACC_WFICLKSTOP 0x4
-#define ACC_NOTSLEEPINGCLKREQ_SHIFT 24
-#define ACC_FORCECLOCKRATIO (1<<7)
-#define ACC_CLOCKRATIO_SHIFT (8)
-#define ACC_CLOCKRATIO_MASK (0xF00)
-#define ACC_CLOCKMODE_SHIFT (12)
-#define ACC_CLOCKMODE_MASK (0x7000)
-#define ACC_NOTSLEEPCLKREQ0 3
-#define ACC_NOTSLEEPCLKREQ1 18
-
-#define ACC_CLOCKRATIO_1_TO_1 (0)
-#define ACC_CLOCKRATIO_2_TO_1 (0x4)
-
-#define ACC_CLOCKMODE_SAME (0) /**< BP and CPU clock are the same */
-#define ACC_CLOCKMODE_ASYNC (1) /**< BP and CPU clock are asynchronous */
-#define ACC_CLOCKMODE_SYNCH (2) /**< BP and CPU clock are synch, ratio 1:1 or 1:2 */
-
-/* Request ALP on backplane bit 3 and 18 */
-#define ACC_REQALP ((1<<ACC_NOTSLEEPCLKREQ0) | (1<<ACC_NOTSLEEPCLKREQ1))
-
-/* arm resetlog */
-#define SBRESETLOG 0x1
-#define SERRORLOG 0x2
-
-/* arm core-specific control flags */
-#define SICF_REMAP_MSK 0x001c
-#define SICF_REMAP_NONE 0
-#define SICF_REMAP_ROM 0x0004
-#define SIFC_REMAP_FLASH 0x0008
-
-/* misc core-specific defines */
-#if defined(__ARM_ARCH_7M__)
-/* cortex-m3 */
-/* backplane related stuff */
-#define ARM_CORE_ID ARMCM3_CORE_ID /**< arm coreid */
-#define SI_ARM_ROM SI_ARMCM3_ROM /**< ROM backplane/system address */
-#define SI_ARM_SRAM2 SI_ARMCM3_SRAM2 /**< RAM backplane address when remap is 1 or 2 */
-/* core registers offsets */
-#define ARMCM3_CYCLECNT 0x90 /**< Cortex-M3 core registers offsets */
-#define ARMCM3_INTTIMER 0x94
-#define ARMCM3_INTMASK 0x98
-#define ARMCM3_INTSTATUS 0x9c
-/* interrupt/exception */
-#define ARMCM3_NUMINTS 16 /**< # of external interrupts */
-#define ARMCM3_INTALL ((1 << ARMCM3_NUMINTS) - 1) /**< Interrupt mask */
-#define ARMCM3_SHARED_INT 0 /**< Interrupt shared by multiple cores */
-#define ARMCM3_INT(i) (1 << (i)) /**< Individual interrupt enable/disable */
-/* intmask/intstatus bits */
-#define ARMCM3_INTMASK_TIMER 0x1
-#define ARMCM3_INTMASK_SYSRESET 0x4
-#define ARMCM3_INTMASK_LOCKUP 0x8
-
-/*
- * Overlay Support in Rev 5
- */
-#define ARMCM3_OVL_VALID_SHIFT 0
-#define ARMCM3_OVL_VALID 1
-#define ARMCM3_OVL_SZ_SHIFT 1
-#define ARMCM3_OVL_SZ_MASK 0x0000000e
-#define ARMCM3_OVL_SZ_512B 0 /* 512B */
-#define ARMCM3_OVL_SZ_1KB 1 /* 1KB */
-#define ARMCM3_OVL_SZ_2KB 2 /* 2KB */
-#define ARMCM3_OVL_SZ_4KB 3 /* 4KB */
-#define ARMCM3_OVL_SZ_8KB 4 /* 8KB */
-#define ARMCM3_OVL_SZ_16KB 5 /* 16KB */
-#define ARMCM3_OVL_SZ_32KB 6 /* 32KB */
-#define ARMCM3_OVL_SZ_64KB 7 /* 64KB */
-#define ARMCM3_OVL_ADDR_SHIFT 9
-#define ARMCM3_OVL_ADDR_MASK 0x003FFE00
-#define ARMCM3_OVL_MAX 16
-
-#elif defined(__ARM_ARCH_7R__)
-/* cortex-r4 */
-/* backplane related stuff */
-#define ARM_CORE_ID ARMCR4_CORE_ID /**< arm coreid */
-#define SI_ARM_ROM SI_ARMCR4_ROM /**< ROM backplane/system address */
-#define SI_ARM_SRAM2 0x0 /**< In the cr4 the RAM is just not available
- * when remap is 1
- */
-
-/* core registers offsets */
-#define ARMCR4_CORECTL 0
-#define ARMCR4_CORECAP 4
-#define ARMCR4_COREST 8
-
-#define ARMCR4_FIQRSTATUS 0x10
-#define ARMCR4_FIQMASK 0x14
-#define ARMCR4_IRQMASK 0x18
-
-#define ARMCR4_INTSTATUS 0x20
-#define ARMCR4_INTMASK 0x24
-#define ARMCR4_CYCLECNT 0x28
-#define ARMCR4_INTTIMER 0x2c
-
-#define ARMCR4_GPIOSEL 0x30
-#define ARMCR4_GPIOEN 0x34
-
-#define ARMCR4_BANKIDX 0x40
-#define ARMCR4_BANKINFO 0x44
-#define ARMCR4_BANKSTBY 0x48
-#define ARMCR4_BANKPDA 0x4c
-
-#define ARMCR4_TCAMPATCHCTRL 0x68
-#define ARMCR4_TCAMPATCHTBLBASEADDR 0x6C
-#define ARMCR4_TCAMCMDREG 0x70
-#define ARMCR4_TCAMDATAREG 0x74
-#define ARMCR4_TCAMBANKXMASKREG 0x78
-
-#define ARMCR4_ROMNB_MASK 0xf00
-#define ARMCR4_ROMNB_SHIFT 8
-#define ARMCR4_MSB_ROMNB_MASK 0x1E00000
-#define ARMCR4_MSB_ROMNB_SHIFT 16
-#define ARMCR4_TCBBNB_MASK 0xf0
-#define ARMCR4_TCBBNB_SHIFT 4
-#define ARMCR4_TCBANB_MASK 0xf
-#define ARMCR4_TCBANB_SHIFT 0
-
-#define ARMCR4_MT_MASK 0x300
-#define ARMCR4_MT_SHIFT 8
-#define ARMCR4_MT_ROM 0x100
-#define ARMCR4_MT_RAM 0
-
-#define ARMCR4_BSZ_MASK 0x3f
-#define ARMCR4_BSZ_MULT 8192
-
-#define ARMCR4_STBY_SUPPORTED 0x400
-#define ARMCR4_STBY_TIMER_PRESENT 0x800
-
-#define ARMCR4_TIMER_VAL_MASK 0xfffff
-#define ARMCR4_STBY_TIMER_ENABLE (1 << 24)
-#define ARMCR4_STBY_OVERRIDE (1 << 31)
-
-#define ARMCR4_TCAM_ENABLE (1u << 31)
-#define ARMCR4_TCAM_CLKENAB (1u << 30)
-#define ARMCR4_TCAM_PATCHCNT_MASK 0xf
-
-#define ARMCR4_TCAM_CMD_DONE (1u << 31)
-#define ARMCR4_TCAM_MATCH (1u << 24)
-#define ARMCR4_TCAM_OPCODE_MASK (3 << 16)
-#define ARMCR4_TCAM_OPCODE_SHIFT 16
-#define ARMCR4_TCAM_ADDR_MASK 0xffff
-#define ARMCR4_TCAM_NONE (0 << ARMCR4_TCAM_OPCODE_SHIFT)
-#define ARMCR4_TCAM_READ (1 << ARMCR4_TCAM_OPCODE_SHIFT)
-#define ARMCR4_TCAM_WRITE (2 << ARMCR4_TCAM_OPCODE_SHIFT)
-#define ARMCR4_TCAM_COMPARE (3 << ARMCR4_TCAM_OPCODE_SHIFT)
-#define ARMCR4_TCAM_CMD_DONE_DLY 1000
-
-#define ARMCR4_DATA_MASK (~0x7)
-#define ARMCR4_DATA_VALID (1u << 0)
-
-/* intmask/intstatus bits */
-#define ARMCR4_INTMASK_TIMER (0x1)
-#define ARMCR4_INTMASK_CLOCKSTABLE (0x20000000)
-
-#define CHIP_SDRENABLE(sih) (sih->boardflags2 & BFL2_SDR_EN)
-#define CHIP_TCMPROTENAB(sih) (si_arm_sflags(sih) & SISF_TCMPROT)
-
-/* Power Control */
-#define ARM_ENAB_MEM_CLK_GATE_SHIFT 5
-
-#define ROM_STBY_TIMER_4347 0xb0
-#define RAM_STBY_TIMER_4347 0x64
-
-/* using CHIPID because no capabilities bit */
-#define ARM_CLKGATING_CAP(sih) ((void)(sih), BCM4347_CHIP(sih->chip))
-
-#define ARM_CLKGATING_ENAB(sih) (ARM_CLKGATING_CAP(sih) && 1)
-
-#elif defined(__ARM_ARCH_7A__)
-
-#if defined(CA7)
-/* backplane related stuff */
-#define ARM_CORE_ID ARMCA7_CORE_ID
-#define SI_ARM_ROM SI_ARMCA7_ROM /**< ROM backplane/system address */
-
-#else
-/* backplane related stuff */
-#define ARM_CORE_ID ARMCA9_CORE_ID /* arm coreid */
-#endif /* __ARM_ARCH_7A__ */
-#else /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */
-#error Unrecognized ARM Architecture
-#endif /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */
-
#ifndef _LANGUAGE_ASSEMBLY
/* cpp contortions to concatenate w/arg prescan */
@@ -231,7 +41,6 @@
#define PAD _XSTR(__LINE__)
#endif /* PAD */
-#if defined(__ARM_ARCH_7M__)
/* cortex-m3 */
typedef volatile struct {
uint32 corecontrol; /* 0x0 */
@@ -265,10 +74,8 @@
uint32 PAD[1];
uint32 powerctl; /* 0x1e8 */
} cm3regs_t;
-#define ARMREG(regs, reg) (&((cm3regs_t *)regs)->reg)
-#endif /* __ARM_ARCH_7M__ */
+#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg)
-#if defined(__ARM_ARCH_7R__)
/* cortex-R4 */
typedef volatile struct {
uint32 corecontrol; /* 0x0 */
@@ -278,7 +85,7 @@
uint32 nmiisrst; /* 0x10 */
uint32 nmimask; /* 0x14 */
uint32 isrmask; /* 0x18 */
- uint32 PAD[1];
+ uint32 swintreg; /* 0x1C */
uint32 intstatus; /* 0x20 */
uint32 intmask; /* 0x24 */
uint32 cyclecnt; /* 0x28 */
@@ -301,10 +108,8 @@
uint32 PAD[1];
uint32 powerctl; /* 0x1e8 */
} cr4regs_t;
-#define ARMREG(regs, reg) (&((cr4regs_t *)regs)->reg)
-#endif /* __ARM_ARCH_7R__ */
+#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg)
-#if defined(__ARM_ARCH_7A__)
/* cortex-A7 */
typedef volatile struct {
uint32 corecontrol; /* 0x0 */
@@ -319,7 +124,18 @@
uint32 PAD[1];
uint32 powerctl; /* 0x1e8 */
} ca7regs_t;
-#define ARMREG(regs, reg) (&((ca7regs_t *)regs)->reg)
+#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg)
+
+#if defined(__ARM_ARCH_7M__)
+#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg)
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__)
+#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg)
+#endif /* __ARM_ARCH_7R__ */
+
+#if defined(__ARM_ARCH_7A__)
+#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg)
#endif /* __ARM_ARCH_7A__ */
#endif /* _LANGUAGE_ASSEMBLY */
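Sketch: ARMREG() now resolves through the per-architecture helpers above; a hypothetical CR4 intstatus read using R_REG() from the OSL ('osh'/'regs' assumed):

    #if defined(__ARM_ARCH_7R__)
    static uint32 example_read_intstatus(osl_t *osh, volatile void *regs)
    {
        return R_REG(osh, ARMREG(regs, intstatus));
    }
    #endif /* __ARM_ARCH_7R__ */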
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhnddma.h
index 6250abb..052430c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhnddma.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -2,7 +2,9 @@
* Generic Broadcom Home Networking Division (HND) DMA engine HW interface
* This supports the following chips: BCM42xx, 44xx, 47xx .
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
index 549ef12..c194719 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -1,7 +1,9 @@
/*
* BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -119,6 +121,7 @@
#define CISTPL_NULL 0x00
#define CISTPL_END 0xff /* End of the CIS tuple chain */
+#define CISTPL_OFFSET 0xC0
#define CISTPL_BRCM_HNBU 0x80
@@ -128,6 +131,9 @@
#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */
+#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */
+#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */
+
/* sbtmstatelow */
#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
#define SBTML_INT_EN 0x20000 /* enable sb interrupt */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdio.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdio.h
index e8123a0..59639e9 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdio.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -4,7 +4,9 @@
*
* SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
index 6bbecf9..a4ea048 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -2,7 +2,9 @@
* Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
* device core support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsocram.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsocram.h
index 8143781..75587be 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsocram.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -1,7 +1,9 @@
/*
* BCM47XX Sonics SiliconBackplane embedded ram core
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsysmem.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsysmem.h
index dc00f6f..d41d87d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsysmem.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sbsysmem.h
@@ -1,7 +1,9 @@
/*
* SiliconBackplane System Memory core
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdio.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdio.h
index 8e774fa..9b53584 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdio.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -2,7 +2,9 @@
* SDIO spec header file
* Protocol and standard (common) device definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -631,11 +633,19 @@
#define HS_POLL_PERIOD_MS 10
#define D2H_READY_WD_RESET_MS 1 /* 1ms */
+#ifdef BCMQT
+#define D2H_READY_TIMEOUT_MS (1000 * 60 * 3) /* 3 Mins >~ FW download time */
+#define D2H_VALDN_DONE_TIMEOUT_MS (1000 * 60 * 5) /* 5 Mins >~ Validation time */
+#define D2H_TRX_HDR_PARSE_DONE_TIMEOUT_MS (1000 * 60 * 1) /* 1 Min >~ TRX Parsing */
+#define D2H_READY_WD_RESET_COUNT (84 * 1000) /* ~84secs >~ BL ready time after wd rst */
+#define D2H_READY_WD_RESET_DBG_PRINT_MS (1000) /* 1000ms - DEBUG print at every 1000ms */
+#else
#define D2H_READY_TIMEOUT_MS (100) /* 100ms >~ FW download time */
#define D2H_VALDN_DONE_TIMEOUT_MS (250) /* 250ms >~ Validation time */
#define D2H_TRX_HDR_PARSE_DONE_TIMEOUT_MS (50) /* 50ms >~ TRX Parsing */
#define D2H_READY_WD_RESET_COUNT (200) /* ~200ms >~ BL ready time after wd rst */
#define D2H_READY_WD_RESET_DBG_PRINT_MS (10) /* 10ms - DEBUG print at evry 10ms */
+#endif // endif
typedef struct bl_hs_address {
volatile void *d2h;
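The BCMQT branch above stretches the download/validation handshake timeouts from milliseconds to minutes for emulation platforms. A minimal sketch of the polling pattern these constants feed, assuming a kernel context (msleep()/-ETIMEDOUT from <linux/delay.h> and <linux/errno.h>) and a hypothetical bl_hs_read_d2h()/D2H_READY_MAGIC accessor pair that is not part of this header:

/* Sketch only: poll the D2H handshake word until the bootloader/firmware
 * reports ready or D2H_READY_TIMEOUT_MS expires (100 ms normally, 3 minutes
 * under BCMQT). bl_hs_read_d2h() and D2H_READY_MAGIC are hypothetical.
 */
static int wait_for_d2h_ready(void)
{
	int waited_ms = 0;

	while (waited_ms < D2H_READY_TIMEOUT_MS) {
		if (bl_hs_read_d2h() == D2H_READY_MAGIC)
			return 0;                  /* handshake complete */
		msleep(HS_POLL_PERIOD_MS);         /* poll every 10 ms */
		waited_ms += HS_POLL_PERIOD_MS;
	}
	return -ETIMEDOUT;
}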
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdioh.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdioh.h
index c55828e..6ab58ab 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdioh.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -2,7 +2,9 @@
* SDIO Host Controller Spec header file
* Register map and definitions for the Standard Host Controller
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -181,6 +183,10 @@
#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+#define CAP3_RETUNING_TC_DISABLED (0x0)
+#define CAP3_RETUNING_TC_1024S (0xB)
+#define CAP3_RETUNING_TC_OTHER (0xF)
+
#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdiovar.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdiovar.h
index 834c7dc..b580264 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdiovar.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -2,7 +2,9 @@
* Structure used by apps whose drivers access SDIO drivers.
* Pulled out separately so dhdu and wlu can both use it.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdspi.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdspi.h
index 713cdcf..291fd54 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdspi.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/sdspi.h
@@ -1,7 +1,9 @@
/*
* SD-SPI Protocol Standard
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/siutils.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/siutils.h
index 3797006..d992283 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/siutils.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -2,7 +2,9 @@
* Misc utility routines for accessing the SOC Interconnects
* of Broadcom HNBU chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -160,7 +162,11 @@
#define SECI_ACCESS_UART_PUTC 7
#define SECI_ACCESS_STATUSMASK_GET 8
+#if defined(BCMQT)
+#define ISSIM_ENAB(sih) TRUE
+#else
#define ISSIM_ENAB(sih) FALSE
+#endif // endif
#define INVALID_ADDR (~0)
@@ -210,8 +216,10 @@
#define ARMCR4_TCBANB_SHIFT 0
#define SICF_CPUHALT (0x0020)
-#define ARMCR4_BSZ_MASK 0x3f
-#define ARMCR4_BSZ_MULT 8192
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BUNITSZ_MASK 0x200
+#define ARMCR4_BSZ_8K 8192
+#define ARMCR4_BSZ_1K 1024
#define SI_BPIND_1BYTE 0x1
#define SI_BPIND_2BYTE 0x3
#define SI_BPIND_4BYTE 0xF
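ARMCR4_BSZ_MASK widens to 7 bits and ARMCR4_BUNITSZ_MASK selects between 1 KB and 8 KB bank units. A minimal sketch of how the new macros could be combined, assuming corecap holds the ARMCR4 capability word; the field layout used here is illustrative, not a statement of the hardware spec:

/* Sketch only: derive a TCM size from an ARMCR4 capability word using the new
 * macros (bank count in ARMCR4_BSZ_MASK, bank-unit select in
 * ARMCR4_BUNITSZ_MASK). Assumed layout, for illustration only.
 */
static unsigned int armcr4_tcm_bytes(unsigned int corecap)
{
	unsigned int banks = corecap & ARMCR4_BSZ_MASK;        /* now 7 bits wide */
	unsigned int banksz = (corecap & ARMCR4_BUNITSZ_MASK) ?
			ARMCR4_BSZ_1K : ARMCR4_BSZ_8K;         /* 1 KB vs 8 KB banks */

	return banks * banksz;
}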
@@ -245,6 +253,7 @@
extern int si_backplane_access(si_t *sih, uint addr, uint size,
uint *val, bool read);
extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern volatile void *si_coreregs(si_t *sih);
@@ -261,6 +270,7 @@
extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern uint32 si_oobr_baseaddr(si_t *sih, bool second);
extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
extern int si_numaddrspaces(si_t *sih);
@@ -759,6 +769,7 @@
extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_srpwr_stat(si_t *sih);
extern uint32 si_srpwr_domain(si_t *sih);
+extern uint32 si_srpwr_domain_all_mask(si_t *sih);
/* SR Power Control */
/* No capabilities bit so using chipid for now */
@@ -786,6 +797,12 @@
* ARM, TCM, Main, Aux
* Host needs to power up
*/
+#ifdef CHIPS_CUSTOMER_HW6
+#define MULTIBP_CAP(sih) (BCM4368_CHIP(sih->chip) || BCM4378_CHIP(sih->chip) || \
+ BCM4387_CHIP(sih->chip))
+#else /* !CHIPS_CUSTOMER_HW6 */
+#define MULTIBP_CAP(sih) (FALSE)
+#endif /* CHIPS_CUSTOMER_HW6 */
#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable)
uint32 si_enum_base(uint devid);
@@ -799,4 +816,7 @@
void si_clrirq_idx(si_t *sih, uint core_idx);
+/* return if scan core is present */
+bool si_scan_core_present(si_t *sih);
+
#endif /* _siutils_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/spid.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/spid.h
index d9239b6..ffb1e3b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/spid.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/spid.h
@@ -1,7 +1,9 @@
/*
* SPI device spec header file
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/trxhdr.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/trxhdr.h
index 601e346..41f1166 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/trxhdr.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -1,7 +1,9 @@
/*
* TRX image file header format.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -40,26 +42,48 @@
#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */
+
#define TRX_VERSION_BIT_OFFSET 16
-#define R_COMP_SIZE 32 /* R component - 32 bytes */
-#define S_COMP_SIZE 32 /* S component - 32 bytes */
+#define R_COMP_SIZE 32 /* R component - 32 bytes */
+#define S_COMP_SIZE 32 /* S component - 32 bytes */
#define ECDSA_SIGNATURE_SIZE (R_COMP_SIZE + S_COMP_SIZE) /* r[32bytes] and s[32bytes] components */
-#ifdef BCMTRXV3
-#define TRX_VERSION 3 /* Version 3 */
-#define TRX_MAX_OFFSET 4 /* FW size + Jump_addr + NVRAM size[if exist]
- * + signature size
- */
+enum {
+ TRX_V4_OFFS_SIGN_INFO_IDX = 0,
+ TRX_V4_OFFS_DATA_FOR_SIGN1_IDX = 1,
+ TRX_V4_OFFS_DATA_FOR_SIGN2_IDX = 2,
+ TRX_V4_OFFS_ROOT_MODULUS_IDX = 3,
+ TRX_V4_OFFS_ROOT_EXPONENT_IDX = 67,
+ TRX_V4_OFFS_CONT_MODULUS_IDX = 68,
+ TRX_V4_OFFS_CONT_EXPONENT_IDX = 132,
+ TRX_V4_OFFS_HASH_FW_IDX = 133,
+ TRX_V4_OFFS_FW_LEN_IDX = 149,
+ TRX_V4_OFFS_TR_RST_IDX = 150,
+ TRX_V4_OFFS_FW_VER_FOR_ANTIROOLBACK_IDX = 151,
+ TRX_V4_OFFS_IV_IDX = 152,
+ TRX_V4_OFFS_NONCE_IDX = 160,
+ TRX_V4_OFFS_SIGN_INFO2_IDX = 168,
+ TRX_V4_OFFS_MAX_IDX
+};
+
+#if defined BCMTRXV4
+#define TRX_VERSION 4 /* Version 4 */
+#define TRX_MAX_OFFSET (TRX_V4_OFFS_MAX_IDX)
+
+#elif defined BCMTRXV3
+#define TRX_VERSION 3 /* Version 3 */
+#define TRX_MAX_OFFSET 4 /* FW size + Jump_addr + NVRAM size[if exist]
+ * + signature size
+ */
#elif defined BCMTRXV2
-#define TRX_VERSION 2 /* Version 2 */
-#define TRX_MAX_OFFSET 5 /* Max number of individual files
- * to support SDR signature +
- * * Config data region
- */
+#define TRX_VERSION 2 /* Version 2 */
+#define TRX_MAX_OFFSET 5 /* Max number of individual files
+ * to support SDR signature + Config data region
+ */
#else
-#define TRX_VERSION 1 /* Version 1 */
-#define TRX_MAX_OFFSET 3 /* Max number of individual files */
-#endif // endif
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+#endif /* BCMTRXV3 BCMTRXV2 BCMTRXV4 */
/* BMAC Host driver/application like bcmdl need to support both Ver 1 as well as
* Ver 2 of trx header. To make it generic, trx_header is structure is modified
@@ -70,19 +94,19 @@
* is defined.
*/
struct trx_header {
- uint32 magic; /* "HDR0" */
- uint32 len; /* Length of file including header */
- uint32 crc32; /* 32-bit CRC from flag_version to end of file */
- uint32 flag_version; /* 0:15 flags, 16:31 version */
- uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
};
/* bootloader makes special use of trx header "offsets" array */
enum {
- TRX_OFFS_FW_LEN_IDX = 0, /* Size of the fw; used in uncompressed case */
- TRX_OFFS_TR_RST_IDX = 1, /* RAM address[tr_rst] for jump to after download */
- TRX_OFFS_DSG_LEN_IDX = 2, /* Len of digital signature */
- TRX_OFFS_CFG_LEN_IDX = 3 /* Len of config region */
+ TRX_OFFS_FW_LEN_IDX = 0, /* Size of the fw; used in uncompressed case */
+ TRX_OFFS_TR_RST_IDX = 1, /* RAM address[tr_rst] for jump to after download */
+ TRX_OFFS_DSG_LEN_IDX = 2, /* Len of digital signature */
+ TRX_OFFS_CFG_LEN_IDX = 3 /* Len of config region */
};
/* Compatibility */
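With TRX_MAX_OFFSET now version-dependent (V4 uses the TRX_V4_OFFS_* enum), consumers should gate on the version packed into flag_version before touching the offsets array. A minimal sketch, assuming TRX_MAGIC ("HDR0") is defined elsewhere in this header and ignoring byte-order handling:

/* Sketch only: validate a TRX image header and extract the firmware length.
 * TRX_MAGIC is assumed to be defined elsewhere in trxhdr.h.
 */
static int trx_get_fw_len(const struct trx_header *trx, uint32 *fw_len)
{
	uint32 version = trx->flag_version >> TRX_VERSION_BIT_OFFSET;

	if (trx->magic != TRX_MAGIC)
		return -1;              /* not a TRX image */
	if (version > TRX_VERSION)
		return -1;              /* built without support for this version */

	*fw_len = trx->offsets[TRX_OFFS_FW_LEN_IDX];
	return 0;
}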
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/typedefs.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/typedefs.h
index fc280b9..2d5120b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/typedefs.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/vlan.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/vlan.h
index b9761e5..5c5ebdf 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/vlan.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/vlan.h
@@ -1,7 +1,9 @@
/*
* 802.1Q VLAN protocol definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wl_iw.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wl_iw.h
index 86a8a46..bfb9955 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wl_iw.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wl_iw.h
@@ -1,7 +1,9 @@
/*
* Linux Wireless Extensions support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
index d9a4186..1e91e74 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl.h
index 48cb059..4d64e6e 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -6,7 +6,9 @@
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -46,9 +48,7 @@
#include <802.11s.h>
#include <802.1d.h>
#include <bcmwifi_channels.h>
-#ifdef WL11AX
#include <802.11ax.h>
-#endif /* WL11AX */
#include <bcmwifi_rates.h>
#include <wlioctl_defs.h>
#include <bcmipv6.h>
@@ -88,6 +88,7 @@
#endif // endif
#define WL_ASSOC_START_EVT_DATA_VERSION 1
+#define WSEC_MAX_SAE_PASSWORD_LEN 128
typedef struct assoc_event_data {
uint32 version;
@@ -169,6 +170,36 @@
#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+#define WL_EXTAUTH_START 1
+#define WL_EXTAUTH_ABORT 2
+#define WL_EXTAUTH_FAIL 3
+#define WL_EXTAUTH_SUCCESS 4
+
+/**
+ * Used by both dongle and host
+ * dongle asks host to start auth(SAE)
+ * host updates auth status to dongle
+ */
+typedef struct wl_auth_req_status {
+ uint16 flags;
+ struct ether_addr peer_mac; /**< peer mac address */
+ uint32 ssid_len;
+ uint8 ssid[DOT11_MAX_SSID_LEN];
+ uint8 pmkid[WPA2_PMKID_LEN];
+} wl_auth_req_status_t;
+
+typedef struct wl_mf_params {
+ uint32 version;
+ uint32 dwell_time;
+ uint16 len;
+ uint16 fc;
+ uint16 channel;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ uint32 packetId;
+ uint8 data[1];
+} wl_mf_params_t;
+
#define MFP_TEST_FLAG_NORMAL 0
#define MFP_TEST_FLAG_ANY_KEY 1
typedef struct wl_sa_query {
@@ -468,6 +499,58 @@
uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
} wl_bss_info_v109_1_t;
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_v109_2 {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 he_cap; /**< BSS is he capable */
+ uint8 freespace1; /* make implicit padding explicit */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 padding1[3]; /**< explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+ uint32 he_mcsmap; /**< STA's Associated hemcsmap */
+ uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */
+} wl_bss_info_v109_2_t;
+
#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
typedef wl_bss_info_v109_t wl_bss_info_t;
#endif // endif
@@ -523,6 +606,57 @@
uint32 chanspec;
} wl_bss_config_t;
+#define INVALID_HE_OMI_ARG 255
+
+#define WL_HE_OMI_V1 1
+typedef struct wl_he_omi_v1 {
+ uint8 version;
+ uint8 length;
+ uint8 rx_nss;
+ uint8 chnl_wdth;
+ uint8 ul_mu_dis;
+ uint8 tx_nsts;
+ uint8 er_su_dis;
+ uint8 dl_mu_resound;
+ uint8 ul_mu_data_dis;
+} wl_he_omi_v1_t;
+
+#ifndef HE_OMI_VER_ENABLED
+#define WL_HE_OMI_VER (WL_HE_OMI_V1)
+typedef wl_he_omi_v1_t wl_he_omi_t;
+#endif // endif
+
+/* HE PPE Thresholds field description (variable length) support */
+typedef struct wl_he_ppe_ths_params {
+ uint8 ppe_ths_len; /* data length (starting after this field) */
+ uint8 ppe_ths_field[1]; /* field data with variable length */
+} wl_he_ppe_ths_params_t;
+
+/* MU EDCA override support for testbed STA configuration */
+struct he_muedca_params {
+ bool override_enabled;
+ uint8 muedca_upd_cnt;
+ he_mu_ac_param_record_t param_ac[AC_COUNT];
+};
+
+typedef struct he_muedca_params he_muedca_params_t;
+
+/* MUEDCA Timer range */
+#define HE_MUEDCA_TIMER_MIN 1u
+#define HE_MUEDCA_TIMER_MAX 255u
+
+/* AIFSN=0 indicates that EDCA is disabled for the duration
+ * specified by the MUEDCATimer for the corresponding AC
+ */
+#define HE_MUEDCA_AIFSN_MIN 0
+
+/* HE OFDMA TX DEMO support */
+typedef struct he_ofdmatx_demo_params {
+ bool demo_mode_enabled;
+ uint32 twt_wake_dur; /* target wake duration in unit of microseconds */
+ uint32 twt_wake_int; /* target wake interval in unit of microseconds */
+} he_ofdmatx_demo_params_t;
+
/* Number of Bsscolor supported per core */
#ifndef HE_MAX_BSSCOLOR_RES
#define HE_MAX_BSSCOLOR_RES 2
@@ -543,15 +677,15 @@
#define STAID1_IDX 1
#define STAID2_IDX 2
#define STAID3_IDX 3
-#define HE_STAID_MAX_VAL 2047
+#define HE_STAID_MAX_VAL 0x07FF
typedef struct wl_bsscolor_info {
uint16 version; /**< structure version */
uint16 length; /**< length of the bsscolor info */
uint8 bsscolor_index; /**< bsscolor index 0-1 */
uint8 bsscolor; /**<bsscolor value from 0 to 63 */
- bool partial_bsscolor_ind;
- bool disable_bsscolor_ind; /**< To disable perticular bsscolor */
+ uint8 partial_bsscolor_ind;
+ uint8 disable_bsscolor_ind; /**< To disable particular bsscolor */
uint16 staid_info[HE_MAX_STAID_PER_BSSCOLOR]; /**< 0-3 staid info of each bsscolor */
} wl_bsscolor_info_t;
@@ -702,9 +836,60 @@
uint16 channel_list[1]; /**< list of chanspecs */
} wl_scan_params_t;
+/* changes in wl_scan_params_v2 as compared to wl_scan_params (v1):
+ * uint8 scan_type widened to uint32
+ */
+typedef struct wl_scan_params_v2 {
+ uint16 version; /* Version of wl_scan_params, change value of
+ * WL_SCAN_PARAM_VERSION on version update
+ */
+ uint16 length; /* length of structure wl_scan_params_v2_t
+ * without implicit pad
+ */
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 PAD;
+ uint32 scan_type; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_XXX
+ */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /**< list of chanspecs */
+} wl_scan_params_v2_t;
+
+#define WL_SCAN_PARAMS_VERSION_V2 2
+
/** size of wl_scan_params not including variable length array */
#define WL_SCAN_PARAMS_FIXED_SIZE 64
#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+#define WL_SCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_scan_params_v2_t, channel_list))
+#define WL_MAX_ROAMSCAN_V2_DATSZ \
+ (WL_SCAN_PARAMS_V2_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
#define ISCAN_REQ_VERSION 1
@@ -748,6 +933,7 @@
} iscan_buf_t;
#endif /* SIMPLE_ISCAN */
#define ESCAN_REQ_VERSION 1
+#define ESCAN_REQ_VERSION_V2 2
/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_params {
@@ -757,7 +943,15 @@
wl_scan_params_t params;
} wl_escan_params_t;
+typedef struct wl_escan_params_v2 {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_v2_t params;
+} wl_escan_params_v2_t;
+
#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ESCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_escan_params_v2_t, params) + sizeof(wlc_ssid_t))
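A hedged sketch of filling the new v2 escan request, following the channel_num packing described in wl_scan_params_v2_t above (channel count in the low 16 bits, SSID count in the high 16 bits). The action value, the allocation, and the ioctl plumbing around this are assumptions:

/* Sketch only: the caller is assumed to have allocated
 * WL_ESCAN_PARAMS_V2_FIXED_SIZE + nchan * sizeof(uint16) bytes.
 * Only the channel_num packing follows the struct comments above.
 */
static void fill_escan_v2(wl_escan_params_v2_t *eparams,
		const uint16 *chanspecs, int nchan, uint16 sync_id)
{
	int i;

	memset(eparams, 0, WL_ESCAN_PARAMS_V2_FIXED_SIZE);
	eparams->version = ESCAN_REQ_VERSION_V2;
	eparams->action = 1;                    /* assumed: WL_SCAN_ACTION_START */
	eparams->sync_id = sync_id;

	eparams->params.version = WL_SCAN_PARAMS_VERSION_V2;
	eparams->params.length = sizeof(wl_scan_params_v2_t);
	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
	eparams->params.scan_type = 0;          /* 0 = driver defaults */
	eparams->params.nprobes = -1;
	eparams->params.active_time = -1;
	eparams->params.passive_time = -1;
	eparams->params.home_time = -1;

	/* low 16 bits: channel count; high 16 bits: SSID count (none here) */
	eparams->params.channel_num = nchan & 0xffff;
	for (i = 0; i < nchan; i++)
		eparams->params.channel_list[i] = chanspecs[i];
}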
/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_result {
@@ -1106,6 +1300,21 @@
} cca_chan_qual_event_t;
typedef struct {
+ uint16 status;
+ uint16 id;
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ struct {
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ cca_congest_t cca_busy_nopm; /**< Extended CCA report (PM awake time) */
+ cca_congest_t cca_busy_pm; /**< Extended CCA report (PM sleep time) */
+ };
+ };
+} cca_only_chan_qual_event_t;
+
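One plausible way to pick the right member of the anonymous union in cca_only_chan_qual_event_t is to key off the embedded len field; real event handlers may key off status/id instead. A minimal sketch under that assumption:

/* Sketch only: choose simple vs. extended CCA report by payload length. */
static void handle_cca_event(const cca_only_chan_qual_event_t *ev)
{
	if (ev->len >= 3 * sizeof(cca_congest_t)) {
		/* extended report: ev->cca_busy_ext, ev->cca_busy_nopm, ev->cca_busy_pm */
	} else {
		/* simple report: ev->cca_busy */
	}
}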
+typedef struct {
uint32 msrmnt_time; /**< Time for Measurement (msec) */
uint32 msrmnt_done; /**< flag set when measurement complete */
char buf[];
@@ -1164,6 +1373,7 @@
WLC_CCODE_ROLE_80211D_ASSOC,
WLC_CCODE_ROLE_80211D_SCAN,
WLC_CCODE_ROLE_DEFAULT,
+ WLC_CCODE_ROLE_DEFAULT_SROM_BKUP,
WLC_CCODE_LAST
} wl_ccode_role_t;
#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST
@@ -1324,6 +1534,23 @@
uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */
} wsec_pmk_t;
+#define WL_AUTH_EVENT_DATA_V1 0x1
+
+/* tlv ids for auth event */
+#define WL_AUTH_PMK_TLV_ID 1
+#define WL_AUTH_PMKID_TLV_ID 2
+/* AUTH event data
+ * pmk and pmkid in case of SAE auth
+ * xtlvs will be 32 bit aligned
+ */
+typedef struct wl_auth_event {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_auth_event_t;
+
+#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs)
+
#define FILS_CACHE_ID_LEN 2
#define PMK_LEN_MAX 48
@@ -1345,7 +1572,24 @@
uint8 fils_cache_id[FILS_CACHE_ID_LEN];
} pmkid_v2_t;
+typedef struct _pmkid_v3 {
+ struct ether_addr bssid;
+ uint8 pmkid[WPA2_PMKID_LEN];
+ uint8 pmkid_len;
+ uint8 pmk[PMK_LEN_MAX];
+ uint8 pmk_len;
+ uint16 fils_cache_id; /* 2-byte length */
+ uint8 pad;
+ uint8 ssid_len;
+ uint8 ssid[DOT11_MAX_SSID_LEN]; /* For FILS, to save ESSID */
+ /* one pmkid used in whole ESS */
+ uint32 time_left; /* remaining time until expiry in sec. */
+ /* 0 means expired, all 0xFF means never expire */
+} pmkid_v3_t;
+
#define PMKID_LIST_VER_2 2
+#define PMKID_LIST_VER_3 3
+
typedef struct _pmkid_list_v1 {
uint32 npmkid;
pmkid_v1_t pmkid[1];
@@ -1357,6 +1601,14 @@
pmkid_v2_t pmkid[1];
} pmkid_list_v2_t;
+typedef struct _pmkid_list_v3 {
+ uint16 version;
+ uint16 length;
+ uint16 count;
+ uint16 pad;
+ pmkid_v3_t pmkid[];
+} pmkid_list_v3_t;
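A minimal sketch of walking the variable-length pmkid_v3_t array, assuming the buffer came back from a pmkid query and that count has already been bounds-checked against length by the caller:

/* Sketch only: iterate the flexible pmkid array; pmkid_len/pmk_len give the
 * valid bytes of each entry and time_left of 0 means the entry has expired.
 */
static void dump_pmkid_list_v3(const pmkid_list_v3_t *list)
{
	uint16 i;

	if (list->version != PMKID_LIST_VER_3)
		return;

	for (i = 0; i < list->count; i++) {
		const pmkid_v3_t *p = &list->pmkid[i];
		/* p->bssid, p->pmkid[0..pmkid_len), p->pmk[0..pmk_len),
		 * p->ssid for FILS, p->time_left in seconds
		 */
		(void)p;
	}
}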
+
#ifndef PMKID_VERSION_ENABLED
/* pmkid structure before versioning. legacy. DONOT update anymore here */
typedef pmkid_v1_t pmkid_t;
@@ -1881,7 +2133,10 @@
#define WL_NUM_RATES_MCS_1STREAM 8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
#define WL_NUM_RATES_EXTRA_VHT 2 /**< Additional VHT 11AC rates */
#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT)
+#define WL_NUM_RATES_HE 12
#define WL_NUM_RATES_MCS32 1
+#define UC_URL_LEN 128u /**< uCode URL length */
/*
* Structure for passing hardware and software
@@ -1897,7 +2152,7 @@
uint32 boardvendor; /**< board vendor (usu. PCI sub-vendor id) */
uint32 boardrev; /**< board revision */
uint32 driverrev; /**< driver version */
- uint32 ucoderev; /**< microcode version */
+ uint32 ucoderev; /**< uCode version */
uint32 bus; /**< bus type */
uint32 chipnum; /**< chip number */
uint32 phytype; /**< phy type */
@@ -1911,6 +2166,9 @@
uint32 drvrev_minor; /**< driver version: minor */
uint32 drvrev_rc; /**< driver version: rc */
uint32 drvrev_rc_inc; /**< driver version: rc incremental */
+ uint16 ucodeprebuilt; /**< uCode prebuilt flag */
+ uint16 ucodediffct; /**< uCode diff count */
+ uchar ucodeurl[UC_URL_LEN]; /**< uCode repo URL@cmt_id */
} wlc_rev_info_t;
#define WL_REV_INFO_LEGACY_LENGTH 48
@@ -2882,21 +3140,23 @@
#define PSBW_CAL_DONE 0x8 /* 20M channel cal done */
/* Bits for disable_reasons */
-#define WL_PSBW_DISA_HOST 0x00001 /* Host has disabled through psbw_cfg */
-#define WL_PSBW_DISA_AP20M 0x00002 /* AP is operating on 20 MHz */
-#define WL_PSBW_DISA_SLOTTED_BSS 0x00004 /* AWDL or NAN active */
-#define WL_PSBW_DISA_NOT_PMFAST 0x00008 /* Not PM_FAST */
-#define WL_PSBW_DISA_BASICRATESET 0x00010 /* BasicRateSet is empty */
-#define WL_PSBW_DISA_NOT_D3 0x00020 /* PCIe not in D3 */
-#define WL_PSBW_DISA_CSA 0x00040 /* CSA IE is present */
-#define WL_PSBW_DISA_ASSOC 0x00080 /* assoc state is active/or unassoc */
-#define WL_PSBW_DISA_SCAN 0x00100 /* scan state is active */
-#define WL_PSBW_DISA_NONE 0x00200 /* internal use only */
- /* following are not part of disable reasons */
-#define WL_PSBW_EXIT_PM 0x01000 /* Out of PM */
-#define WL_PSBW_EXIT_TIM 0x02000 /* unicast TIM bit present */
-#define WL_PSBW_EXIT_DATA 0x04000 /* Data for transmission */
-#define WL_PSBW_EXIT_MGMTDATA 0x08000 /* management frame for transmission */
+#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */
+#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */
+#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* AWDL or NAN active */
+#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */
+#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */
+#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */
+#define WL_PSBW_DISA_CSA 0x00000040 /* CSA IE is present */
+#define WL_PSBW_DISA_ASSOC 0x00000080 /* assoc state is active/or unassoc */
+#define WL_PSBW_DISA_SCAN 0x00000100 /* scan state is active */
+#define WL_PSBW_DISA_CAL 0x00000200 /* cal pending or active */
+/* following are not part of disable reasons */
+#define WL_PSBW_EXIT_PM 0x00001000 /* Out of PM */
+#define WL_PSBW_EXIT_TIM 0x00002000 /* unicast TIM bit present */
+#define WL_PSBW_EXIT_DATA 0x00004000 /* Data for transmission */
+#define WL_PSBW_EXIT_MGMTDATA 0x00008000 /* management frame for transmission */
+#define WL_PSBW_EXIT_BW_UPD 0x00010000 /* BW being updated */
+#define WL_PSBW_DISA_NONE 0x80000000 /* reserved for internal use only */
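With the disable-reason field widened to 32 bits, a debug decoder reduces to a table over the masks above. A minimal sketch, assuming a userspace (wl utility) context where stdio's printf is available:

/* Sketch only: decode the 32-bit reason mask; the table mirrors the defines. */
static void psbw_print_disable_reasons(uint32 reasons)
{
	static const struct { uint32 bit; const char *name; } tbl[] = {
		{ WL_PSBW_DISA_HOST,         "host" },
		{ WL_PSBW_DISA_AP20M,        "ap-20MHz" },
		{ WL_PSBW_DISA_SLOTTED_BSS,  "slotted-bss" },
		{ WL_PSBW_DISA_NOT_PMFAST,   "not-PM_FAST" },
		{ WL_PSBW_DISA_BASICRATESET, "empty-basic-rateset" },
		{ WL_PSBW_DISA_NOT_D3,       "pcie-not-D3" },
		{ WL_PSBW_DISA_CSA,          "csa-ie" },
		{ WL_PSBW_DISA_ASSOC,        "assoc-state" },
		{ WL_PSBW_DISA_SCAN,         "scan-active" },
		{ WL_PSBW_DISA_CAL,          "cal-pending" },
	};
	uint32 i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (reasons & tbl[i].bit)
			printf("psbw disabled: %s\n", tbl[i].name);
}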
/*
* Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
@@ -2958,6 +3218,8 @@
#define RATE_LEGACY_OFDM_54MBPS 7
#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V1 1
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V2 2
typedef struct wl_bsstrans_rssi {
int8 rssi_2g; /**< RSSI in dbm for 2.4 G */
@@ -2967,6 +3229,27 @@
#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */
/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map_v2 {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /**< MCS0-11 */
+ wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /**< MCS0-11 */
+} wl_bsstrans_rssi_rate_map_v2_t;
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map_v1 {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */
+} wl_bsstrans_rssi_rate_map_v1_t;
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
typedef struct wl_bsstrans_rssi_rate_map {
uint16 ver;
uint16 len; /**< length of entire structure */
@@ -3102,7 +3385,8 @@
/* tlv IDs uniquely identifies periodic state component */
enum wl_periodic_slice_state_xtlv_id {
WL_STATE_COMPACT_COUNTERS = 0x1,
- WL_STATE_TXBF_COUNTERS = 0x2
+ WL_STATE_TXBF_COUNTERS = 0x2,
+ WL_STATE_COMPACT_HE_COUNTERS = 0x3
};
/* tlv IDs uniquely identifies periodic state component */
@@ -3251,7 +3535,7 @@
#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t))
-#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_v1_t))
+#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_t))
#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t))
@@ -3545,11 +3829,13 @@
uint32 rx_toss_cnt; /* number of rx packets tossed */
uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
+ uint32 pmk_badlen_cnt; /* number of invalid pmk len */
} wl_cnt_wlc_t;
-/* he counters */
-typedef struct {
+/* he counters Version 1 */
+#define HE_COUNTERS_V1 (1)
+typedef struct wl_he_cnt_wlc_v1 {
uint32 he_rxtrig_myaid;
uint32 he_rxtrig_rand;
uint32 he_colormiss_cnt;
@@ -3569,6 +3855,38 @@
uint32 he_txtbppdu;
} wl_he_cnt_wlc_v1_t;
+/* he counters Version 2 */
+#define HE_COUNTERS_V2 (2)
+typedef struct wl_he_cnt_wlc_v2 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxdlmu; /**< reception of DL MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
+ uint32 he_rxtrig_bfrp; /**< reception of BFRP frame */
+ uint32 he_rxtrig_mubar; /**< reception of MU BAR frame */
+} wl_he_cnt_wlc_v2_t;
+
+#ifndef HE_COUNTERS_VERSION_ENABLED
+#define HE_COUNTERS_VERSION (HE_COUNTERS_V1)
+typedef wl_he_cnt_wlc_v1_t wl_he_cnt_wlc_t;
+#endif /* HE_COUNTERS_VERSION_ENABLED */
+
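Because wl_he_cnt_wlc_v1_t has no version/len header while v2 is self-describing, consumers of the counters XTLV have to gate on the buffer contents. A minimal sketch of that version gate, with the surrounding counter-dump code assumed:

/* Sketch only: a size check plus a version probe is one way to tell v1 and v2
 * apart; a real parser would also cross-check against the reported XTLV
 * length, since misdetection is possible on arbitrary buffers.
 */
static int parse_he_counters(const void *buf, uint16 buflen)
{
	const wl_he_cnt_wlc_v2_t *v2 = (const wl_he_cnt_wlc_v2_t *)buf;

	if (buflen >= sizeof(*v2) && v2->version == HE_COUNTERS_V2)
		return 2;       /* v2: he_null_tbppdu, he_rxtrig_bfrp, etc. are valid */
	if (buflen >= sizeof(wl_he_cnt_wlc_v1_t))
		return 1;       /* v1: fixed layout, no version field */
	return -1;
}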
/* security vulnerabilities counters */
typedef struct {
uint32 ie_unknown; /* number of unknown IEs */
@@ -3865,7 +4183,9 @@
uint32 PAD[6];
uint32 rxerr_stat;
uint32 ctx_fifo_full;
- uint32 PAD[38]; /* PAD added for counter elements to be added soon */
+ uint32 txdatamcast; /**< Number of TX multicast data packets */
+ uint32 txdatabcast; /**< Number of TX broadcast data packets */
+ uint32 PAD[36]; /* PAD added for counter elements to be added soon */
} wl_cnt_ge80mcst_v1_t;
typedef struct {
@@ -5506,6 +5826,7 @@
int32 auth; /**< Open Vs Closed */
int32 wpa_auth; /**< WPA type */
int32 wsec; /**< wsec value */
+ wsec_pmk_t psk; /**< Password */
} wl_pfn_t;
typedef struct wl_pfn_list {
@@ -6293,7 +6614,16 @@
uint16 sweep_counter[]; /**< Array of frame counters */
} wl_pkteng_sweep_ctrs_t;
-#define WL_PKTENG_RU_FILL_VER_1 1
+/* IOVAR pkteng_rx_pkt response structure */
+#define WL_PKTENG_RX_PKT_VERSION 1
+typedef struct wl_pkteng_rx_pkt {
+ uint16 version; /**< Version - 1 */
+ uint16 size; /**< Complete Size including the packet */
+ uint8 payload[]; /**< Packet payload */
+} wl_pkteng_rx_pkt_t;
+
+#define WL_PKTENG_RU_FILL_VER_1 1u
+#define WL_PKTENG_RU_FILL_VER_2 2u
// struct for ru packet engine
typedef struct wl_pkteng_ru_v1 {
uint16 version; /* ver is 1 */
@@ -6313,9 +6643,33 @@
uint8 trig_tx; /* form and transmit the trigger frame */
uint8 trig_type; /* type of trigger frame */
uint8 trig_period; /* trigger tx periodicity TBD */
+ uint8 nuserinfo; /* Number of Userinfo fields */
struct ether_addr dest; /* destination address for un-associated mode */
} wl_pkteng_ru_v1_t;
+typedef struct wl_pkteng_ru_v2 {
+ uint16 version; /* ver is 2 */
+ uint16 length; /* size of complete structure */
+ uint8 bw; /* bandwidth info */
+ uint8 ru_alloc_val; /* ru allocation index number */
+ uint8 mcs_val; /* mcs allocated value */
+ uint8 nss_val; /* num of spatial streams */
+ uint32 num_bytes; /* approx num of bytes to calculate other required params */
+ struct ether_addr dest; /* destination address for un-associated mode */
+ uint8 cp_ltf_val; /* GI and LTF symbol size */
+ uint8 he_ltf_symb; /* num of HE-LTF symbols */
+ uint8 stbc; /* STBC support */
+ uint8 coding_val; /* BCC/LDPC coding support */
+ uint8 pe_category; /* PE duration 0/8/16usecs */
+ uint8 dcm; /* dual carrier modulation */
+ uint8 mumimo_ltfmode; /* ltf mode */
+ uint8 trig_tx; /* form and transmit the trigger frame */
+ uint8 trig_type; /* type of trigger frame */
+ uint8 trig_period; /* trigger tx periodicity TBD */
+ uint8 tgt_rssi; /* target rssi value in encoded format */
+ uint8 pad[3]; /* 3 byte padding to make structure size a multiple of 32bits */
+} wl_pkteng_ru_v2_t;
+
#ifndef WL_PKTENG_RU_VER
/* App uses the latest version - source picks it up from wlc_types.h */
typedef wl_pkteng_ru_v1_t wl_pkteng_ru_fill_t;
@@ -6543,6 +6897,7 @@
* for reporting purpose only.
*/
#define WL_CHAN_QUAL_FULL_CCA (0x100 | WL_CHAN_QUAL_CCA)
+#define WL_CHAN_QUAL_FULLPM_CCA (0x200u | WL_CHAN_QUAL_CCA) /* CCA: me vs. notme, PM vs. !PM */
#define MAX_CHAN_QUAL_LEVELS 8
@@ -6835,6 +7190,9 @@
#define NBR_ADD_DYNAMIC 1
#define WL_RRM_NBR_RPT_VER 1
+#define WL_NBR_RPT_FLAG_BSS_PREF_FROM_AP 0x01
+#define WL_RRM_DEFAULT_BSS_PREFERENCE 255
+#define WL_RRM_ADD_NBR_MIN_PARAMS 6
/** 11k Neighbor Report element */
typedef struct nbr_rpt_elem {
uint8 version;
@@ -6854,6 +7212,12 @@
uint8 flags;
} nbr_rpt_elem_t;
+typedef struct bcn_nbr_query {
+ uint16 list_cnt;
+ uint8 pad[2];
+ struct ether_addr sta_addr;
+} bcn_nbr_query_t;
+
typedef enum event_msgs_ext_command {
EVENTMSGS_NONE = 0,
EVENTMSGS_SET_BIT = 1,
@@ -6928,18 +7292,16 @@
/* ##### HMAP section ##### */
#define PCIE_MAX_HMAP_WINDOWS 8
-#define PCIE_HMAPTEST_VERSION 1
-#define HMAPTEST_VAL_INVALIDMASK 1
-#define HMAPTEST_VAL_WRITEMASK 2
-#define HMAPTEST_ACCESS_TYPE_MASK 0b1100
-#define HMAPTEST_ACCESS_TYPE_SHIFT 2
+#define PCIE_HMAPTEST_VERSION 2
+#define HMAPTEST_INVALID_OFFSET 0xFFFFFFFFu
+#define HMAPTEST_DEFAULT_WRITE_PATTERN 0xBABECAFEu
#define HMAPTEST_ACCESS_ARM 0
#define HMAPTEST_ACCESS_M2M 1
#define HMAPTEST_ACCESS_D11 2
-#define HMAPTEST_ACCESS_NONE 3 /* marker to stop HMAPTEST */
+#define HMAPTEST_ACCESS_NONE 3
typedef struct pcie_hmaptest {
- uint16 version; /* Version = 1 */
+ uint16 version; /* Version */
uint16 length; /* Length of entire structure */
uint32 xfer_len;
uint32 accesstype;
@@ -6947,6 +7309,9 @@
uint32 is_invalid;
uint32 host_addr_hi;
uint32 host_addr_lo;
+ uint32 host_offset;
+ uint32 value; /* 4 byte value to be filled in case of write access test */
+ uint32 delay; /* wait time in seconds before initiating access from dongle */
} pcie_hmaptest_t;
/* HMAP window register set */
@@ -6958,7 +7323,7 @@
#define PCIE_HMAP_VERSION 1
typedef struct pcie_hmap {
- uint16 version; /**< Version = 1 */
+ uint16 version; /**< Version */
uint16 length; /**< Length of entire structure */
uint32 enable; /**< status of HMAP enabled/disabled */
uint32 nwindows; /* no. of HMAP windows enabled */
@@ -7768,7 +8133,7 @@
chanspec_t local_chanspec; /**< channel on which we are associated */
uint8 local_max; /**< local max according to the AP */
uint8 local_constraint; /**< local constraint according to the AP */
- int8 antgain[2]; /**< Ant gain for each band - from SROM */
+ int8 antgain[2]; /**< Ant gain for each band - from SROM */
uint8 rf_cores; /**< count of RF Cores being reported */
uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */
uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */
@@ -7785,11 +8150,55 @@
uint32 ppr_len; /**< length of each ppr serialization buffer */
int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
uint8 pprdata[1]; /**< ppr serialization buffer */
-} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_v1_t;
#include <packed_section_end.h>
#include <packed_section_start.h>
typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 flags;
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint8 local_max; /**< local max according to the AP */
+ uint8 local_constraint; /**< local constraint according to the AP */
+ int8 antgain[3]; /**< Ant gain for each band - from SROM */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+ uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */
+ uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */
+ uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */
+ uint8 tx_power_max[4]; /**< Maximum target power among all rates */
+ uint32 tx_power_max_rate_ind[4]; /**< Index of the rate with the max target power */
+ int8 sar; /**< SAR limit for display by wl executable */
+ int8 channel_bandwidth; /**< 20, 40 or 80 MHz bandwidth? */
+ uint8 version; /**< Version of the data format wlu <--> driver */
+ uint8 display_core; /**< Displayed curpower core */
+ int8 target_offsets[4]; /**< Target power offsets for current rate per core */
+ uint32 last_tx_ratespec; /**< Ratespec for last transmission */
+ uint32 user_target; /**< user limit */
+ uint32 ppr_len; /**< length of each ppr serialization buffer */
+ int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+ uint8 pprdata[1]; /**< ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 version; /* Version of the data format wlu <--> driver */
+ uint32 length; /* Length token */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_version_t;
+#include <packed_section_end.h>
+
+#ifdef TBAND
+typedef tx_pwr_rpt_v2_t tx_pwr_rpt_t;
+#else
+typedef tx_pwr_rpt_v1_t tx_pwr_rpt_t;
+#endif /* TBAND */
+
+#define TXPWR_RPT_LEN_TOKEN 0x5649414e /* Spells VIAN */
+#define TXPWR_RPT_VERSION_V1 1
+#define TXPWR_RPT_VERSION_V2 2
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
struct ipv4_addr ipv4_addr;
struct ether_addr nexthop;
} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
@@ -7905,6 +8314,50 @@
/* Version of wlc_btc_stats_t structure.
* Increment whenever a change is made to wlc_btc_stats_t
*/
+#define BTCX_STATS_VER_4 4
+typedef struct wlc_btc_stats_v4 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* Size of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated. */
+ uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */
+ uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
+ uint32 bt_req_cnt; /* #BT antenna requests since last stats sample */
+ uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
+ uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
+ uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
+ uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
+ uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
+ uint16 bt_succ_pm_protect_cnt; /* successful PM protection */
+ uint16 bt_succ_cts_cnt; /* successful CTS2A protection */
+ uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */
+ uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */
+ uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */
+ uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */
+ uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */
+ uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */
+ uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */
+ uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */
+ uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */
+ uint16 bt_dcsn_map; /* Accumulated decision bitmap once Ant grant */
+ uint16 bt_dcsn_cnt; /* Accumulated decision bitmap counters once Ant grant */
+ uint16 bt_a2dp_hiwat_cnt; /* Ant grant by a2dp high watermark */
+ uint16 bt_datadelay_cnt; /* Ant grant by acl/a2dp datadelay */
+ uint16 bt_crtpri_cnt; /* Ant grant by critical BT task */
+ uint16 bt_pri_cnt; /* Ant grant by high BT task */
+ uint16 a2dpbuf1cnt; /* Ant request with a2dp buffercnt 1 */
+ uint16 a2dpbuf2cnt; /* Ant request with a2dp buffercnt 2 */
+ uint16 a2dpbuf3cnt; /* Ant request with a2dp buffercnt 3 */
+ uint16 a2dpbuf4cnt; /* Ant request with a2dp buffercnt 4 */
+ uint16 a2dpbuf5cnt; /* Ant request with a2dp buffercnt 5 */
+ uint16 a2dpbuf6cnt; /* Ant request with a2dp buffercnt 6 */
+ uint16 a2dpbuf7cnt; /* Ant request with a2dp buffercnt 7 */
+ uint16 a2dpbuf8cnt; /* Ant request with a2dp buffercnt 8 */
+ uint16 antgrant_lt10ms; /* Ant grant duration cnt 0~10ms */
+ uint16 antgrant_lt30ms; /* Ant grant duration cnt 10~30ms */
+ uint16 antgrant_lt60ms; /* Ant grant duration cnt 30~60ms */
+ uint16 antgrant_ge60ms; /* Ant grant duration cnt 60~ms */
+} wlc_btc_stats_v4_t;
+
#define BTCX_STATS_VER_3 3
typedef struct wlc_btc_stats_v3 {
@@ -7952,6 +8405,44 @@
uint16 rsvd; /* pad to align struct to 32bit bndry */
} wlc_btc_stats_v2_t;
+/* Durations for each bt task in millisecond */
+#define WL_BTCX_DURSTATS_VER_1 (1u)
+typedef struct wlc_btcx_durstats_v1 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* validity of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated */
+ uint16 bt_acl_dur; /* acl duration in ms */
+ uint16 bt_sco_dur; /* sco duration in ms */
+ uint16 bt_esco_dur; /* esco duration in ms */
+ uint16 bt_a2dp_dur; /* a2dp duration in ms */
+ uint16 bt_sniff_dur; /* sniff duration in ms */
+ uint16 bt_pscan_dur; /* page scan duration in ms */
+ uint16 bt_iscan_dur; /* inquiry scan duration in ms */
+ uint16 bt_page_dur; /* paging duration in ms */
+ uint16 bt_inquiry_dur; /* inquiry duration in ms */
+ uint16 bt_mss_dur; /* mss duration in ms */
+ uint16 bt_park_dur; /* park duration in ms */
+ uint16 bt_rssiscan_dur; /* rssiscan duration in ms */
+ uint16 bt_iscan_sco_dur; /* inquiry scan sco duration in ms */
+ uint16 bt_pscan_sco_dur; /* page scan sco duration in ms */
+ uint16 bt_tpoll_dur; /* tpoll duration in ms */
+ uint16 bt_sacq_dur; /* sacq duration in ms */
+ uint16 bt_sdata_dur; /* sdata duration in ms */
+ uint16 bt_rs_listen_dur; /* rs listen duration in ms */
+ uint16 bt_rs_burst_dur; /* rs burst duration in ms */
+ uint16 bt_ble_adv_dur; /* ble adv duration in ms */
+ uint16 bt_ble_scan_dur; /* ble scan duration in ms */
+ uint16 bt_ble_init_dur; /* ble init duration in ms */
+ uint16 bt_ble_conn_dur; /* ble connection duration in ms */
+ uint16 bt_task_lmp_dur; /* lmp duration in ms */
+ uint16 bt_esco_retran_dur; /* esco retransmission duration in ms */
+ uint16 bt_task26_dur; /* task26 duration in ms */
+ uint16 bt_task27_dur; /* task27 duration in ms */
+ uint16 bt_task28_dur; /* task28 duration in ms */
+ uint16 bt_task_pred_dur; /* prediction task duration in ms */
+ uint16 bt_multihid_dur; /* multihid duration in ms */
+} wlc_btcx_durstats_v1_t;
+
#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64
@@ -8022,12 +8513,13 @@
uint8 PAD[3];
} chanim_stats_v2_t;
+#define CCASTATS_V3_MAX 10
typedef struct chanim_stats {
uint32 glitchcnt; /**< normalized as per second count */
uint32 badplcp; /**< normalized as per second count */
- uint8 ccastats[CCASTATS_MAX]; /**< normalized as 0-255 */
+ uint8 ccastats[CCASTATS_V3_MAX]; /**< normalized as 0-255 */
int8 bgnoise; /**< background noise level (in dBm) */
- uint8 pad_1[11 - CCASTATS_MAX];
+ uint8 pad_1[11 - CCASTATS_V3_MAX];
chanspec_t chanspec; /**< ctrl chanspec of the interface */
uint8 pad_2[2];
uint32 timestamp; /**< time stamp at which the stats are collected */
@@ -8274,6 +8766,10 @@
typedef struct {
uint8 band2g[WLC_TXCORE_MAX];
uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+#ifdef TBAND
+ /* Temporary WAR. SWWLAN-140646 */
+ uint8 band6g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+#endif /* TBAND */
} sar_limit_t;
#define MAX_NUM_TXCAL_MEAS 128
@@ -8370,12 +8866,28 @@
uint8 ofdm;
} wl_olpc_pwr_t;
+typedef struct wl_rfem_temp_vdet_temp {
+ uint8 vdet_fem_t1;
+ int8 rfem_temp_t1;
+ uint8 vdet_fem_t2;
+ int8 rfem_temp_t2;
+} wl_rfem_temp_vdet_temp_t;
+
+typedef struct wl_rfem_temp_vin_tssi {
+ uint16 vin_chip_v1;
+ int16 tssi_chip_v1;
+ uint16 vin_chip_v2;
+ int16 tssi_chip_v2;
+} wl_rfem_temp_vin_tssi_t;
+
typedef struct wl_txcal_tempsense {
uint16 version;
uint8 valid_cnt;
uint8 core;
int16 ref_temperature;
int16 meas_temperature;
+ wl_rfem_temp_vdet_temp_t vdet_temp;
+ wl_rfem_temp_vin_tssi_t vin_tssi;
} wl_txcal_tempsense_t;
/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
@@ -8390,6 +8902,87 @@
uint32 ipaddr_gateway;
} nwoe_ifconfig_t;
+/** Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+ trf_mgmt_priority_low = 0, /**< Maps to 802.1p BK */
+ trf_mgmt_priority_medium = 1, /**< Maps to 802.1p BE */
+ trf_mgmt_priority_high = 2, /**< Maps to 802.1p VI */
+ trf_mgmt_priority_nochange = 3, /**< do not update the priority */
+ trf_mgmt_priority_invalid = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/** Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+ uint32 trf_mgmt_enabled; /**< 0 - disabled, 1 - enabled */
+ uint32 flags; /**< See TRF_MGMT_FLAG_xxx defines */
+ uint32 host_ip_addr; /**< My IP address to determine subnet */
+ uint32 host_subnet_mask; /**< My subnet mask */
+ uint32 downlink_bandwidth; /**< In units of kbps */
+ uint32 uplink_bandwidth; /**< In units of kbps */
+ uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed tx bandwidth */
+ uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/** Traffic management filter */
+typedef struct trf_mgmt_filter {
+ struct ether_addr dst_ether_addr; /**< His L2 address */
+ uint8 PAD[2];
+ uint32 dst_ip_addr; /**< His IP address */
+ uint16 dst_port; /**< His L4 port */
+ uint16 src_port; /**< My L4 port */
+ uint16 prot; /**< L4 protocol (only TCP or UDP) */
+ uint16 flags; /**< TBD. For now, this must be zero. */
+ trf_mgmt_priority_class_t priority; /**< Priority for filtered packets */
+ uint32 dscp; /**< DSCP */
+} trf_mgmt_filter_t;
+
+/** Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list {
+ uint32 num_filters;
+ trf_mgmt_filter_t filter[1];
+} trf_mgmt_filter_list_t;
+
+/** Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+ uint32 maximum_bytes_per_second;
+ uint32 maximum_bytes_per_sampling_period;
+ uint32 total_bytes_consumed_per_second;
+ uint32 total_bytes_consumed_per_sampling_period;
+ uint32 total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/** Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+ uint32 gauranteed_bandwidth_percentage;
+ uint32 guaranteed_bytes_per_second;
+ uint32 guaranteed_bytes_per_sampling_period;
+ uint32 num_bytes_produced_per_second;
+ uint32 num_bytes_consumed_per_second;
+ uint32 num_queued_packets; /**< Number of packets in queue */
+ uint32 num_queued_bytes; /**< Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/** Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+ trf_mgmt_global_info_t tx_global_shaping_info;
+ trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_global_info_t rx_global_shaping_info;
+ trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+/** Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+ uint32 num_processed_packets; /**< Number of packets processed */
+ uint32 num_processed_bytes; /**< Number of bytes processed */
+ uint32 num_discarded_packets; /**< Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/** Traffic management statistics array */
+typedef struct trf_mgmt_stats_array {
+ trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
/* Both powersel_params and lpc_params are used by IOVAR lpc_params.
* The powersel_params is replaced by lpc_params in later WLC versions.
*/
@@ -8632,6 +9225,15 @@
uint8 unicast; /**< request by unicast or by broadcast */
} wl_bsstrans_req_t;
+typedef struct wl_bsstrans_req_v2 {
+ wl_bsstrans_req_t v1;
+ int16 reason; /**< Transition reason code, -1: no reason code */
+ uint16 assoc_delay; /**< Association retry delay, 0: means no delay */
+} wl_bsstrans_req_v2_t;
+
+#define WL_BTM_REQ_NO_REASON_CODE -1
+#define WL_BTM_REQ_NO_ASSOC_RETRY_DELAY 0
+
enum {
BSSTRANS_RESP_AUTO = 0, /**< Currently equivalent to ENABLE */
BSSTRANS_RESP_DISABLE = 1, /**< Never answer BSS Trans Req frames */
@@ -9185,58 +9787,59 @@
* Bit 0 : If set to 1, means event uses nan bsscfg,
* otherwise uses infra bsscfg. Default is using infra bsscfg
*/
-#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x000001
+#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x0000001
/* If set, discovery beacons are transmitted on 2G band */
-#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x000002
+#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x0000002
/* If set, sync beacons are transmitted on 2G band */
-#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x000004
+#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x0000004
/* If set, discovery beacons are transmitted on 5G band */
-#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x000008
+#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x0000008
/* If set, sync beacons are transmitted on 5G band */
-#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x000010
+#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x0000010
/* If set, auto datapath responses will be sent by FW */
-#define WL_NAN_CTRL_AUTO_DPRESP 0x000020
+#define WL_NAN_CTRL_AUTO_DPRESP 0x0000020
/* If set, auto datapath confirms will be sent by FW */
-#define WL_NAN_CTRL_AUTO_DPCONF 0x000040
+#define WL_NAN_CTRL_AUTO_DPCONF 0x0000040
/* If set, auto schedule responses will be sent by FW */
-#define WL_NAN_CTRL_AUTO_SCHEDRESP 0x000080
+#define WL_NAN_CTRL_AUTO_SCHEDRESP 0x0000080
/* If set, auto schedule confirms will be sent by FW */
-#define WL_NAN_CTRL_AUTO_SCHEDCONF 0x000100
+#define WL_NAN_CTRL_AUTO_SCHEDCONF 0x0000100
/* If set, proprietary rates are supported by FW */
-#define WL_NAN_CTRL_PROP_RATE 0x000200
+#define WL_NAN_CTRL_PROP_RATE 0x0000200
/* If set, service awake_dw overrides global dev awake_dw */
-#define WL_NAN_CTRL_SVC_OVERRIDE_DEV_AWAKE_DW 0x000400
+#define WL_NAN_CTRL_SVC_OVERRIDE_DEV_AWAKE_DW 0x0000400
/* If set, merge scan will be disabled */
-#define WL_NAN_CTRL_SCAN_DISABLE 0x000800
+#define WL_NAN_CTRL_SCAN_DISABLE 0x0000800
/* If set, power save will be disabled */
-#define WL_NAN_CTRL_POWER_SAVE_DISABLE 0x001000
+#define WL_NAN_CTRL_POWER_SAVE_DISABLE 0x0001000
/* If set, device will merge to configured CID only */
-#define WL_NAN_CTRL_MERGE_CONF_CID_ONLY 0x002000
+#define WL_NAN_CTRL_MERGE_CONF_CID_ONLY 0x0002000
/* If set, 5g core will be brought down in single band NAN */
-#define WL_NAN_CTRL_5G_SLICE_POWER_OPT 0x004000
-#define WL_NAN_CTRL_DUMP_HEAP 0x008000
+#define WL_NAN_CTRL_5G_SLICE_POWER_OPT 0x0004000
+#define WL_NAN_CTRL_DUMP_HEAP 0x0008000
/* If set, host generates and assign ndp id for ndp sessions */
-#define WL_NAN_CTRL_HOST_GEN_NDPID 0x010000
+#define WL_NAN_CTRL_HOST_GEN_NDPID 0x0010000
/* If set, nan ndp inactivity watchdog will be activated */
-#define WL_NAN_CTRL_DELETE_INACTIVE_PEERS 0x020000
+#define WL_NAN_CTRL_DELETE_INACTIVE_PEERS 0x0020000
/* If set, nan assoc coex will be activated */
-#define WL_NAN_CTRL_INFRA_ASSOC_COEX 0x040000
+#define WL_NAN_CTRL_INFRA_ASSOC_COEX 0x0040000
/* If set, dam will accept all NDP/RNG request from the peer including counter */
-#define WL_NAN_CTRL_DAM_ACCEPT_ALL 0x080000
+#define WL_NAN_CTRL_DAM_ACCEPT_ALL 0x0080000
/* If set, nan mac ignores role for tx discovery beacon for periodic config */
-#define WL_NAN_CTRL_FASTDISC_IGNO_ROLE 0x100000
+#define WL_NAN_CTRL_FASTDISC_IGNO_ROLE 0x0100000
/* If set, include NA in NAN beacons (disc beacons for now) */
-#define WL_NAN_CTRL_INCL_NA_IN_BCNS 0x200000
+#define WL_NAN_CTRL_INCL_NA_IN_BCNS 0x0200000
/* If set, host assist will be enabled */
-#define WL_NAN_CTRL_HOST_ASSIST 0x400000
+#define WL_NAN_CTRL_HOST_ASSIST 0x0400000
/* If set, host configures NDI associated with the service */
-#define WL_NAN_CTRL_HOST_CFG_SVC_NDI 0x800000
+#define WL_NAN_CTRL_HOST_CFG_SVC_NDI 0x0800000
+#define WL_NAN_CTRL_NDP_HB_ENABLE 0x1000000
/* Value when all host-configurable bits set */
-#define WL_NAN_CTRL_MAX_MASK 0xFFFFFF
-#define WL_NAN_CFG_CTRL_FW_BITS 8
+#define WL_NAN_CTRL_MAX_MASK 0xFFFFFFF
+#define WL_NAN_CFG_CTRL_FW_BITS 4
-/* Last 8-bits are firmware controlled bits.
+/* Last 4-bits are firmware controlled bits.
* Bit 31:
* If set - indicates that NAN initialization is successful
* Bit 30:
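Illustrative sketch, not part of the patch: with the NAN control word widened to 28 host-configurable bits (the top 4 now firmware-owned), a host-side tool might sanity-check a value before issuing the iovar. The constant values below are copied from the hunk above; the helper and its use in main() are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define WL_NAN_CTRL_AUTO_DPRESP   0x0000020u  /* mirrors the define above */
#define WL_NAN_CTRL_NDP_HB_ENABLE 0x1000000u  /* new bit added by this change */
#define WL_NAN_CTRL_MAX_MASK      0xFFFFFFFu  /* all host-configurable bits */

static int nan_ctrl_valid(uint32_t ctrl)
{
	/* bits above MAX_MASK (the top 4) are firmware controlled */
	return (ctrl & ~WL_NAN_CTRL_MAX_MASK) == 0;
}

int main(void)
{
	uint32_t ctrl = WL_NAN_CTRL_AUTO_DPRESP | WL_NAN_CTRL_NDP_HB_ENABLE;

	printf("valid=%d hb=%d\n", nan_ctrl_valid(ctrl),
	       (ctrl & WL_NAN_CTRL_NDP_HB_ENABLE) != 0);
	return 0;
}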
@@ -9365,6 +9968,8 @@
WL_NAN_FRM_TYPE_RNG_RESP = 14,
WL_NAN_FRM_TYPE_RNG_TERM = 15,
WL_NAN_FRM_TYPE_RNG_REPORT = 16,
+
+ WL_NAN_FRM_TYPE_UNSOLICIT_SDF = 17,
WL_NAN_FRM_TYPE_INVALID
};
typedef uint8 wl_nan_frame_type_t;
@@ -9424,6 +10029,21 @@
uint8 attr_list[0]; /* attributes payload */
} wl_nan_ev_receive_t;
+/* WL_NAN_EVENT_DISC_CACHE_TIMEOUT */
+#define WL_NAN_DISC_CACHE_EXPIRY_ENTRIES_MAX 8
+
+typedef struct wl_nan_disc_expired_cache_entry {
+ uint8 l_sub_id; /* local sub instance_id */
+ uint8 r_pub_id; /* remote-matched pub instance_id */
+ struct ether_addr r_nmi_addr; /* remote-matched pub nmi addr */
+} wl_nan_disc_expired_cache_entry_t;
+
+typedef struct wl_nan_ev_disc_cache_timeout {
+ uint16 count; /* no. of expired cache entries */
+ uint16 pad;
+ wl_nan_disc_expired_cache_entry_t cache_exp_list[];
+} wl_nan_ev_disc_cache_timeout_t;
+
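Illustrative sketch, not part of the patch: a minimal consumer-side walk of the new WL_NAN_EVENT_DISC_CACHE_TIMEOUT payload. The local struct names mirror wl_nan_disc_expired_cache_entry_t / wl_nan_ev_disc_cache_timeout_t above, with ether_addr reduced to a 6-byte array so the example is self-contained.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct eth_addr { uint8_t octet[6]; };    /* stand-in for struct ether_addr */

struct disc_expired_entry {               /* mirrors wl_nan_disc_expired_cache_entry_t */
	uint8_t l_sub_id;                 /* local sub instance_id */
	uint8_t r_pub_id;                 /* remote-matched pub instance_id */
	struct eth_addr r_nmi_addr;
};

struct disc_cache_timeout {               /* mirrors wl_nan_ev_disc_cache_timeout_t */
	uint16_t count;                   /* no. of expired cache entries */
	uint16_t pad;
	struct disc_expired_entry cache_exp_list[];
};

static void dump_expired(const struct disc_cache_timeout *ev)
{
	for (unsigned i = 0; i < ev->count; i++)
		printf("expired %u: local sub %u matched remote pub %u\n", i,
		       (unsigned)ev->cache_exp_list[i].l_sub_id,
		       (unsigned)ev->cache_exp_list[i].r_pub_id);
}

int main(void)
{
	struct disc_cache_timeout *ev =
	    calloc(1, sizeof(*ev) + 2 * sizeof(struct disc_expired_entry));

	if (!ev)
		return 1;
	ev->count = 2;
	ev->cache_exp_list[0].l_sub_id = 1;
	ev->cache_exp_list[1].l_sub_id = 2;
	dump_expired(ev);
	free(ev);
	return 0;
}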
/* For NAN event mask extension */
#define WL_NAN_EVMASK_EXTN_VER 1
#define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */
@@ -9492,6 +10112,7 @@
#define WL_NAN_CMD_GENERIC_COMP_ID 0x08
#define WL_NAN_CMD_SCHED_COMP_ID 0x09
#define WL_NAN_CMD_NSR_COMP_ID 0x0a /* NAN Save Restore */
+#define WL_NAN_CMD_NANHO_COMP_ID 0x0b /* NAN Host offload */
#define WL_NAN_CMD_DBG_COMP_ID 0x0f
#define WL_NAN_CMD_COMP_SHIFT 8
@@ -9537,6 +10158,7 @@
WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10),
WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11),
+ WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x12),
WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
@@ -9565,7 +10187,16 @@
/* Nan Save-Restore XTLVs */
WL_NAN_XTLV_NSR2_PEER = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x21),
- WL_NAN_XTLV_NSR2_NDP = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x22)
+ WL_NAN_XTLV_NSR2_NDP = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x22),
+
+ /* Host offload XTLVs */
+ WL_NAN_XTLV_NANHO_PEER_ENTRY = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
+ WL_NAN_XTLV_NANHO_DCAPLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02),
+ WL_NAN_XTLV_NANHO_DCSLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03),
+ WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
+ WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
+ WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06),
+ WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07)
} wl_nan_tlv_t;
/* Sub Module ID's for NAN */
@@ -9577,10 +10208,10 @@
NAN_PEER_ENTRY = 4, /* nan peer entry */
NAN_AVAIL = 5, /* nan avail */
NAN_DAM = 6, /* nan dam */
- NAN_NDP = 7, /* nan ndp */
- NAN_NDL = 8, /* nan ndl */
- NAN_DP = 9, /* nan dp core */
- NAN_FSM = 10, /* nan fsm registry */
+ NAN_FSM = 7, /* nan fsm registry */
+ NAN_NDP = 8, /* nan ndp */
+ NAN_NDL = 9, /* nan ndl */
+ NAN_DP = 10, /* nan dp core */
NAN_RNG = 11, /* nan ranging */
NAN_SEC = 12, /* nan sec */
NAN_LAST = 13
@@ -9651,7 +10282,9 @@
WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B),
WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
- WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW,
+ WL_NAN_CMD_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
+ WL_NAN_CMD_SD_DISC_CACHE_CLEAR = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
+ WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_DISC_CACHE_CLEAR,
/* nan time sync sub-commands */
@@ -9691,6 +10324,8 @@
WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03),
WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04),
+ WL_NAN_CMD_RANGE_IDLE_COUNT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x05),
+ WL_NAN_CMD_RANGE_CANCEL_EXT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x06),
/* nan debug sub-commands */
WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
@@ -9714,11 +10349,17 @@
/* Generic component */
WL_NAN_CMD_GEN_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
WL_NAN_CMD_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
+ WL_NAN_CMD_GEN_MAX = WL_NAN_CMD_GEN_FW_CAP,
/* NAN Save-Restore */
WL_NAN_CMD_NSR2 = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x20),
+ WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2,
- WL_NAN_CMD_GEN_MAX = WL_NAN_CMD_NSR2 /* New ones before and update */
+ /* Host offload sub-commands */
+ WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
+ WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02),
+ WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03),
+ WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_OOB_NAF
};
/*
@@ -9998,6 +10639,15 @@
/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
enum {
/* add new status here... */
+ WL_NAN_E_PEER_NOTAVAIL = -2131,
+ WL_NAN_E_SCB_EXISTS = -2130,
+ WL_NAN_E_INVALID_PEER_NDI = -2129,
+ WL_NAN_E_INVALID_LOCAL_NDI = -2128,
+ WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */
+ WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126,
+ WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125,
+ WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124,
+ WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123,
WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */
WL_NAN_E_TIMEOUT = -2121,
WL_NAN_E_HOST_CFG = -2120,
@@ -10157,11 +10807,15 @@
*/
WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_UCAST_IN_PROG = 0x00000001,
/* Allows broadcast SDF TX while local device is under NDP/NDL negotiation */
- WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_BCAST_IN_PROG = 0x00000002
+ WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_BCAST_IN_PROG = 0x00000002,
+ /* Allows the device to send schedule update automatically on local schedule change */
+ WL_NAN_CTRL2_FLAG1_AUTO_SCHEDUPD = 0x00000004,
+ /* Allows the device to handle slot pre_close operations */
+ WL_NAN_CTRL2_FLAG1_SLOT_PRE_CLOSE = 0x00000008
};
-#define WL_NAN_CTRL_FLAGS1_MASK 0x00000003
+#define WL_NAN_CTRL2_FLAGS1_MASK 0x0000000F
-#define WL_NAN_CTRL_FLAGS2_MASK 0x00000000
+#define WL_NAN_CTRL2_FLAGS2_MASK 0x00000000
/*
* WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD(Get only)
@@ -10406,6 +11060,26 @@
} nan_adv_entry_t;
#define NAN_VIRTUAL_PEER_BIT 0x80
+typedef enum {
+ NAC_CNT_NTLV_AF_TX = 0, /* count of AWDL AF containing NTLV tx */
+ NAC_CNT_NTLV_AF_RX, /* count of AWDL AF containing NTLV rx */
+ NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */
+ NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */
+ NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */
+ NAC_CNT_NTLV_ADV_EXISTED, /* count of NTLV ignored bc advertiser existed from bcn */
+ NAC_CNT_NTLV_STALED_BCN, /* count of staled bcn from NTLV info */
+ NAC_CNT_NTLV_MERGE, /* count of NTLV used for NAN cluster merge */
+ NAC_CNT_NTLV_ELECTION_DROP, /* count of NTLV dropped in NAN election */
+ NAC_CNT_NTLV_TSF_ADOPT, /* count of NTLV used for NAN TSF adoption */
+ NAC_CNT_NTLV_LAST
+} nac_cnt_enum_t;
+
+#define NAC_MAX_CNT (NAC_CNT_NTLV_LAST)
+
+typedef struct nac_stats {
+ uint32 nac_cnt[NAC_MAX_CNT];
+} nac_stats_t;
+
typedef struct nan_adv_table {
uint8 num_adv;
uint8 adv_size;
@@ -10420,12 +11094,6 @@
nan_sync_master_t target_master;
} wl_nan_role_cfg_t;
-typedef struct wl_nan_role_config {
- wl_nan_role_t role;
- struct ether_addr target_master;
- uint8 pad;
-} wl_nan_role_config_t;
-
typedef int8 wl_nan_sd_optional_field_types_t;
/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */
@@ -10633,6 +11301,9 @@
uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */
} wl_nan_sd_transmit_t;
+/* disc cache timeout for a cache entry */
+typedef uint16 wl_nan_disc_cache_timeout_t;
+
/*
* WL_NAN_CMD_SYNC_TSRESERVE
*/
@@ -11325,23 +11996,42 @@
WL_NAN_FW_CAP_FLAG1_NO_ID_GEN = 0x00004000,
WL_NAN_FW_CAP_FLAG1_DP_OPAQUE_DATA = 0x00008000,
WL_NAN_FW_CAP_FLAG1_NSR2 = 0x00010000,
- WL_NAN_FW_CAP_FLAG1_NSR2_SAVE = 0x00020000
+ WL_NAN_FW_CAP_FLAG1_NSR2_SAVE = 0x00020000,
+ WL_NAN_FW_CAP_FLAG1_NANHO = 0x00040000
};
/* WL_NAN_XTLV_GEN_FW_CAP */
typedef struct wl_nan_fw_cap {
- uint32 flags1; /* nan sub-features compiled in firmware */
- uint32 flags2; /* for more sub-features in future */
- uint8 max_svc_publishes; /* max num of service publish */
- uint8 max_svc_subscribes; /* max num of service subscribe */
- uint8 max_lcl_sched_maps; /* max num of local schedule map */
- uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
- uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
- uint8 max_peer_entries; /* max num of peer entry */
- uint8 max_ndp_sessions; /* max num of NDP session */
- uint8 pad;
+ uint32 flags1; /* nan sub-features compiled in firmware */
+ uint32 flags2; /* for more sub-features in future */
+ uint8 max_svc_publishes; /* max num of service publish */
+ uint8 max_svc_subscribes; /* max num of service subscribe */
+ uint8 max_lcl_sched_maps; /* max num of local schedule map */
+ uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
+ uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
+ uint8 max_peer_entries; /* max num of peer entry */
+ uint8 max_ndp_sessions; /* max num of NDP session */
+ uint8 max_concurrent_nan_clusters; /* max num of concurrent clusters */
+ uint16 max_service_name_len; /* max service name length */
+ uint16 max_match_filter_len; /* max match filter length */
+ uint16 max_total_match_filter_len; /* max total match filter length */
+ uint16 max_service_specific_info_len; /* max service specific info length */
+ uint16 max_vsa_data_len; /* max vendor specific attrib data length */
+ uint16 max_mesh_data_len; /* max mesh data length */
+ uint16 max_app_info_len; /* max app info length */
+ uint16 max_sdea_svc_specific_info_len; /* max sdea ser specific info length */
+ uint8 max_queued_tx_followup_msgs; /* max no. of queued tx followup msgs */
+ uint8 max_subscribe_address; /* max subscribe addresses supported */
+ uint8 ndp_supported_bands; /* number of ndp supported bands */
+ uint8 is_ndp_security_supported; /* if secure ndp is supported */
+ uint8 cipher_suites_supported_mask; /* bitmask for suites supported */
+ uint8 pad[3];
} wl_nan_fw_cap_t;
+/* nan cipher suite support mask bits */
+#define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01
+#define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02
+
/* NAN Save Restore */
#define WL_NAN_NSR2_INFO_MAX_SIZE 2048 /* arbitrary */
@@ -11414,6 +12104,19 @@
#define NAN_RNG_RESP_IOV_LEN 20
+#define NAN_RNG_TERM_FLAG_IMMEDIATE (1u << 0u) /* Do not wait for TXS */
+#define NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN (1u << 1u) /* Do not TX rng_term */
+#define NAN_RNG_TERM_FLAG_EVENT_HOST (1u << 2u) /* Notify event to host */
+#define NAN_RNG_TERM_FLAG_OPT_TLVS (1u << 3u) /* opt tlvs present */
+
+typedef struct wl_nan_range_cancel_ext {
+ wl_nan_range_id range_id;
+ uint8 flags;
+ uint8 pad[2];
+} wl_nan_range_cancel_ext_t;
+
+#define NAN_RNG_CANCEL_IOV_FIXED_LEN 4u
+
#define NAN_RNG_MAX_IOV_LEN 255
typedef struct wl_nan_ev_rng_req_ind {
@@ -11442,6 +12145,19 @@
#define NAN_RNG_RPT_IND_SIZE 12
+/* number of continuous ranging crbs which can be idle,
+* after which ranging session will be terminated.
+* Default value is 5. Set to zero for disabling the
+* idle timeout functionality
+*/
+typedef uint8 wl_nan_range_idle_count_t;
+
+/* nan ranging termination reason codes */
+#define NAN_RNG_TERM_IDLE_TIMEOUT 1 /* no ftms from peer */
+#define NAN_RNG_TERM_PEER_REQ 2
+#define NAN_RNG_TERM_USER_REQ 3
+#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4
+
typedef struct wl_nan_ev_rng_term_ind {
struct ether_addr peer_m_addr;
uint8 reason_code;
@@ -11827,13 +12543,19 @@
WL_NAN_WFA_TM_SKIP_RAW_NA_BLOB = 0x00002000,
/* overwrite local NA with peer NA in received frame */
WL_NAN_WFA_TM_LOCAL_NA_OVERWRITE = 0x00004000,
- /* randomize and self configure ndl qos(needed at responder in auto mode) */
+ /* randomize and self configure ndl qos(needed at responder in auto mode) */
WL_NAN_WFA_TM_SELF_CFG_NDL_QOS = 0x00008000,
/* send NAF frames only in DW */
WL_NAN_WFA_TM_SEND_NAF_IN_DW = 0x00010000,
/* restrict channels used for countered slots to Ch 6/149 only */
WL_NAN_WFA_TM_RESTRICT_COUNTER_SLOTS_CHAN = 0x00020000,
- WL_NAN_WFA_TM_FLAG_MASK = 0x0003ffff /* add above & update mask */
+ /* NDPE negative test case (4.2.5 & 4.2.6) */
+ WL_NAN_WFA_TM_NDPE_NEGATIVE_TEST_TB = 0x00040000,
+ /* Set NDPE(NAN3.0) capable bit in dev cap attr */
+ WL_NAN_WFA_TM_ENABLE_NDPE_CAP = 0x00080000,
+
+ /* add above & update mask */
+ WL_NAN_WFA_TM_FLAG_MASK = 0x000FFFFF
};
typedef uint32 wl_nan_wfa_testmode_t;
@@ -11864,6 +12586,16 @@
uint8 pad[3];
} wl_nan_dev_cap_t;
+/* arbitrary max len for frame template */
+#define WL_NAN_FRM_TPLT_MAX_LEN 1024
+
+typedef struct wl_nan_frm_tplt {
+ wl_nan_frame_type_t type;
+ uint8 pad;
+ uint16 len; /* length of template */
+ uint8 data[]; /* template */
+} wl_nan_frm_tplt_t;
+
#define RSSI_THRESHOLD_SIZE 16
#define MAX_IMP_RESP_SIZE 256
@@ -12292,6 +13024,19 @@
#define WL_TKO_SUBCMD_CONNECT 2 /* TCP connection info */
#define WL_TKO_SUBCMD_ENABLE 3 /* enable/disable */
#define WL_TKO_SUBCMD_STATUS 4 /* TCP connection status */
+#define WL_TKO_SUBCMD_RST 5 /* TCP wake RST frame release request */
+
+/** Delay to retain a TCP RST frame on a monitored TCP session
+ * before toggling the wake GPIO (in sec).
+ */
+typedef enum tko_rst_wake_delay {
+ /** No delay, no RST frame retention */
+ TKO_RST_WAKE_DELAY_DISABLED = -2,
+ /** Infinite delay. No timeout */
+ TKO_RST_WAKE_DELAY_INFINITY = -1,
+ /* 0 - 3600 sec are valid */
+ TKO_RST_WAKE_DELAY_MAX = 3600
+} tko_rst_wake_delay_t;
/* WL_TKO_SUBCMD_MAX_CONNECT subcommand data */
typedef struct wl_tko_max_tcp {
@@ -12304,7 +13049,7 @@
uint16 interval; /* keepalive tx interval (secs) */
uint16 retry_interval; /* keepalive retry interval (secs) */
uint16 retry_count; /* retry_count */
- uint8 pad[2]; /* 4-byte struct alignment */
+ int16 rst_delay; /* delay before a RST frame is forwarded to the host */
} wl_tko_param_t;
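Illustrative sketch, not part of the patch: the new int16 rst_delay field is expected to carry one of the tko_rst_wake_delay_t values above (DISABLED, INFINITY, or 0..3600 seconds). A minimal host-side validator with local copies of those constants:

#include <stdint.h>
#include <stdio.h>

enum {                                  /* mirrors tko_rst_wake_delay_t above */
	RST_WAKE_DELAY_DISABLED = -2,
	RST_WAKE_DELAY_INFINITY = -1,
	RST_WAKE_DELAY_MAX      = 3600
};

/* returns 1 if 'delay' may be written into wl_tko_param_t.rst_delay */
static int rst_delay_valid(int32_t delay)
{
	return delay == RST_WAKE_DELAY_DISABLED ||
	       delay == RST_WAKE_DELAY_INFINITY ||
	       (delay >= 0 && delay <= RST_WAKE_DELAY_MAX);
}

int main(void)
{
	int16_t rst_delay = RST_WAKE_DELAY_DISABLED;   /* keep legacy behaviour */

	printf("valid=%d (delay=%d)\n", rst_delay_valid(rst_delay), rst_delay);
	return 0;
}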
/* WL_TKO_SUBCMD_CONNECT subcommand data
@@ -12357,6 +13102,13 @@
*/
} wl_tko_status_t;
+/* TKO_STATUS_TCP_RST subcommand data */
+typedef struct wl_tko_get_reset {
+ uint8 index; /**< TCP connection index, 0 to max-1 */
+ uint8 clear_only; /**< 1 to clear the RST frame without sending it up to the host */
+ uint8 pad[2]; /**< 4-bytes structure alignment */
+} wl_tko_get_reset_t;
+
typedef enum {
TKO_STATUS_NORMAL = 0, /* TCP connection normal, no error */
TKO_STATUS_NO_RESPONSE = 1, /* no response to TCP keepalive */
@@ -12365,9 +13117,60 @@
TKO_STATUS_SEQ_NUM_INVALID = 4, /* ACK != sequence number */
TKO_STATUS_REMOTE_SEQ_NUM_INVALID = 5, /* SEQ > remote sequence number */
TKO_STATUS_TCP_DATA = 6, /* TCP data available */
+ TKO_STATUS_TCP_RST = 7, /* TCP RST flag received */
TKO_STATUS_UNAVAILABLE = 255, /* not used/configured */
} tko_status_t;
+/*
+ * DHCP lease time renew offload definitions
+ */
+
+/* Common iovar struct */
+typedef struct wl_dltro {
+ uint8 subcmd_id; /* Subcommand id */
+ uint8 pad;
+ uint16 len; /* Total length of data[] */
+ uint8 data[1]; /* Subcommand data */
+} wl_dltro_t;
+
+/* Subcommand ids */
+#define WL_DLTRO_SUBCMD_CONNECT 0 /* DLTRO connection info */
+#define WL_DLTRO_SUBCMD_PARAM 1 /* DLTRO parameter info */
+#define WL_DLTRO_SUBCMD_MAX_DLTRO 2 /* Max DLTRO supported */
+
+/* WL_DLTRO_SUBCMD_CONNECT subcommand data
+ * Invoke with unique 'index' for each DLTRO connection
+ */
+typedef struct wl_dltro_connect {
+ uint8 index; /* DLTRO connection index, 0 to max-1 */
+ uint8 ip_addr_type; /* 0 - IPv4, 1 - IPv6 */
+ uint8 offload_type; /* 0 - Client, 1 - Server */
+ uint8 pad;
+ uint32 tid; /* Transaction id */
+ uint32 timer_val; /* DHCP lease time remaining in seconds */
+ uint32 time_before_expiry; /* Time before expiry for DHCP lease renewal in seconds */
+ uint32 len; /* Length of the variable data */
+ uint8 data[1]; /* Variable length field containing DLTRO packet */
+} wl_dltro_connect_t;
+
+/* WL_DLTRO_SUBCMD_PARAM subcommand data
+ * Invoke with unique 'index' for each DLTRO connection
+ */
+typedef struct wl_dltro_param {
+ uint8 index; /* DLTRO connection index, 0 to max-1 */
+ uint8 retry; /* Number of retries */
+} wl_dltro_param_t;
+
+/* WL_DLTRO_SUBCMD_MAX_DLTRO subcommand data */
+typedef struct wl_dltro_max_dltro {
+ uint8 max; /* Max DLTRO supported */
+} wl_dltro_max_dltro_t;
+
+/* WL_DLTRO_SUBCMD_PARAM subcommand data to GET configured info for specific index */
+typedef struct wl_dltro_get_param {
+ uint8 index; /* DLTRO connection index, 0 to max-1 */
+} wl_dltro_get_param_t;
+
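Illustrative sketch, not part of the patch: the DLTRO iovar uses a header-plus-payload layout (wl_dltro_t carrying a subcommand blob). The sketch packs a WL_DLTRO_SUBCMD_PARAM payload; the struct names are local mirrors, and the header's data[1] member is modelled as a flexible array member here for clarity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DLTRO_SUBCMD_PARAM 1           /* mirrors WL_DLTRO_SUBCMD_PARAM */

struct dltro_hdr {                     /* mirrors the wl_dltro_t header */
	uint8_t  subcmd_id;
	uint8_t  pad;
	uint16_t len;                  /* total length of data[] */
	uint8_t  data[];
};

struct dltro_param {                   /* mirrors wl_dltro_param_t */
	uint8_t index;
	uint8_t retry;
};

/* packs a "set retry count" request for one DLTRO connection into buf */
static size_t dltro_pack_param(uint8_t *buf, size_t bufsz, uint8_t index, uint8_t retry)
{
	struct dltro_hdr *hdr = (struct dltro_hdr *)buf;
	struct dltro_param p = { .index = index, .retry = retry };
	size_t need = sizeof(*hdr) + sizeof(p);

	if (bufsz < need)
		return 0;
	hdr->subcmd_id = DLTRO_SUBCMD_PARAM;
	hdr->pad = 0;
	hdr->len = (uint16_t)sizeof(p);
	memcpy(hdr->data, &p, sizeof(p));
	return need;
}

int main(void)
{
	uint32_t buf[8];        /* word-aligned scratch for the iovar payload */

	printf("packed %zu bytes\n",
	       dltro_pack_param((uint8_t *)buf, sizeof(buf), 0, 3));
	return 0;
}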
enum rssi_reason {
RSSI_REASON_UNKNOW = 0,
RSSI_REASON_LOWRSSI = 1,
@@ -12542,6 +13345,34 @@
chanspec_list_t chspec_list;
} bcn_req_t;
+#define WL_RRM_BCN_REQ_VER_2 2
+typedef struct bcn_req_v2 {
+ uint8 version;
+ uint8 bcn_mode;
+ uint8 pad_1[2];
+ int32 dur;
+ int32 channel;
+ struct ether_addr da;
+ uint16 random_int;
+ wlc_ssid_t ssid;
+ uint16 reps;
+ uint8 req_elements;
+ uint8 pad_2;
+ struct ether_addr bssid;
+ uint8 pad_3[2];
+ chanspec_list_t chspec_list;
+} bcn_req_v2_t;
+
+#define WL_RRM_BCN_REQ_MIN_PARAMS 8
+#define WL_RRM_BCN_REQ_PARAM_INC_SSID 8
+#define WL_RRM_BCN_REQ_PARAM_INC_CHANS 9
+enum {
+ WL_RRM_BCN_MODE_PASSIVE = 0, /* passive mode */
+ WL_RRM_BCN_MODE_ACTIVE = 1, /* active mode */
+ WL_RRM_BCN_MODE_TABLE = 2, /* beacon table */
+ WL_RRM_BCN_MODE_MAX_VALUE = 2
+};
+
typedef struct rrmreq {
struct ether_addr da;
uint8 reg;
@@ -12739,6 +13570,16 @@
uint8 flags;
} wl_el_tag_params_t;
+/** Video Traffic Interference Monitor config */
+#define INTFER_VERSION 1
+typedef struct wl_intfer_params {
+ uint16 version; /**< version */
+ uint8 period; /**< sample period */
+ uint8 cnt; /**< sample cnt */
+ uint8 txfail_thresh; /**< non-TCP txfail threshold */
+ uint8 tcptxfail_thresh; /**< tcptxfail threshold */
+} wl_intfer_params_t;
+
#define EVENT_LOG_SET_TYPE_CURRENT_VERSION 0
typedef struct wl_el_set_type_s {
uint16 version;
@@ -12849,7 +13690,14 @@
GPAIO_PMU_LDO1P6,
GPAIO_RCAL,
GPAIO_IQDAC_BUF_DC_MEAS,
- GPAIO_IQDAC_BUF_DC_CLEAR
+ GPAIO_IQDAC_BUF_DC_CLEAR,
+ GPAIO_DAC_IQ_DC_RDBK,
+ GPAIO_DAC_IQ_DC_RDBK_CLEAR,
+ GPAIO_AFE_LDO_FOR_DAC_DC,
+ GPAIO_PA5G_VCAS_SOURCE,
+ GPAIO_BIQ2_DC_MEAS,
+ GPAIO_BIQ2_DC_CLEAR,
+ GPAIO_LDO_1P8_OUT
} wl_gpaio_option_t;
/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
@@ -12857,8 +13705,8 @@
WLC_AP_IOV_OP_DELETE = -1,
WLC_AP_IOV_OP_DISABLE = 0,
WLC_AP_IOV_OP_ENABLE = 1,
- WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 2,
- WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 3,
+ WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 2,
+ WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 3,
WLC_AP_IOV_OP_MOVE = 4
} wlc_ap_iov_bss_oper_t;
@@ -13843,6 +14691,7 @@
#define WL_ROAM_PROF_VER_0 0
#define WL_ROAM_PROF_VER_1 1
+#define WL_ROAM_PROF_VER_2 2
#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1
#define WL_ROAM_PROF_NONE (0 << 0)
@@ -13866,6 +14715,33 @@
#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */
#define WL_CU_CALC_DURATION_MAX 60 /* seconds */
+#define WL_ESTM_LOW_TRIGGER_DISABLE 0
+#define WL_ESTM_LOW_TRIGGER_DEFAULT 5 /* Mbps */
+#define WL_ESTM_LOW_TRIGGER_MAX 250 /* Mbps */
+#define WL_ESTM_ROAM_DELTA_DEFAULT 10
+
+typedef struct wl_roam_prof_v3 {
+ uint8 roam_flags; /**< bit flags */
+ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+
+ /* if channel_usage is zero, roam_delta is rssi delta required for new AP */
+ /* if channel_usage is non-zero, roam_delta is score delta(%) required for new AP */
+ int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
+ uint16 nfscan; /**< number of full scan to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+ uint8 channel_usage;
+ uint8 cu_avg_calc_dur;
+ uint16 estm_low_trigger; /**< ESTM low throughput roam trigger */
+ int8 estm_roam_delta; /**< ESTM low throughput roam delta */
+ uint8 pad;
+} wl_roam_prof_v3_t;
+
typedef struct wl_roam_prof_v2 {
int8 roam_flags; /**< bit flags */
int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
@@ -13903,6 +14779,13 @@
uint16 max_scan_period;
} wl_roam_prof_v1_t;
+typedef struct wl_roam_prof_band_v3 {
+ uint32 band; /**< Must be just one band */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ wl_roam_prof_v3_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v3_t;
+
typedef struct wl_roam_prof_band_v2 {
uint32 band; /**< Must be just one band */
uint16 ver; /**< version of this struct */
@@ -13962,6 +14845,7 @@
#define WNM_BSS_SELECT_TYPE_RSSI 0
#define WNM_BSS_SELECT_TYPE_CU 1
+#define WNM_BSS_SELECT_TYPE_ESTM_DL 2
#define WNM_BSSLOAD_MONITOR_VERSION 1
typedef struct wnm_bssload_monitor_cfg {
@@ -14146,6 +15030,7 @@
} wl_prio_roam_mode_t;
/* BTCX AIBSS (Oxygen) Status */
+/* MC2 strobing status */
typedef struct wlc_btc_aibss_info {
uint32 prev_tsf_l; // Lower 32 bits of last read of TSF
uint32 prev_tsf_h; // Higher 32 bits of last read of TSF
@@ -14172,7 +15057,8 @@
uint8 acl_grant_cnt; // ACL grant cnt
uint8 pad1;
uint16 ibss_tsf_shm; // SHM address of strobe TSF
- uint16 pad2;
+ uint16 strobe_interval; /**< interval of the strobe */
+ uint32 strobe_missed_cnt; /**< missed strobe counter */
} wlc_btc_aibss_info_t;
#define WLC_BTC_AIBSS_STATUS_VER 1
@@ -14520,7 +15406,9 @@
uint8 version;
uint8 num_pwrcap;
uint8 current_country[2];
- uint32 current_channel;
+ uint8 current_channel;
+ uint8 high_cap_state_enabled;
+ uint8 reserved[2];
uint8 download_present;
uint8 num_ants; /* number antenna slice */
uint8 num_cc_groups; /* number cc groups */
@@ -14554,13 +15442,13 @@
uint8 length; /* size of entire structure, including the pwrs */
uint8 capability; /* capability bitmap */
uint8 num_cores; /* number of cores */
- uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES_V3];
/*
* pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state.
* Each row has up to TXPWRCAP_MAX_NUM_ANTENNAS entries - for each antenna.
* Included in the rows of powers are rows for fail safe.
*/
- int8 pwrs[][TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+ int8 pwrs[][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; /* qdBm units */
} wl_txpwrcap_tbl_v3_t;
/* ##### Ecounters section ##### */
@@ -14609,7 +15497,8 @@
ECOUNTERS_TRIGGER_REASON_EVENTS = 1,
ECOUNTERS_TRIGGER_REASON_D2H_EVENTS = 2,
ECOUNTERS_TRIGGER_REASON_H2D_EVENTS = 3,
- ECOUNTERS_TRIGGER_REASON_MAX = 4
+ ECOUNTERS_TRIGGER_REASON_USER_EVENTS = 4,
+ ECOUNTERS_TRIGGER_REASON_MAX = 5
} ecounters_trigger_reasons_list_t;
typedef struct ecounters_trigger_reason {
@@ -14685,6 +15574,85 @@
uint32 txbcnfrm; /**< beacons transmitted */
} wl_periodic_compact_cntrs_v1_t;
+#define WL_PERIODIC_COMPACT_CNTRS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ /* taken from wl_wlc_cnt_t */
+ uint32 txfail;
+ /* taken from wl_cnt_ge40mcst_v1_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txback; /**< blockack txcnt */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 txframe; /**< tx data frames */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxframe; /**< rx data frames */
+ uint32 lqcm_report; /**< lqcm metric tx/rx idx */
+ uint32 tx_toss_cnt; /* number of tx packets tossed */
+ uint32 rx_toss_cnt; /* number of rx packets tossed */
+ uint32 last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+ uint32 last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 rxretry; /* Number of rx packets received after retry */
+ uint32 rxdup; /* Number of duplicate packets received; indicates whether the peer is receiving our ACKs */
+ uint32 chswitch_cnt; /* Number of channel switches */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+} wl_periodic_compact_cntrs_v2_t;
+
+#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_rand;
+ uint32 he_colormiss_cnt;
+ uint32 he_txmtid_back;
+ uint32 he_rxmtid_back;
+ uint32 he_rxmsta_back;
+ uint32 he_rxtrig_basic;
+ uint32 he_rxtrig_murts;
+ uint32 he_rxtrig_bsrp;
+ uint32 he_rxdlmu;
+ uint32 he_physu_rx;
+ uint32 he_txtbppdu;
+} wl_compact_he_cnt_wlc_v1_t;
+
typedef struct {
uint16 version;
uint16 coreup;
@@ -14763,6 +15731,7 @@
#define ECOUNTERS_STATS_TYPES_FLAG_SLICE 0x1
#define ECOUNTERS_STATS_TYPES_FLAG_IFACE 0x2
#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL 0x4
+#define ECOUNTERS_STATS_TYPES_DEFAULT 0x8
/* Slice mask bits */
#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0 0x1
@@ -14812,6 +15781,9 @@
* of blocks gets filled. This is to avoid
* many interrupts to host.
*/
+#define EVENT_ECOUNTERS_FLAGS_USER (1 << 6) /* Add configuration for user defined events
+ * Reuse the same flag as H2D
+ */
/* Ecounters suspend resume */
#define ECOUNTERS_SUSPEND_VERSION_V1 1
@@ -15680,9 +16652,12 @@
#define NATOE_FLAGS_PUB_NW_UP (1 << NATOE_FLAGS_PUBNW_SHFT_MASK)
#define NATOE_FLAGS_PVT_NW_UP (1 << NATOE_FLAGS_PVTNW_SHFT_MASK)
-/* Module version is 1 for IGUANA
-*/
+#define PCIE_FRWDPKT_STATS_VERSION 1
+
+/* Module version is 1 for IGUANA */
#define WL_NATOE_MODULE_VER_1 1
+/* Module version is 2 for Lemur */
+#define WL_NATOE_MODULE_VER_2 2
/* WL_NATOE_CMD_MOD_VER */
typedef uint16 wl_natoe_ver_t;
@@ -15810,6 +16785,28 @@
uint8 opcode;
} wl_natoe_dstnat_entry_v1_t;
+typedef struct wl_pcie_frwd_stats_v1 {
+ uint16 version;
+ uint16 len;
+ uint16 frwd_txfrag_q_cnt; /* no. of txfrags in frwd_txfrag_list */
+ /* no. of outstanding lbufs in txpath on if0/ifx */
+ uint16 tx_frwd_n_lb_if0;
+ uint16 tx_frwd_n_lb_ifx;
+ /* no. of outstanding lfrags in txpath on if0/ifx */
+ uint16 tx_frwd_n_lf_if0;
+ uint16 tx_frwd_n_lf_ifx;
+ /* no. of pending frwd pkts dropped upon d3 entry */
+ uint16 tx_frwd_d3_drop_cnt;
+ /* Total no. of lbufs frwded in txpath on if0/ifx */
+ uint32 tx_frwd_n_lb_if0_cnt;
+ uint32 tx_frwd_n_lb_ifx_cnt;
+ /* Total no. of lfrags frwded in txpath on if0/ifx */
+ uint32 tx_frwd_n_lf_if0_cnt;
+ uint32 tx_frwd_n_lf_ifx_cnt;
+ uint32 frwd_tx_drop_thr_cnt; /* no. of pkts dropped due to txfrag threshold */
+ uint32 frwd_tx_drop_err_cnt; /* no. of pkts dropped due to txfrags not avail / errors */
+} wl_pcie_frwd_stats_v1_t;
+
enum wl_natoe_cmds {
WL_NATOE_CMD_MOD_VER = 0,
WL_NATOE_CMD_ENABLE = 1,
@@ -15819,7 +16816,8 @@
WL_NATOE_CMD_EXCEPTION_PORT = 5,
WL_NATOE_CMD_SKIP_PORT = 6,
WL_NATOE_CMD_TBL_CNT = 7,
- WL_NATOE_CMD_CONFIG_DSTNAT = 8
+ WL_NATOE_CMD_CONFIG_DSTNAT = 8,
+ WL_NATOE_CMD_CTRL = 9
};
enum wl_natoe_cmd_xtlv_id {
@@ -15833,7 +16831,8 @@
WL_NATOE_XTLV_TBL_CNT = 7,
WL_NATOE_XTLV_ARP_TBL = 8,
WL_NATOE_XTLV_POOLREORG = 9,
- WL_NATOE_XTLV_CONFIG_DSTNAT = 10
+ WL_NATOE_XTLV_CONFIG_DSTNAT = 10,
+ WL_NATOE_XTLV_CTRL = 11
};
/* endif WL_NATOE */
@@ -15871,7 +16870,8 @@
uint32 four_way_hs_fail; /* No of 4-way handshake fails */
} wl_idauth_counters_t;
-#define WLC_UTRACE_LEN 512
+#define WLC_UTRACE_LEN (1024u * 4u) // default length
+#define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced length to fit smaller AUX BM
#define WLC_UTRACE_READ_END 0
#define WLC_UTRACE_MORE_DATA 1
typedef struct wl_utrace_capture_args_v1 {
@@ -15949,6 +16949,7 @@
};
/* Health check: PHY IDs */
+/* Needed for iguana 13.35 branch */
typedef enum {
PHY_HC_DD_ALL = 0,
PHY_HC_DD_TEMPSENSE = 1,
@@ -15958,6 +16959,14 @@
PHY_HC_DD_LAST /* This must be the last entry */
} phy_hc_dd_type_t;
+typedef enum {
+ PHY_HC_DD_TEMP_FAIL = 0,
+ PHY_HC_DD_VCO_FAIL = 1,
+ PHY_HC_DD_RXDSN_FAIL = 2,
+ PHY_HC_DD_TXPOW_FAIL = 3,
+ PHY_HC_DD_END /* This must be the last entry */
+} phy_hc_dd_type_v2_t;
+
/* IDs of Health Check report structures for sub types of health checks within WL */
typedef enum wl_hc_dd_type {
WL_HC_DD_PCIE = 0, /* PCIe */
@@ -16018,6 +17027,24 @@
WL_HE_CMD_STAID = 8,
WL_HE_CMD_RTSDURTHRESH = 10,
WL_HE_CMD_PEDURATION = 11,
+ WL_HE_CMD_TESTBED_MODE = 12,
+ WL_HE_CMD_OMI = 13,
+ WL_HE_CMD_MAC_PAD_DUR = 14,
+ WL_HE_CMD_MUEDCA = 15,
+ WL_HE_CMD_MACCAP = 16,
+ WL_HE_CMD_PHYCAP = 17,
+ WL_HE_CMD_DISPLAY = 18,
+ WL_HE_CMD_ACTION = 19,
+ WL_HE_CMD_OFDMATX = 20,
+ WL_HE_CMD_20IN80_MODE = 21,
+ WL_HE_CMD_SMPS = 22,
+ WL_HE_CMD_PPETHRESH = 23,
+#ifdef WL11AX_TESTBED
+ WL_HE_CMD_HTC_OMI_EN = 24,
+ WL_HE_CMD_ERSU_EN = 25,
+ WL_HE_CMD_PREPUNCRX_EN = 26,
+ WL_HE_CMD_MIMOCAP_EN = 27,
+#endif // endif
WL_HE_CMD_LAST
};
@@ -16027,6 +17054,12 @@
WL_TWT_CMD_SETUP = 1,
WL_TWT_CMD_TEARDOWN = 2,
WL_TWT_CMD_INFO = 3,
+ WL_TWT_CMD_AUTOSCHED = 4,
+ WL_TWT_CMD_STATS = 5,
+ WL_TWT_CMD_EARLY_TERM_TIME = 6,
+ WL_TWT_CMD_RESP_CONFIG = 7,
+ WL_TWT_CMD_SPPS_ENAB = 8,
+ WL_TWT_CMD_FEATURES = 9,
WL_TWT_CMD_LAST
};
@@ -16100,111 +17133,223 @@
} wl_heb_status_v1_t;
/* TWT Setup descriptor */
-typedef struct {
+typedef struct wl_twt_sdesc {
/* Setup Command. */
- uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h,
- * valid when bcast_twt is FALSE.
- */
- /* Flow attributes */
- uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */
- uint8 flow_id; /* must be between 0 and 7 */
- /* Target Wake Time */
+ uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */
+ uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */
+ uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */
uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
uint32 wake_time_h; /* target wake time - BSS TSF (us) */
uint32 wake_time_l;
uint32 wake_dur; /* target wake duration in unit of microseconds */
uint32 wake_int; /* target wake interval */
+ uint32 btwt_persistence; /* Broadcast TWT Persistence */
+ uint32 wake_int_max; /* max wake interval(uS) for TWT */
+ uint8 duty_cycle_min; /* min duty cycle for TWT(Percentage) */
+ uint8 pad;
+ uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */
+ uint8 channel; /* Twt channel - Not used for now */
+ uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
+ uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */
+ /* deprecated - to be removed */
+ uint16 li;
- uint16 bid; /* must be between 0 and 255. Set 0xFFFF for auto assignment */
- uint16 li; /* Listen interval: Units in number of beacon intervals */
- uint8 channel; /* twt channel */
- uint8 pad[3];
} wl_twt_sdesc_t;
/* Flow flags */
-#define WL_TWT_FLOW_FLAG_BROADCAST (1<<0)
-#define WL_TWT_FLOW_FLAG_IMPLICIT (1<<1)
-#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1<<2)
-#define WL_TWT_FLOW_FLAG_TRIGGER (1<<3)
-#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1<<4)
-#define WL_TWT_FLOW_FLAG_REQUEST (1<<5)
+/* [TODO] : below flags are to be applied with corresponding wl param parsing
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1u << 0u)
+#define WL_TWT_FLOW_FLAG_TRIGGER (1u << 1u)
+#define WL_TWT_FLOW_FLAG_REQUEST (1u << 2u)
+#define WL_TWT_FLOW_FLAG_PROTECT (1u << 3u)
+#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 4u)
+#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 5u)
+
+#define WL_TWT_FLOW_FLAG_BROADCAST (1u << 5u)
+#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1u << 6u)
+#define WL_TWT_FLOW_FLAG_IMPLICIT (1u << 7u)
+*/
+#define WL_TWT_FLOW_FLAG_BROADCAST (1 << 0)
+#define WL_TWT_FLOW_FLAG_IMPLICIT (1 << 1)
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1 << 2)
+#define WL_TWT_FLOW_FLAG_TRIGGER (1 << 3)
+#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1 << 4)
+#define WL_TWT_FLOW_FLAG_REQUEST (1 << 5)
+
+#define WL_TWT_FLOW_FLAG_PROTECT (1u << 0u)
+#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 6u)
+#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 7u)
/* Flow id */
-#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */
-#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */
-#define WL_TWT_FLOW_ID_GID_SHIFT 4
+#define WL_TWT_FLOW_ID_FID 0x07u /* flow id */
+#define WL_TWT_FLOW_ID_GID_MASK 0x70u /* group id - broadcast TWT only */
+#define WL_TWT_FLOW_ID_GID_SHIFT 4u
-#define WL_TWT_INV_BCAST_ID 0xFFFFu
+#define WL_TWT_INV_BCAST_ID 0xFFu
#define WL_TWT_INV_FLOW_ID 0xFFu
+/* auto flow_id */
+#define WL_TWT_SETUP_FLOW_ID_AUTO 0xFFu
+/* auto broadcast ID */
+#define WL_TWT_SETUP_BCAST_ID_AUTO 0xFFu
+/* Infinite persistence for broadcast schedule */
+#define WL_TWT_INFINITE_BTWT_PERSIST 0xFFFFFFFFu
+
+#define WL_TWT_STATS_MAX_BTWT 4u
+#define WL_TWT_STATS_MAX_ITWT 4u
+
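Illustrative sketch, not part of the patch: decoding the flow-id byte with the (now unsigned-suffixed) FID/GID masks above, while honouring the new auto-assignment value. The constants are local mirrors of the defines in this hunk.

#include <stdint.h>
#include <stdio.h>

#define TWT_FLOW_ID_FID        0x07u   /* mirrors WL_TWT_FLOW_ID_FID */
#define TWT_FLOW_ID_GID_MASK   0x70u   /* mirrors WL_TWT_FLOW_ID_GID_MASK */
#define TWT_FLOW_ID_GID_SHIFT  4u      /* mirrors WL_TWT_FLOW_ID_GID_SHIFT */
#define TWT_SETUP_FLOW_ID_AUTO 0xFFu   /* let the driver pick the flow id */

int main(void)
{
	uint8_t flow = 0x25;    /* example encoded value: gid 2, fid 5 */

	if (flow == TWT_SETUP_FLOW_ID_AUTO)
		printf("auto-assigned flow id\n");
	else
		printf("fid=%u gid=%u\n",
		       flow & TWT_FLOW_ID_FID,
		       (flow & TWT_FLOW_ID_GID_MASK) >> TWT_FLOW_ID_GID_SHIFT);
	return 0;
}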
/* Wake type */
/* TODO: not yet finalized */
-#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is
+#define WL_TWT_TIME_TYPE_BSS 0u /* The time specified in wake_time_h/l is
* the BSS TSF time.
*/
-#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset
+#define WL_TWT_TIME_TYPE_OFFSET 1u /* The time specified in wake_time_h/l is an offset
* of the TSF time when the iovar is processed.
*/
+#define WL_TWT_TIME_TYPE_AUTO 2u /* The target wake time is chosen internally by the FW */
-#define WL_TWT_SETUP_VER 0
+#define WL_TWT_SETUP_VER 0u
/* HE TWT Setup command */
-typedef struct {
+typedef struct wl_twt_setup {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- /* session id */
- uint8 dialog; /* an arbitrary number to identify the seesion */
- uint8 pad;
+ uint8 pad[2];
/* setup descriptor */
wl_twt_sdesc_t desc;
+
+ /* deprecated - to be removed */
+ uint16 dialog;
+ uint8 pad1[2];
} wl_twt_setup_t;
-#define WL_TWT_TEARDOWN_VER 0
+/* deprecated -to be removed */
+#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF
+
+#define WL_TWT_TEARDOWN_VER 0u
+
+/* twt teardown descriptor */
+typedef struct wl_twt_teardesc {
+ uint8 negotiation_type;
+ uint8 flow_id; /* must be between 0 and 7 */
+ uint8 bid; /* must be between 0 and 31 */
+ uint8 alltwt; /* all twt teardown - 0 or 1 */
+} wl_twt_teardesc_t;
/* HE TWT Teardown command */
-typedef struct {
+typedef struct wl_twt_teardown {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- /* flow attributes */
- uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above.
- * (only BORADCAST) is applicable)
- */
- uint8 flow_id; /* must be between 0 and 7 */
- uint16 bid; /* must be between 0 and 255 */
+ wl_twt_teardesc_t teardesc; /* Teardown descriptor */
+
+ /* deprecated - to be removed */
+ uint8 flow_flags;
+ uint8 flow_id;
+ uint8 bid;
+ uint8 pad;
} wl_twt_teardown_t;
/* twt information descriptor */
-typedef struct {
+typedef struct wl_twt_infodesc {
uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */
uint8 flow_id;
uint8 pad[2];
uint32 next_twt_h;
uint32 next_twt_l;
-} wl_twt_idesc_t;
+ /* deprecated - to be removed */
+ uint8 wake_type;
+ uint8 pad1[3];
+} wl_twt_infodesc_t;
/* Flow flags */
-#define WL_TWT_INFO_FLAG_RESP_REQ (1<<0) /* Request response */
+#define WL_TWT_INFO_FLAG_ALL_TWT (1u << 0u) /* All TWT */
+#define WL_TWT_INFO_FLAG_RESUME (1u << 1u) /* 1 is TWT Resume, 0 is TWT Suspend */
-#define WL_TWT_INFO_VER 0
+/* deprecated - to be removed */
+#define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */
+#define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */
+#define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */
+typedef wl_twt_infodesc_t wl_twt_idesc_t;
+
+#define WL_TWT_INFO_VER 0u
/* HE TWT Information command */
-typedef struct {
+typedef struct wl_twt_info {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
uint8 pad[2];
- /* information descriptor */
+ wl_twt_infodesc_t infodesc; /* information descriptor */
+ /* deprecated - to be removed */
wl_twt_idesc_t desc;
} wl_twt_info_t;
+#define WL_TWT_PEER_STATS_VERSION_1 1u
+typedef struct wl_twt_peer_stats_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint8 id;
+ uint8 flow_flags;
+ uint8 PAD[2];
+ uint32 sp_seq; /* sequence number of the service period */
+ uint32 tx_ucast_pkts;
+ uint32 tx_pkts_min;
+ uint32 tx_pkts_max;
+ uint32 tx_pkts_avg;
+ uint32 tx_failures;
+ uint32 rx_ucast_pkts;
+ uint32 rx_pkts_min;
+ uint32 rx_pkts_max;
+ uint32 rx_pkts_avg;
+ uint32 rx_pkts_retried;
+} wl_twt_peer_stats_v1_t;
+
+#define WL_TWT_STATS_VERSION_1 1
+typedef struct wl_twt_stats_v1 {
+ uint16 version;
+ uint16 length;
+ uint32 num_stats; /* number of peer stats in the peer_stats_list */
+ wl_twt_peer_stats_v1_t peer_stats_list[];
+} wl_twt_stats_v1_t;
+
+#define WL_TWT_STATS_CMD_VERSION_1 1
+#define WL_TWT_STATS_CMD_FLAGS_RESET (1u << 0u)
+/* HE TWT stats command */
+typedef struct wl_twt_stats_cmd_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint16 flags; /* see WL_TWT_STATS_CMD_FLAGS */
+ uint8 num_fid;
+ uint8 num_bid;
+ uint8 fid_list[WL_TWT_STATS_MAX_ITWT];
+ uint8 bid_list[WL_TWT_STATS_MAX_BTWT];
+} wl_twt_stats_cmd_v1_t;
+
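Illustrative sketch, not part of the patch: filling a wl_twt_stats_cmd_v1-shaped request that reads and resets the stats of one individual TWT flow. The struct is a local mirror with ether_addr reduced to a byte array; interpreting 'length' as the bytes following that field is an assumption based on the other TWT commands in this file.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TWT_STATS_CMD_VERSION_1   1u          /* mirrors WL_TWT_STATS_CMD_VERSION_1 */
#define TWT_STATS_CMD_FLAGS_RESET (1u << 0u)  /* mirrors WL_TWT_STATS_CMD_FLAGS_RESET */
#define TWT_STATS_MAX_ITWT        4u
#define TWT_STATS_MAX_BTWT        4u

struct twt_stats_cmd {                        /* mirrors wl_twt_stats_cmd_v1_t */
	uint16_t version;
	uint16_t length;                      /* bytes following this field (assumed) */
	uint8_t  peer[6];                     /* peer MAC; zeroed here for brevity */
	uint8_t  pad[2];
	uint16_t flags;
	uint8_t  num_fid;
	uint8_t  num_bid;
	uint8_t  fid_list[TWT_STATS_MAX_ITWT];
	uint8_t  bid_list[TWT_STATS_MAX_BTWT];
};

int main(void)
{
	struct twt_stats_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.version = TWT_STATS_CMD_VERSION_1;
	cmd.length = (uint16_t)(sizeof(cmd) - 2u * sizeof(uint16_t));
	cmd.flags = TWT_STATS_CMD_FLAGS_RESET;  /* clear counters after reporting */
	cmd.num_fid = 1;
	cmd.fid_list[0] = 0;                    /* individual TWT flow id 0 */
	printf("twt stats cmd: %zu bytes\n", sizeof(cmd));
	return 0;
}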
+#define WL_TWT_RESP_CFG_VER 0u
+
+#define WL_TWT_CMD_RESP_CFG_TYPE_ALTERNATE 0u
+#define WL_TWT_CMD_RESP_CFG_TYPE_DICTATE 1u
+/* HE TWT resp command */
+typedef struct wl_twt_resp_cfg {
+ /* structure control */
+ uint16 version; /* Structure version */
+ uint16 length; /* Data length (starting after this field) */
+ uint8 dc_max; /* Max supported duty cycle for single TWT */
+ uint8 resp_type; /* Resp. type(Alt/dict) if duty cycle>max duty cycle */
+} wl_twt_resp_cfg_t;
+
/* Current version for wlc_clm_power_limits_req_t structure and flags */
#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1
/* "clm_power_limits" iovar request structure */
@@ -16246,6 +17391,14 @@
/* Output. Limits taken from country-default (all-product) data */
#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS 0x00000004
+#define WL_MBO_IOV_MAJOR_VER 1
+#define WL_MBO_IOV_MINOR_VER 1
+#define WL_MBO_IOV_MAJOR_VER_SHIFT 8
+#define WL_MBO_IOV_VERSION \
+ ((WL_MBO_IOV_MAJOR_VER << WL_MBO_IOV_MAJOR_VER_SHIFT)| WL_MBO_IOV_MINOR_VER)
+
+#define MBO_MAX_CHAN_PREF_ENTRIES 16
+
enum wl_mbo_cmd_ids {
WL_MBO_CMD_ADD_CHAN_PREF = 1,
WL_MBO_CMD_DEL_CHAN_PREF = 2,
@@ -16263,6 +17416,8 @@
WL_MBO_CMD_NBR_INFO_CACHE = 11,
WL_MBO_CMD_ANQPO_SUPPORT = 12,
WL_MBO_CMD_DBG_EVENT_CHECK = 13,
+ WL_MBO_CMD_EVENT_MASK = 14,
+ WL_MBO_CMD_ASSOC_DISALLOWED = 15,
/* Add before this !! */
WL_MBO_CMD_LAST
};
@@ -16278,9 +17433,20 @@
WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8,
WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9,
WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa,
- WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb
+ WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb,
+ WL_MBO_XTLV_BIT_MASK = 0xc,
+ WL_MBO_XTLV_ASSOC_DISALLOWED = 0xd
};
+/* event bit mask flags for MBO */
+#define MBO_EVT_BIT_MASK_CELLULAR_SWITCH 0x0001 /* Evt bit mask to enab cellular switch */
+#define MBO_EVT_BIT_MASK_BTM_REQ_RCVD 0x0002 /* Evt bit mask to enab BTM req rcvd */
+
+/* masks for values in assoc_disallowed */
+#define WL_MBO_ASSOC_DISALLOWED_MASK 0xff00
+#define WL_MBO_ASSOC_DISALLOWED_SHIFT 8
+#define WL_MBO_ASSOC_DISALLOWED_RC_MASK 0xff
+
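Illustrative sketch, not part of the patch: splitting an assoc_disallowed value with the masks above, assuming the upper byte carries the enable flag and the lower byte the reason code (as the MASK/SHIFT/RC_MASK names suggest). The constants are local mirrors and the sample value is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MBO_ASSOC_DISALLOWED_MASK    0xff00u  /* mirrors WL_MBO_ASSOC_DISALLOWED_MASK */
#define MBO_ASSOC_DISALLOWED_SHIFT   8u
#define MBO_ASSOC_DISALLOWED_RC_MASK 0xffu

int main(void)
{
	uint16_t val = 0x0103;  /* hypothetical: enabled, reason code 3 */

	printf("enabled=%u reason=%u\n",
	       (val & MBO_ASSOC_DISALLOWED_MASK) >> MBO_ASSOC_DISALLOWED_SHIFT,
	       val & MBO_ASSOC_DISALLOWED_RC_MASK);
	return 0;
}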
typedef struct wl_mbo_counters {
/* No of transition req recvd */
uint16 trans_req_rcvd;
@@ -16830,6 +17996,18 @@
/* --- End BTCX WiFi Protection --- */
+/* --- BTCX ULMU disable (btc_ulmu_config iovar) --- */
+
+/* Version number */
+#define WL_BTC_ULMU_CONFIG_VER_1 1
+typedef struct wl_btc_ulmu_config_v1 {
+ uint16 version; /* btc_ulmu_config version */
+ uint16 len; /* Total length */
+ uint32 ulmu_bt_task_bm; /* BT task bitmap for ULMU disable */
+ uint32 ulmu_bt_period_th; /* BT period thresh for ULMU disable */
+} wl_btc_ulmu_config_v1_t;
+
+/* --- End BTCX ULMU config --- */
enum wl_rpsnoa_cmd_ids {
WL_RPSNOA_CMD_ENABLE = 1,
WL_RPSNOA_CMD_STATUS,
@@ -16901,6 +18079,8 @@
WL_IFSTATS_XTLV_RX_AMPDU_STATS = 0x102,
/* scb ecounter statistics */
WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103,
+ /* Global NAN stats */
+ WL_IFSTATS_XTLV_NAN_STATS = 0x104,
/* Per-slice information
* Per-interface reporting could also include slice specific data
@@ -16928,6 +18108,8 @@
WL_IFSTATS_XTLV_WL_SLICE_TDMTX = 0x30C,
/* Slice specific state capture in periodic fasion */
WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D,
+ /* Per-slice BTCOEX task duration stats */
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX_TSKDUR_STATS = 0x30E,
/* Per-interface */
/* XTLV container for reporting */
WL_IFSTATS_XTLV_IF = 0x501,
@@ -17146,6 +18328,7 @@
WL_TVPM_REQ_ENABLE, /* req_value: uint32, range 0...1 */
WL_TVPM_REQ_STATUS, /* req_value: none */
WL_TVPM_REQ_PERIOD, /* req_value: int32, range {-1,1-10} */
+ WL_TVPM_REQ_SETVAL,
WL_TVPM_REQ_MAX
} wl_tvpm_req_type_t;
@@ -17248,6 +18431,7 @@
*/
#define WL_SCB_ECOUNTERS_VERSION_1 1
+#define WL_SCB_ECOUNTERS_VERSION_2 2
typedef struct wl_scb_ecounters_v1 {
uint16 version; /* version field */
@@ -17261,6 +18445,28 @@
wlc_scb_stats_v1_t stats;
} wl_scb_ecounters_v1_t;
+typedef struct wl_scb_ecounters_v2 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chanspec; /* current chanspec where scb is operating */
+ struct ether_addr ea; /* peer ndi or sta ea */
+ uint8 peer_type; /* peer type */
+ uint8 pad;
+
+ /* scb tx and rx stats */
+ uint16 tx_rate; /* Rate(in Mbps) of last successful tx frame */
+ uint16 rx_rate; /* Rate(in Mbps) of last successful rx frame */
+ uint16 tx_rate_fallback; /* last used lowest fallback TX rate(in Mbps) */
+ uint16 pad1;
+ uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */
+ uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */
+ uint32 rx_pkts_retried; /* # rx with retry bit set */
+ uint32 tx_pkts_retries; /* total num of tx retries */
+ uint32 tx_failures; /* num of packets failed */
+ uint32 tx_pkts_total; /* total num of tx pkts */
+ int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna of data frames */
+} wl_scb_ecounters_v2_t;
+
/* ecounters for nan slot stats
* XTLV ID: WL_IFSTATS_XTLV_NAN_SLOT_STATS
*/
@@ -17595,15 +18801,427 @@
uint8 channel[]; /* Channel to block, Variable Length */
} wl_block_ch_v1_t;
-/* Max Host whitelist region supported */
-#define DMA_WL_ADDR_REGION_HOST_MAX 1u
typedef struct dma_wl_addr_region {
uint32 addr_low;
uint32 addr_high;
} dma_wl_addr_region_t;
-typedef struct dma_wl_addr_region_host {
- dma_wl_addr_region_t hreg_start;
- dma_wl_addr_region_t hreg_end;
-} dma_wl_addr_region_host_t;
+#define WL_ROAMSTATS_IOV_VERSION 1
+
+#define MAX_PREV_ROAM_EVENTS 16u
+
+#define ROAMSTATS_UNKNOWN_CNT 0xFFFFu
+
+/* roaming statistics counter structures */
+typedef struct wlc_assoc_roamstats_event_msg_v1 {
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 timestamp; /* Timestamp of event */
+} wlc_assoc_roamstats_event_msg_v1_t;
+
+enum wl_roamstats_cmd_id {
+ WL_ROAMSTATS_XTLV_CMD_VER = 0,
+ WL_ROAMSTATS_XTLV_CMD_RESET = 1,
+ WL_ROAMSTATS_XTLV_CMD_STATUS = 2,
+ WL_ROAMSTATS_XTLV_CMD_LAST /* Keep this at the end */
+};
+
+enum wl_roamstats_xtlv_id {
+ WL_ROAMSTATS_XTLV_VER = 0x0,
+ WL_ROAMSTATS_XTLV_COUNTER_INFO = 0x1,
+ WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS = 0x2,
+ WL_ROAMSTATS_XTLV_REASON_INFO = 0x3
+};
+
+/* WL_ROAMSTATS_XTLV_COUNTER_INFO */
+typedef struct {
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 host_access_time;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+} roamstats_counter_info_v1_t;
+
+/* WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS */
+typedef struct {
+ uint16 max;
+ uint16 pos;
+ wlc_assoc_roamstats_event_msg_v1_t roam_event[];
+} roamstats_prev_roam_events_v1_t;
+
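Illustrative sketch, not part of the patch: the max/pos pair in roamstats_prev_roam_events_v1_t suggests a ring buffer of the last MAX_PREV_ROAM_EVENTS entries. The walk below assumes pos is the next slot to be overwritten (i.e. the oldest entry); that assumption is not confirmed by the header, and the struct names are local mirrors.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct roam_event_msg {            /* mirrors wlc_assoc_roamstats_event_msg_v1_t */
	uint32_t event_type;
	uint32_t status;
	uint32_t reason;
	uint32_t timestamp;
};

struct prev_roam_events {          /* mirrors roamstats_prev_roam_events_v1_t */
	uint16_t max;
	uint16_t pos;
	struct roam_event_msg roam_event[];
};

/* prints the history oldest-first, assuming 'pos' is the next slot to be written */
static void dump_roam_history(const struct prev_roam_events *h)
{
	if (h->max == 0)
		return;
	for (unsigned i = 0; i < h->max; i++) {
		const struct roam_event_msg *e = &h->roam_event[(h->pos + i) % h->max];

		printf("t=%u type=%u status=%u reason=%u\n",
		       (unsigned)e->timestamp, (unsigned)e->event_type,
		       (unsigned)e->status, (unsigned)e->reason);
	}
}

int main(void)
{
	struct prev_roam_events *h =
	    calloc(1, sizeof(*h) + 2 * sizeof(struct roam_event_msg));

	if (!h)
		return 1;
	h->max = 2;
	h->roam_event[0].timestamp = 100;
	h->roam_event[1].timestamp = 200;
	dump_roam_history(h);
	free(h);
	return 0;
}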
+/* WL_ROAMSTATS_XTLV_REASON_INFO */
+typedef struct {
+ uint16 max;
+ uint16 reason_cnt[];
+} roamstats_reason_info_v1_t;
+
+#ifdef HEALTH_CHECK_WLIOCTL
+/* Health check status format:
+ * reporting status size = uint32
+ * 8 LSB bits are reserved for: WARN (0), ERROR (1), and other levels
+ * MSB 24 bits are reserved for client to fill in its specific status
+ */
+#define HEALTH_CHECK_STATUS_OK 0
+/* Bit positions. */
+#define HEALTH_CHECK_STATUS_WARN 0x1
+#define HEALTH_CHECK_STATUS_ERROR 0x2
+#define HEALTH_CHECK_STATUS_TRAP 0x4
+#define HEALTH_CHECK_STATUS_NOEVENT 0x8
+
+/* Indication that required information is populated in log buffers */
+#define HEALTH_CHECK_STATUS_INFO_LOG_BUF 0x80
+#define HEALTH_CHECK_STATUS_MASK (0xFF)
+
+#define HEALTH_CHECK_STATUS_MSB_SHIFT 8
+#endif /* HEALTH_CHECK_WLIOCTL */
+
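Illustrative sketch, not part of the patch: composing a health-check status word per the comment above, with the severity flags in the low 8 bits and a client-specific detail code in the upper 24. The client code used here is hypothetical and the macro names are local mirrors.

#include <stdint.h>
#include <stdio.h>

#define HC_STATUS_ERROR     0x2u   /* mirrors HEALTH_CHECK_STATUS_ERROR */
#define HC_STATUS_MASK      0xFFu  /* low 8 bits carry the severity flags */
#define HC_STATUS_MSB_SHIFT 8u     /* top 24 bits carry client-specific status */

int main(void)
{
	uint32_t client_code = 0x1234u;                 /* hypothetical module detail */
	uint32_t status = (client_code << HC_STATUS_MSB_SHIFT) | HC_STATUS_ERROR;

	printf("severity=0x%02x detail=0x%06x\n",
	       (unsigned)(status & HC_STATUS_MASK),
	       (unsigned)(status >> HC_STATUS_MSB_SHIFT));
	return 0;
}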
+/** receive signal reporting module interface */
+
+#define WL_RXSIG_IOV_MAJOR_VER (1u)
+#define WL_RXSIG_IOV_MINOR_VER (1u)
+#define WL_RXSIG_IOV_MAJOR_VER_SHIFT (8u)
+#define WL_RXSIG_IOV_VERSION \
+ ((WL_RXSIG_IOV_MAJOR_VER << WL_RXSIG_IOV_MAJOR_VER_SHIFT) | WL_RXSIG_IOV_MINOR_VER)
+#define WL_RXSIG_IOV_GET_MAJOR(x) (x >> WL_RXSIG_IOV_MAJOR_VER_SHIFT)
+#define WL_RXSIG_IOV_GET_MINOR(x) (x & 0xFF)
+
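Illustrative sketch, not part of the patch: the rxsig module packs its IOV version as (major << 8) | minor, and the GET macros above split it back out. Local mirrors of those macros:

#include <stdint.h>
#include <stdio.h>

#define RXSIG_IOV_MAJOR_VER       1u
#define RXSIG_IOV_MINOR_VER       1u
#define RXSIG_IOV_MAJOR_VER_SHIFT 8u
#define RXSIG_IOV_VERSION \
	((RXSIG_IOV_MAJOR_VER << RXSIG_IOV_MAJOR_VER_SHIFT) | RXSIG_IOV_MINOR_VER)
#define RXSIG_IOV_GET_MAJOR(x) ((x) >> RXSIG_IOV_MAJOR_VER_SHIFT)
#define RXSIG_IOV_GET_MINOR(x) ((x) & 0xFFu)

int main(void)
{
	uint16_t ver = RXSIG_IOV_VERSION;       /* 0x0101 for v1.1 */

	printf("rxsig iov v%u.%u\n",
	       (unsigned)RXSIG_IOV_GET_MAJOR(ver),
	       (unsigned)RXSIG_IOV_GET_MINOR(ver));
	return 0;
}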
+enum wl_rxsig_cmd_rssi_mode {
+ WL_RXSIG_MODE_DB = 0x0,
+ WL_RXSIG_MODE_QDB = 0x1,
+ WL_RXSIG_MODE_LAST
+};
+
+/* structure defs for 'wl rxsig [cmd]' iovars */
+enum wl_rxsig_iov_v1 {
+ WL_RXSIG_CMD_RSSI = 0x1, /**< combined rssi moving avg */
+ WL_RXSIG_CMD_SNR = 0x2, /**< combined snr moving avg */
+ WL_RXSIG_CMD_RSSIANT = 0x3, /**< rssi moving avg per-ant */
+ WL_RXSIG_CMD_SNRANT = 0x4, /**< snr moving avg per-snr */
+ WL_RXSIG_CMD_SMPLWIN = 0x5, /**< config for sampling window size */
+ WL_RXSIG_CMD_SMPLGRP = 0x7, /**< config for grouping of pkt type */
+ WL_RXSIG_CMD_STA_MA = 0x8,
+ WL_RXSIG_CMD_MAMODE = 0x9,
+ WL_RXSIG_CMD_MADIV = 0xa,
+ WL_RXSIG_CMD_DUMP = 0xb,
+ WL_RXSIG_CMD_DUMPWIN = 0xc,
+ WL_RXSIG_CMD_TOTAL
+};
+
+struct wl_rxsig_cfg_v1 {
+ uint16 version;
+ chanspec_t chan; /**< chanspec info for querying stats */
+ uint8 pmac[ETHER_ADDR_LEN]; /**< peer(link) mac address */
+};
+
+struct wl_rxsig_iov_rssi_v1 {
+ int8 rssi;
+ uint8 rssi_qdb;
+ uint8 pad[2];
+};
+
+struct wl_rxsig_iov_snr_v1 {
+ int16 snr;
+ uint16 pad;
+};
+
+struct wl_rxsig_iov_rssi_ant_v1 {
+ int8 deci[WL_RSSI_ANT_MAX];
+ uint8 frac[WL_RSSI_ANT_MAX];
+ uint8 rssi_mode; /**< MODE_DB or MODE_QDB */
+ uint8 num_of_ant; /**< total number of ants */
+ uint8 pad[2]; /**< padding for 32bit align */
+};
+
+#ifdef BCM_SDC
+
+#define SDC_TRIGGER_CONFIG_VER_1 1
+typedef struct {
+ uint16 version;
+ uint16 type;
+ uint8 activate;
+ uint8 pad;
+} sdc_trigger_cfg_t;
+
+typedef enum sdc_trigger_types {
+ SDC_TYPE_STA_ONBOARD_DEBUG = 1,
+#ifdef SDC_TEST
+ /*
+ * This is for test purpose only. Don't assign specific value.
+ * Keep at the end
+ */
+ SDC_TYPE_TEST1,
+ SDC_TYPE_TEST2,
+ SDC_TYPE_TEST3,
+#endif /* SDC_TEST */
+ SDC_TYPE_MAX_TRIGGER
+} sdc_trigger_types_t;
+
+#endif /* BCM_SDC */
+
+typedef struct wl_avs_info_v1 {
+ uint16 version; /* Structure version */
+ uint16 equ_version; /* Equation Version */
+ uint32 RO; /* RO in OTP */
+ uint32 equ_csr; /* Equated CSR */
+ uint32 read_csr; /* Read Back CSR */
+ uint32 aging; /* aging setting in nvram */
+} wl_avs_info_v1_t;
+
+#define WL_AVS_INFO_VER_1 1
+
+/* bitmap for clm_flags iovar */
+#define WL_CLM_TXBF 0x01 /**< Flag for Tx beam forming */
+#define WL_CLM_RED_EU 0x02 /* Flag for EU RED */
+#define WL_CLM_EDCRS_EU 0x04 /**< Use EU post-2015 energy detect */
+#define WL_CLM_DFS_TPC 0x08 /**< Flag for DFS TPC */
+#define WL_CLM_RADAR_TYPE_EU 0x10 /**< Flag for EU */
+#define WL_CLM_DFS_FCC WL_CLM_DFS_TPC /**< Flag for DFS FCC */
+#define WL_CLM_DFS_EU (WL_CLM_DFS_TPC | WL_CLM_RADAR_TYPE_EU) /**< Flag for DFS EU */
+
+/* SC (scan core) command IDs */
+enum wl_sc_cmd {
+ WL_SC_CMD_DBG = 0,
+ WL_SC_CMD_CNX = 1,
+ WL_SC_CMD_LAST
+};
+
+#define WSEC_MAX_SAE_PASSWORD_LEN 128
+
+typedef struct {
+ ushort password_len; /* octets in key material */
+ uint8 password[WSEC_MAX_SAE_PASSWORD_LEN]; /* maximum key len for SAE passphrase */
+} wsec_sae_password_t;
+
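Illustrative sketch, not part of the patch: copying an SAE passphrase into a wsec_sae_password_t-shaped buffer, bounding the copy by WSEC_MAX_SAE_PASSWORD_LEN. The struct and helper are local mirrors / hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SAE_PASSWORD_LEN 128      /* mirrors WSEC_MAX_SAE_PASSWORD_LEN */

struct sae_password {                 /* mirrors wsec_sae_password_t */
	uint16_t password_len;        /* octets in key material */
	uint8_t  password[MAX_SAE_PASSWORD_LEN];
};

/* copies an SAE passphrase into the iovar structure; returns 0 on success */
static int sae_password_set(struct sae_password *sp, const char *pass)
{
	size_t len = strlen(pass);

	if (len > MAX_SAE_PASSWORD_LEN)
		return -1;
	memset(sp, 0, sizeof(*sp));
	sp->password_len = (uint16_t)len;
	memcpy(sp->password, pass, len);
	return 0;
}

int main(void)
{
	struct sae_password sp;
	int rc = sae_password_set(&sp, "example-passphrase");

	printf("rc=%d len=%u\n", rc, (unsigned)sp.password_len);
	return 0;
}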
+/** Statistic related to dongle sleep while in wowl. */
+typedef struct wl_sleep_stats {
+ uint32 cpu_wakes; /** Number of times the CPU has woken up from sleep. */
+ uint32 cpu_up_us; /** CPU active time in us */
+ uint32 total_time_ms; /** Total measured time in ms */
+ uint32 dtim; /** Number of DTIM processed */
+ uint32 tx_packets; /** Number of packets sent */
+ uint32 tx_bytes; /** Number of bytes sent */
+ uint32 tx_us; /** Time radio spent doing TX. */
+ uint32 rx_packets; /** Number of packets received */
+ uint32 rx_bytes; /** Number of bytes received */
+ uint32 rx_us; /** Time radio spent doing RX. */
+ uint32 idle_us; /** Time radio spent idle. */
+ uint32 arp_resp_cnt; /** Number of ARP replies sent */
+ uint32 tcpka_ack_cnt; /** Number of TCP KeepAlive Acks sent */
+ uint32 grp_key_renew_cnt; /** Number of group key renewals */
+} wl_sleep_stats_t;
+
+/* NSC definitions */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body {
+ uint8 type; /* should be 0x7f */
+ uint8 oui[DOT11_OUI_LEN]; /* just like it says */
+ uint8 subtype; /* 221, 0xdd: proprietary ie */
+ uint8 ielen; /* */
+ uint8 data[1]; /* variable */
+} BWL_POST_PACKED_STRUCT nsc_af_body_t;
+#include <packed_section_end.h>
+
+#define NSC_SUBTYPES_MAX 8
+#define NSC_SUBTYPES_EOL 0xff
+
+/* Aloe constants */
+#define WL_ALOE_AF_TYPE (0x7)
+#define WL_ALOE_CMD_PERIODIC (0x01)
+#define WL_ALOE_CMD_ONESHOT (0x02)
+#define WL_ALOE_HASHTABLE_SZ (508) /**< Max number of hash entries */
+#define WL_ALOE_HASH_SZ (8) /**< eight byte hash size */
+#define WL_ALOE_AF_CACHE_SZ (8) /** Max number of cached AF. */
+
+/** ALOE configuration */
+typedef struct wl_aloe_cfg {
+ /** ALOE major version number to be able to receive. */
+ uint8 major;
+ /** ALOE minor version number to be able to receive. */
+ uint8 minor;
+ /** Bitfield of the command to be able to receive.
+ * WL_ALOE_CMD_PERIODIC and/or WL_ALOE_CMD_ONESHOT.
+ */
+ uint8 command;
+ /** BSSID to receive the Aloe frame sent on. */
+ struct ether_addr BSSID;
+} wl_aloe_cfg_t;
+
+/** ALOE periodic action frame */
+typedef struct wl_aloe_periodic_af {
+ /** Tx interval, in ms. */
+ uint32 tx_int;
+ /** Number of AF to send during a phase.
+ * Phase duration is defined as tx_cnt * tx_int ms.
+ */
+ uint32 tx_cnt;
+ /** Interphase space, defined as muted TX periods during which no AF is sent.
+ * Interphase duration is defined as idle_cnt * tx_int ms.
+ */
+ uint32 idle_cnt;
+ /** Start of the RX window as a number of tx interval
+ * The RX window starts at rx_start * tx_int ms after the
+ * beginning of a given phase.
+ */
+ uint32 rx_start;
+ /** Number of tx_interval the listening window lasts
+ * A RX window lasts for rx_cnt * tx_int ms.
+ */
+ uint32 rx_cnt;
+ wl_af_params_t af;
+} wl_aloe_periodic_af_t;
+
+/**
+ * @brief Aloe's action frame format.
+ */
+typedef struct wl_aloe_af {
+ /** Vendor specific: 0x7f */
+ uint8 category;
+ /** 00-22-aa */
+ uint8 oui[3];
+ /** 7 */
+ uint8 subtype;
+ uint8 PAD0;
+ /** ALOE Major version number */
+ uint8 major;
+ /** ALOE Minor version number */
+ uint8 minor;
+ /** Periodic or single shot */
+ uint8 command;
+ uint8 PAD1;
+ /** Authentication hash */
+ uint8 hash[8];
+ /** Aloe payload */
+ uint8 data[];
+} wl_aloe_af_t;
+
+typedef struct wl_aloe_stats {
+ uint32 tx_af_total; /** Counter for the total number of AF sent. */
+ /** Number of AF not sent because the previous TX is still pending */
+ /** Data is held in the AF data legacy buffer */
+ uint32 tx_af_aborted;
+ uint32 tx_af_failed; /** Number of TX AF that failed for a reason */
+ uint32 rx_af; /** Received ALOE Action frames. */
+ uint32 rx_af_filtered; /** Received and filtered out Aloe AF. */
+ /** Action frame received but dropped as the host is not asleep yet. */
+ uint32 rx_dropped;
+ uint32 cache_cnt;
+} wl_aloe_stats_t;
+
+/* pkteng_ru_fill enum definitions */
+#define PKTENG_RU_FILL_VERSION 1
+#define PKTENG_RU_FILL_LENGTH 32
+
+/* Packet length in bytes */
+#define PKT_LEN_TRIG_PROFILE_DEFAULT 100
+#define PKT_LEN_TRIG_PROFILE_0 650
+#define PKT_LEN_TRIG_PROFILE_1 600
+#define PKT_LEN_TRIG_PROFILE_2 4000
+#define PKT_LEN_TRIG_PROFILE_3 4000
+#define PKT_LEN_TRIG_PROFILE_4 3000
+#define PKT_LEN_TRIG_PROFILE_5 3000
+
+/* RU Allocation value */
+#define RU26_5 5
+#define RU242_61 61
+
+/* MAC Broadcast address individual Octet */
+#define BCAST_ADDR_OCTET 0xff
+
+#define ONE_SHOT_TRIGGER_TX 255 /* To send one shot trigger frame */
+#define TRIG_PERIOD_MAX 255 /* Max period */
+#define TRIG_PROFILES 6 /* Packet profiles to compute cmn and usr info params */
+#define HE_TRIG_FRM_NUSERINFO 1 /* Default number of user info fields */
+#define TRIG_TX_MIN_IP_ARGS 1
+#define TRIG_TX_MAX_IP_ARGS 3
+
+typedef struct {
+ uint8 ru_alloc_val; /* ru allocation index number */
+ uint8 mcs_val; /* mcs allocated value */
+ uint8 nss_val; /* num of spatial streams */
+ uint32 num_bytes; /* approx num of bytes to calculate other required params */
+} pkteng_reduced_t;
+
+/* Generic BW definitions */
+enum {
+ BW_20MHz = 0,
+ BW_40MHz = 1,
+ BW_80MHz = 2,
+ BW_160MHz = 3
+};
+
+/* Generic MCS */
+enum {
+ MCS_0 = 0,
+ MCS_1 = 1,
+ MCS_2 = 2,
+ MCS_3 = 3,
+ MCS_4 = 4,
+ MCS_5 = 5,
+ MCS_6 = 6,
+ MCS_7 = 7,
+ MCS_8 = 8,
+ MCS_9 = 9,
+ MCS_10 = 10,
+ MCS_11 = 11
+};
+
+/* Spatial streams */
+enum {
+ NSS1 = 1,
+ NSS2 = 2
+};
+
+/* HE-LTF Symbols index */
+enum {
+ NUM_HE_LTF_SYM0 = 0,
+ NUM_HE_LTF_SYM1 = 1,
+ NUM_HE_LTF_SYM2 = 2,
+ NUM_HE_LTF_SYM4 = 4,
+ NUM_HE_LTF_SYM5 = 5,
+ NUM_HE_LTF_SYM6 = 6
+};
+
+/* Status of STBC encoding in the solicited HE TBPPDUs */
+enum {
+ STBC_DIS = 0, /* Disabled */
+ STBC_EN = 1 /* Enabled */
+};
+
+/* pe_category, PE dur supported */
+enum {
+ PE0 = 0,
+ PE8 = 1,
+ PE16 = 2
+};
+
+/* Dual carrier modulation in solicited HE TBPPDU */
+enum {
+ DCM_DIS = 0, /* Disabled */
+ DCM_EN = 1, /* Enabled */
+};
+
+enum {
+ TRIG_TX_DIS = 0, /* Fake trigger frame TX */
+ TRIG_TX_EN = 1 /* Enable trigger frame transmission */
+};
+
+/* UL FEC Coding Type */
+enum {
+ CODING_BCC = 0, /* BCC Coding */
+ CODING_LDPC = 1 /* LDPC coding */
+};
+
+/* MU-MIMO LTF Mode subfield encoding */
+enum {
+ MUMIMO_LTF_0 = 0, /* HE single stream pilot HE-LTF mode */
+ MUMIMO_LTF_1 = 1 /* HE masked HE-LTF sequence mode */
+};
#endif /* _wlioctl_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h
index 363b3c9..b6ec056 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h
@@ -4,7 +4,9 @@
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -63,6 +65,7 @@
#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_HE_NSS_UNSPECIFIED 0xF
/* WL_RSPEC_BW field defs */
#define WL_RSPEC_BW_UNSPECIFIED 0
@@ -380,6 +383,8 @@
/* bit definitions for bcnflags in wl_bss_info */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */
+#define WL_BSS_BCNFLAGS_MULTIPLE_BSSID_SET 0x04 /* this AP belongs to a multiple BSSID set */
+#define WL_BSS_BCNFLAGS_NONTRANSMITTED_BSSID 0x08 /* this AP is a nontransmitted BSSID in the set */
/* bssinfo flag for nbss_cap */
#define VHT_BI_SGI_80MHZ 0x00000100
@@ -420,6 +425,7 @@
#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */
#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */
+#define CRYPTO_ALGO_DYN_SET 0xFFFE /* RT set according to received packet, used by GTKOE */
#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF
/* algo bit vector */
@@ -471,9 +477,7 @@
#define CKIP_MIC_ENABLED 0x0020
#endif /* BCMCCX */
#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-#ifdef BCMWAPI_WPI
#define SMS4_ENABLED 0x0100
-#endif /* BCMWAPI_WPI */
#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
@@ -508,9 +512,7 @@
#endif /* BCMCCX */
#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
-#ifdef BCMWAPI_WAI
#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
-#endif /* BCMWAPI_WAI */
/* Following macros are not used any more. Just kept here to
* avoid build issue in BISON/CARIBOU branch
@@ -520,38 +522,43 @@
#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
/* WPA authentication mode bitvec */
-#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
-#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
-#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
-#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
#if defined(BCMCCX) || defined(BCMEXTCCX)
-#define WPA_AUTH_CCKM 0x0008 /* CCKM */
-#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
+#define WPA_AUTH_CCKM 0x0008 /* CCKM */
+#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
#endif /* BCMCCX || BCMEXTCCX */
/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
-#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
-#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
-#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
-#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
-#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
-#define WPA_AUTH_WAPI 0x0400
-#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
-#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
-#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
-#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
-#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
-#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
-#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
-#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
-#define WPA2_AUTH_FILS_SHA256 0x10000 /* FILS with SHA256 key derivation */
-#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#define WPA_AUTH_WAPI 0x0400
+#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
+#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
+#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+#define WPA2_AUTH_FILS_SHA256 0x10000 /* FILS with SHA256 key derivation */
+#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */
#define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384))
-#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
-#define WPA3_AUTH_SAE_FBT 0x80000 /* SAE with FT */
-#define WPA3_AUTH_OWE 0x100000 /* OWE */
+#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
+#define WPA3_AUTH_SAE_FBT 0x80000 /* SAE with FT */
+#define WPA3_AUTH_OWE 0x100000 /* OWE */
+#define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */
+#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */
+#define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */
+#define WPA3_AUTH_1X_SHA384 0x2000000 /* 1x with SHA384 key derivation */
+
/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */
-#define WPA2_AUTH_SHA256 0x8000
-#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+#define WPA2_AUTH_SHA256 0x8000
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+#define WPA3_AUTH_SAE 0x10000 /* SAE authentication with SHA-256 */
/* pmkid */
#define MAXPMKID 16
@@ -963,6 +970,7 @@
#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
+#define WL_AUTH_SAE 3 /* Simultaneous Authentication of Equals (SAE) */
#define WL_AUTH_FILS_SHARED 4 /* d11 fils shared key authentication */
#define WL_AUTH_FILS_SHARED_PFS 5 /* d11 fils shared key w/ pfs authentication */
#define WL_AUTH_FILS_PUBLIC 6 /* d11 fils public key authentication */
@@ -1001,7 +1009,14 @@
#define WLC_BAND_AUTO 0 /* auto-select */
#define WLC_BAND_5G 1 /* 5 Ghz */
#define WLC_BAND_2G 2 /* 2.4 Ghz */
+#define WLC_BAND_6G 3 /* 6 Ghz */
+#define WLC_BAND_ALL 4 /* all bands */
+
+#ifdef CONFIG_BCM43012
+#undef WLC_BAND_ALL
#define WLC_BAND_ALL 3 /* all bands */
+#endif // endif
+
#define WLC_BAND_INVALID -1 /* Invalid band */
/* band range returned by band_range iovar */
@@ -1022,6 +1037,12 @@
#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
+/* 6G Subbands */
+#define WL_CHAN_FREQ_RANGE_6G_BAND0 5
+#define WL_CHAN_FREQ_RANGE_6G_BAND1 6
+#define WL_CHAN_FREQ_RANGE_6G_BAND2 7
+#define WL_CHAN_FREQ_RANGE_6G_BAND3 8
+
/* SROM12 */
#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
#define WL_CHAN_FREQ_RANGE_2G_40 6
@@ -1167,7 +1188,8 @@
#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */
#define ACPHY_LPD_PREEMPTION 32 /* bit 5 */
#define ACPHY_HWOBSS_MITIGATION 64 /* bit 6 */
-#define ACPHY_ACI_MAX_MODE 127
+#define ACPHY_ACI_NBDETECT 128 /* bit 7 */
+#define ACPHY_ACI_MAX_MODE 255
/* AP environment */
#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */
@@ -1353,6 +1375,7 @@
*/
#define WL_ASSOC_AP_VAL 0x00000001
#define WL_FILS_VAL 0x00000002
+#define WL_HE_VAL 0x00000004
/* max # of leds supported by GPIO (gpio pin# == led index#) */
#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
@@ -1429,22 +1452,23 @@
#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
/* bit position in per_chan_info; these depend on current country/regulatory domain */
-#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
-#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
-#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
-#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
-#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
-#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
-#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
-#define WL_CHAN_RADAR_EU_WEATHER (1 << 7) /* EU Radar weather channel. Implies an
- * EU Radar channel.
- */
-#define WL_CHAN_CLM_RESTRICTED (1 << 8) /* channel restricted in CLM
- * (i.e. by default)
- */
-
-/* following definition is for precommit; will be removed once wl, acsd switch to the new def */
-#define WL_CHAN_WEATHER_RADAR WL_CHAN_RADAR_EU_WEATHER
+#define WL_CHAN_VALID_HW (1u << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1u << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1u << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1u << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1u << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1u << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1u << 6) /* restricted use channel */
+#define WL_CHAN_RADAR_EU_WEATHER (1u << 7) /* EU Radar weather channel.
+ * Implies an EU Radar channel.
+ */
+#define WL_CHAN_CLM_RESTRICTED (1u << 8) /* channel restricted in CLM (i.e. by default) */
+#define WL_CHAN_BAND_6G (1u << 9) /* 6GHz-band channel */
+#define WL_CHAN_OOS_SHIFT 24u /* shift for OOS field */
+#define WL_CHAN_OOS_MASK 0xFF000000u /* field specifying minutes remaining for this
+ * channel's out-of-service period due to radar
+ * detection
+ */
/* BTC mode used by "btc_mode" iovar */
#define WL_BTC_DISABLE 0 /* disable BT coexistence */
@@ -1549,6 +1573,7 @@
#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */
+#define WL_PKTENG_COLLECT 0x400 /* Save last Rx'ed packet */
#ifdef PKTENG_LONGPKTSZ
/* max pktsz limit for pkteng */
#define WL_PKTENG_MAXPKTSZ PKTENG_LONGPKTSZ
@@ -1882,7 +1907,8 @@
#define WL_WNM_FMS 0x00000080
#define WL_WNM_NOTIF 0x00000100
#define WL_WNM_WBTEXT 0x00000200
-#define WL_WNM_MAX 0x00000400
+#define WL_WNM_ESTM 0x00000400
+#define WL_WNM_MAX 0x00000800
#ifdef WLWNM_BRCM
#define BRCM_WNM_FEATURE_SET\
(WL_WNM_PROXYARP | \
@@ -2003,6 +2029,8 @@
/* report found/lost events for SSID and BSSID networks seperately */
#define REPORT_SEPERATELY_BIT 11
#define BESTN_BSSID_ONLY_BIT 12
+/* Debug Flags */
+#define FORCE_SSID_SWITCH 13
#define SORT_CRITERIA_MASK 0x0001
#define AUTO_NET_SWITCH_MASK 0x0002
@@ -2019,6 +2047,8 @@
#define REPORT_SEPERATELY_MASK 0x0800
#define BESTN_BSSID_ONLY_MASK 0x1000
+#define FORCE_SSID_SWITCH_MASK 0x2000
+
#define PFN_VERSION 2
#ifdef PFN_SCANRESULT_2
#define PFN_SCANRESULT_VERSION 2
@@ -2063,9 +2093,14 @@
#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */
#define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */
#define WL_PFN_HIDDEN_MASK 0x4
-#define MAX_SSID_WHITELIST_NUM 4
-#define MAX_BSSID_PREF_LIST_NUM 32
-#define MAX_BSSID_BLACKLIST_NUM 32
+#define MAX_BSSID_PREF_LIST_NUM 32
+#ifndef CONFIG_SOC_S5E5515
+#define MAX_SSID_WHITELIST_NUM 4
+#define MAX_BSSID_BLACKLIST_NUM 32
+#else
+#define MAX_SSID_WHITELIST_NUM 0
+#define MAX_BSSID_BLACKLIST_NUM 0
+#endif /* CONFIG_SOC_S5E5515 */
#ifndef BESTN_MAX
#define BESTN_MAX 10
@@ -2081,10 +2116,11 @@
#define TOE_ERRTEST_RX_CSUM2 0x00000004
/* ARP Offload feature flags for arp_ol iovar */
-#define ARP_OL_AGENT 0x00000001
-#define ARP_OL_SNOOP 0x00000002
-#define ARP_OL_HOST_AUTO_REPLY 0x00000004
-#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define ARP_OL_UPDATE_HOST_CACHE 0x00000010
/* ARP Offload error injection */
#define ARP_ERRTEST_REPLY_PEER 0x1
@@ -2136,7 +2172,7 @@
/* RFAWARE def */
#define BCM_ACTION_RFAWARE 0x77
-#define BCM_ACTION_RFAWARE_DCS 0x01
+#define BCM_ACTION_RFAWARE_DCS 0x20
/* DCS reason code define */
#define BCM_DCS_IOVAR 0x1
@@ -2260,6 +2296,7 @@
#define PM_MAX 1
#define PM_FAST 2
#define PM_FORCE_OFF 3 /* use this bit to force PM off even bt is active */
+#define PM_TWT 4
#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
@@ -2315,4 +2352,7 @@
#define ETD_DATA_JOIN_INFO 0
#define ETD_DATA_VERSION_V1 1
+/* The macro for data field in empty vendor specific action frame */
+#define VS_EMPTY_ACTION 0xac
+
#endif /* wlioctl_defs_h */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h
index 186c833..f8d5155 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h
@@ -1,7 +1,9 @@
/*
* Custom OID/ioctl related helper functions.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wpa.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wpa.h
index 56d83a1..fb7253d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wpa.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wpa.h
@@ -1,7 +1,9 @@
/*
* Fundamental types and constants relating to WPA
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -135,13 +137,15 @@
#define WPA_CIPHER_AES_GCM 8 /* AES (GCM) */
#define WPA_CIPHER_AES_GCM256 9 /* AES (GCM256) */
+#define WPA_CIPHER_CCMP_256 10 /* CCMP-256 */
+#define WPA_CIPHER_BIP_GMAC_128 11 /* BIP_GMAC_128 */
+#define WPA_CIPHER_BIP_GMAC_256 12 /* BIP_GMAC_256 */
+#define WPA_CIPHER_BIP_CMAC_256 13 /* BIP_CMAC_256 */
-#ifdef BCMWAPI_WAI
#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
#define WAPI_CIPHER_SMS4 11
#define WAPI_CSE_WPI_SMS4 1
-#endif /* BCMWAPI_WAI */
#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
(cipher) == WPA_CIPHER_WEP_40 || \
@@ -153,7 +157,6 @@
(cipher) == WPA_CIPHER_AES_GCM256 || \
(cipher) == WPA_CIPHER_TPK)
-#ifdef BCMWAPI_WAI
#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
(cipher) == WAPI_CSE_WPI_SMS4)
@@ -163,8 +166,29 @@
#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
-#endif /* BCMWAPI_WAI */
+#define IS_VALID_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK || \
+ (akm) == RSN_AKM_FBT_1X || \
+ (akm) == RSN_AKM_FBT_PSK || \
+ (akm) == RSN_AKM_MFP_1X || \
+ (akm) == RSN_AKM_MFP_PSK || \
+ (akm) == RSN_AKM_SHA256_1X || \
+ (akm) == RSN_AKM_SHA256_PSK || \
+ (akm) == RSN_AKM_TPK || \
+ (akm) == RSN_AKM_SAE_PSK || \
+ (akm) == RSN_AKM_SAE_FBT || \
+ (akm) == RSN_AKM_FILS_SHA256 || \
+ (akm) == RSN_AKM_FILS_SHA384 || \
+ (akm) == RSN_AKM_OWE || \
+ (akm) == RSN_AKM_SUITEB_SHA256_1X || \
+ (akm) == RSN_AKM_SUITEB_SHA384_1X)
+
+#define IS_VALID_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
+ (cipher) == WPA_CIPHER_BIP_CMAC_256)
/* WPA TKIP countermeasures parameters */
#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */
@@ -205,7 +229,45 @@
#define WPA2_PMKID_COUNT_LEN 2
-#ifdef BCMWAPI_WAI
+/* RSN dev type in rsn_info struct */
+typedef enum {
+ DEV_NONE = 0,
+ DEV_STA = 1,
+ DEV_AP = 2
+} device_type_t;
+
+typedef uint32 rsn_akm_mask_t; /* RSN_AKM_... see 802.11.h */
+typedef uint8 rsn_cipher_t; /* WPA_CIPHER_xxx */
+typedef uint32 rsn_ciphers_t; /* mask of rsn_cipher_t */
+typedef uint8 rsn_akm_t;
+typedef uint8 auth_ie_type_mask_t;
+
+typedef struct rsn_ie_info {
+ uint8 version;
+ rsn_cipher_t g_cipher;
+ uint8 p_count;
+ uint8 akm_count;
+ uint8 pmkid_count;
+ rsn_akm_t sta_akm; /* single STA akm */
+ uint16 caps;
+ rsn_ciphers_t p_ciphers;
+ rsn_akm_mask_t akms;
+ uint8 pmkids_offset; /* offset into the IE */
+ rsn_cipher_t g_mgmt_cipher;
+ device_type_t dev_type; /* AP or STA */
+ rsn_cipher_t sta_cipher; /* single STA cipher */
+ uint16 key_desc; /* key descriptor version as STA */
+ int parse_status;
+ uint16 mic_len; /* unused. keep for ROM compatibility. */
+ auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) CCX WAPI */
+ uint8 pmk_len; /* EAPOL PMK */
+ uint8 kck_mic_len; /* EAPOL MIC (by KCK) */
+ uint8 kck_len; /* EAPOL KCK */
+ uint8 kek_len; /* EAPOL KEK */
+ uint8 tk_len; /* EAPOL TK */
+ uint8 ptk_len; /* EAPOL PTK */
+} rsn_ie_info_t;
+
#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
/* Other WAI definition */
@@ -217,7 +279,6 @@
#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */
#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */
-#endif /* BCMWAPI_WAI */
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wps.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wps.h
index 3c4cbf4..d5f2e4c 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wps.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/include/wps.h
@@ -1,7 +1,9 @@
/*
* WPS IE definitions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl.c
index 558dc5a..b66b0ea 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -1,7 +1,9 @@
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -101,7 +103,11 @@
/* PCMCIA attribute space access macros */
+#ifdef CUSTOMER_HW4_DEBUG
+uint32 g_assert_type = 1; /* By default, do not cause a kernel panic */
+#else
uint32 g_assert_type = 0; /* By Default Kernel Panic */
+#endif /* CUSTOMER_HW4_DEBUG */
module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
@@ -119,9 +125,13 @@
#endif /* BCM_SECURE_DMA */
#ifdef USE_DMA_LOCK
-#define DMA_LOCK(osh) spin_lock_bh(&(osh)->dma_lock)
-#define DMA_UNLOCK(osh) spin_unlock_bh(&(osh)->dma_lock)
-#define DMA_LOCK_INIT(osh) spin_lock_init(&(osh)->dma_lock)
+static void osl_dma_lock(osl_t *osh);
+static void osl_dma_unlock(osl_t *osh);
+static void osl_dma_lock_init(osl_t *osh);
+
+#define DMA_LOCK(osh) osl_dma_lock(osh)
+#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
+#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh);
#else
#define DMA_LOCK(osh) do { /* noop */ } while(0)
#define DMA_UNLOCK(osh) do { /* noop */ } while(0)
@@ -197,12 +207,13 @@
-EINVAL, /* BCME_DACBUF_RCCAL_FAIL */
-EINVAL, /* BCME_VCOCAL_FAIL */
-EINVAL, /* BCME_BANDLOCKED */
+ -EINVAL, /* BCME_DNGL_DEVRESET */
/* When an new error code is added to bcmutils.h, add os
* specific error translation here as well
*/
/* check if BCME_LAST changed since the last time this function was updated */
-#if BCME_LAST != -67
+#if BCME_LAST != -68
#error "You need to add a OS error translation in the linuxbcmerrormap \
for new error code defined in bcmutils.h"
#endif // endif
@@ -212,27 +223,93 @@
#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048
+typedef struct dhd_map_item {
+ dmaaddr_t pa; /* DMA address (physical) */
+ uint64 ts_nsec; /* timestamp: nsec */
+ uint32 size; /* mapping size */
+ uint8 rsvd[4]; /* reserved for future use */
+} dhd_map_item_t;
+
typedef struct dhd_map_record {
- dma_addr_t addr;
- uint64 time;
+ uint32 items; /* number of total items */
+ uint32 idx; /* current index of metadata */
+ dhd_map_item_t map[0]; /* metadata storage */
} dhd_map_log_t;
-dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL;
-uint32 map_idx = 0, unmap_idx = 0;
-
void
-osl_dma_map_dump(void)
+osl_dma_map_dump(osl_t *osh)
{
- printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n",
- __FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US());
- if (dhd_map_log && dhd_unmap_log) {
- printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n",
- __FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log),
- (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE),
- (uint64)__virt_to_phys((ulong)dhd_unmap_log),
- (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE));
+ dhd_map_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ map_log = (dhd_map_log_t *)(osh->dhd_map_log);
+ unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
+ osl_get_localtime(&ts_sec, &ts_usec);
+
+ if (map_log && unmap_log) {
+ printk("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
+ (unsigned long)ts_usec);
+ printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
+ " dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
}
}
+
+static void *
+osl_dma_map_log_init(uint32 item_len)
+{
+ dhd_map_log_t *map_log;
+ gfp_t flags;
+ uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
+ (item_len * sizeof(dhd_map_item_t)));
+
+ flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+ map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
+ if (map_log) {
+ memset(map_log, 0, alloc_size);
+ map_log->items = item_len;
+ map_log->idx = 0;
+ }
+
+ return (void *)map_log;
+}
+
+static void
+osl_dma_map_log_deinit(osl_t *osh)
+{
+ if (osh->dhd_map_log) {
+ kfree(osh->dhd_map_log);
+ osh->dhd_map_log = NULL;
+ }
+
+ if (osh->dhd_unmap_log) {
+ kfree(osh->dhd_unmap_log);
+ osh->dhd_unmap_log = NULL;
+ }
+}
+
+static void
+osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
+{
+ dhd_map_log_t *log = (dhd_map_log_t *)handle;
+ uint32 idx;
+
+ if (log == NULL) {
+ printk("%s: log is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ idx = log->idx;
+ log->map[idx].ts_nsec = osl_localtime_ns();
+ log->map[idx].pa = pa;
+ log->map[idx].size = len;
+ log->idx = (idx + 1) % log->items;
+}
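+
+/* Illustrative sketch only, not part of this change: log->idx always points
+ * at the slot that will be overwritten next, so once the ring has wrapped the
+ * oldest record sits at idx and the newest at (idx + items - 1) % items. The
+ * walker below and its callback are assumptions for illustration; slots never
+ * written yet are simply the zero-filled entries from init.
+ */
+static void
+osl_dma_map_log_walk(dhd_map_log_t *log, void (*visit)(dhd_map_item_t *item))
+{
+	uint32 i;
+
+	if (log == NULL || visit == NULL)
+		return;
+
+	for (i = 0; i < log->items; i++) {
+		visit(&log->map[(log->idx + i) % log->items]);
+	}
+}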
#endif /* DHD_MAP_LOGGING */
/* translate bcmerrors into linux errors */
@@ -403,13 +480,14 @@
DMA_LOCK_INIT(osh);
#ifdef DHD_MAP_LOGGING
- dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
- if (dhd_map_log) {
- memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
+ osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_map_log == NULL) {
+ printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
}
- dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
- if (dhd_unmap_log) {
- memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
+
+ osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_unmap_log == NULL) {
+ printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
}
#endif /* DHD_MAP_LOGGING */
@@ -454,12 +532,8 @@
bcm_object_trace_deinit();
#ifdef DHD_MAP_LOGGING
- if (dhd_map_log) {
- kfree(dhd_map_log);
- }
- if (dhd_unmap_log) {
- kfree(dhd_unmap_log);
- }
+ osl_dma_map_log_deinit(osh->dhd_map_log);
+ osl_dma_map_log_deinit(osh->dhd_unmap_log);
#endif /* DHD_MAP_LOGGING */
ASSERT(osh->magic == OS_HANDLE_MAGIC);
@@ -588,7 +662,7 @@
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+#if defined(__ARM_ARCH_7A__)
return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
return ((struct pci_dev *)osh->pdev)->bus->number;
@@ -601,7 +675,7 @@
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+#if defined(__ARM_ARCH_7A__)
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
@@ -908,14 +982,12 @@
return (void *)(uintptr)virt_to_phys(va);
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#include <asm/cacheflush.h>
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
return;
}
-#endif /* LINUX_VERSION_CODE >= 2.6.36 */
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
@@ -952,22 +1024,8 @@
map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */
-#ifdef DHD_MAP_LOGGING
- if (dhd_map_log) {
- dhd_map_log[map_idx].addr = map_addr;
- dhd_map_log[map_idx].time = OSL_SYSUPTIME_US();
- map_idx++;
- map_idx = map_idx % DHD_MAP_LOG_SIZE;
- }
-#endif /* DHD_MAP_LOGGING */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
ret = pci_dma_mapping_error(osh->pdev, map_addr);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
- ret = pci_dma_mapping_error(map_addr);
-#else
- ret = 0;
-#endif // endif
+
if (ret) {
printk("%s: Failed to map memory\n", __FUNCTION__);
PHYSADDRLOSET(ret_addr, 0);
@@ -977,6 +1035,10 @@
PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
}
+#ifdef DHD_MAP_LOGGING
+ osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
+#endif /* DHD_MAP_LOGGING */
+
DMA_UNLOCK(osh);
return ret_addr;
@@ -996,17 +1058,12 @@
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-#ifdef BCMDMA64OSL
- PHYSADDRTOULONG(pa, paddr);
#ifdef DHD_MAP_LOGGING
- if (dhd_unmap_log) {
- dhd_unmap_log[unmap_idx].addr = paddr;
- dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
- unmap_idx++;
- unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
- }
+ osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */
@@ -1024,19 +1081,11 @@
dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
-#ifdef DHD_MAP_LOGGING
- if (dhd_unmap_log) {
- dhd_unmap_log[unmap_idx].addr = pa;
- dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
- unmap_idx++;
- unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
- }
-#endif /* DHD_MAP_LOGGING */
-
pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */
#endif /* BCMDMA64OSL */
+
DMA_UNLOCK(osh);
}
@@ -1122,12 +1171,10 @@
void
osl_sleep(uint ms)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if (ms < 20)
usleep_range(ms*1000, ms*1000 + 1000);
else
-#endif // endif
- msleep(ms);
+ msleep(ms);
}
uint64
@@ -1142,6 +1189,43 @@
return usec;
}
+uint64
+osl_localtime_ns(void)
+{
+ uint64 ts_nsec = 0;
+
+ ts_nsec = local_clock();
+
+ return ts_nsec;
+}
+
+void
+osl_get_localtime(uint64 *sec, uint64 *usec)
+{
+ uint64 ts_nsec = 0;
+ unsigned long rem_nsec = 0;
+
+ ts_nsec = local_clock();
+ rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
+ *sec = (uint64)ts_nsec;
+ *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
+}
+
+uint64
+osl_systztime_us(void)
+{
+ struct timeval tv;
+ uint64 tzusec;
+
+ do_gettimeofday(&tv);
+ /* apply timezone */
+ tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
+ USEC_PER_SEC);
+ tzusec += tv.tv_usec;
+
+ return tzusec;
+}
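+
+/* Illustrative note only, not part of this change: the unit conversions
+ * performed by the helpers above, with example numbers. For a local_clock()
+ * value of 1234567891011 ns:
+ *   sec  = 1234567891011 / NSEC_PER_SEC          = 1234 s
+ *   usec = (1234567891011 % NSEC_PER_SEC) / 1000 = 567891 us
+ * and for a gettimeofday() result taken with tz_minuteswest = -60 (UTC+1):
+ *   tzusec = (tv_sec + 3600) * USEC_PER_SEC + tv_usec
+ */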
+
/*
* OSLREGOPS specifies the use of osl_XXX routines to be used for register access
*/
@@ -1188,11 +1272,12 @@
if (!image)
return 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
rdlen = kernel_read(fp, fp->f_pos, buf, len);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+
if (rdlen > 0)
fp->f_pos += rdlen;
@@ -1810,3 +1895,78 @@
}
return (TRUE);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
+int
+kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
+{
+ return (int)kernel_read(file, addr, (size_t)count, &offset);
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
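+
+/* Illustrative sketch only, not part of this change: kernel_read_compat()
+ * keeps the pre-4.14 calling convention (offset passed by value), so the
+ * caller's file position is left untouched. 'fp' is assumed to be an
+ * already-opened, validated struct file pointer; the wrapper is hypothetical.
+ */
+static int
+read_file_header_compat(struct file *fp, char *buf, unsigned long len)
+{
+	/* always read from the start of the file; fp->f_pos is not advanced */
+	return kernel_read_compat(fp, 0, buf, len);
+}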
+
+void *
+osl_spin_lock_init(osl_t *osh)
+{
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+
+void
+osl_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+
+unsigned long
+osl_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+
+ return flags;
+}
+
+void
+osl_spin_unlock(void *lock, unsigned long flags)
+{
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
+
+#ifdef USE_DMA_LOCK
+static void
+osl_dma_lock(osl_t *osh)
+{
+ if (likely(in_irq() || irqs_disabled())) {
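+		/* take the plain lock here: the _bh variant would re-enable
+		 * softirqs on unlock, which is not allowed in hard-IRQ or
+		 * irqs-disabled context
+		 */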
+ spin_lock(&osh->dma_lock);
+ } else {
+ spin_lock_bh(&osh->dma_lock);
+ osh->dma_lock_bh = TRUE;
+ }
+}
+
+static void
+osl_dma_unlock(osl_t *osh)
+{
+ if (unlikely(osh->dma_lock_bh)) {
+ osh->dma_lock_bh = FALSE;
+ spin_unlock_bh(&osh->dma_lock);
+ } else {
+ spin_unlock(&osh->dma_lock);
+ }
+}
+
+static void
+osl_dma_lock_init(osl_t *osh)
+{
+ spin_lock_init(&osh->dma_lock);
+ osh->dma_lock_bh = FALSE;
+}
+#endif /* USE_DMA_LOCK */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl_priv.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl_priv.h
index 3ed32f4..ca71d66 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl_priv.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_osl_priv.h
@@ -1,7 +1,9 @@
/*
* Private header file for Linux OS Independent Layer
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -167,7 +169,12 @@
#endif /* BCM_BACKPLANE_TIMEOUT */
#ifdef USE_DMA_LOCK
spinlock_t dma_lock;
+ bool dma_lock_bh;
#endif /* USE_DMA_LOCK */
+#ifdef DHD_MAP_LOGGING
+ void *dhd_map_log;
+ void *dhd_unmap_log;
+#endif /* DHD_MAP_LOGGING */
};
#endif /* _LINUX_OSL_PRIV_H_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_pkt.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_pkt.c
index 688ca59..b42dac9 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_pkt.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/linux_pkt.c
@@ -1,7 +1,9 @@
/*
* Linux Packet (skb) interface
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -57,21 +59,17 @@
#define OSL_PKTTAG_CLEAR(p) \
do { \
struct sk_buff *s = (struct sk_buff *)(p); \
- ASSERT(OSL_PKTTAG_SZ == 32); \
- *(uint32 *)(&s->cb[4]) = 0; \
- *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
- *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
- *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+ uint tagsz = sizeof(s->cb); \
+ ASSERT(OSL_PKTTAG_SZ <= tagsz); \
+ memset(s->cb + 4, 0, tagsz - 4); \
} while (0)
#else
#define OSL_PKTTAG_CLEAR(p) \
do { \
struct sk_buff *s = (struct sk_buff *)(p); \
- ASSERT(OSL_PKTTAG_SZ == 32); \
- *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
- *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
- *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
- *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+ uint tagsz = sizeof(s->cb); \
+ ASSERT(OSL_PKTTAG_SZ <= tagsz); \
+ memset(s->cb, 0, tagsz); \
} while (0)
#endif /* BCM_OBJECT_TRACE */
@@ -86,7 +84,7 @@
ASSERT(osh->magic == OS_HANDLE_MAGIC);
return -ENOMEM;
} else {
- printk("alloc static buf at %p!\n", bcm_static_buf);
+		printk("successfully allocated static buf\n");
}
spin_lock_init(&bcm_static_buf->static_lock);
@@ -619,6 +617,7 @@
*/
fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
skb->truesize -= fraction;
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
#else
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/otpdefs.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/otpdefs.h
new file mode 100644
index 0000000..dff03f9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/otpdefs.h
@@ -0,0 +1,144 @@
+/*
+ * otpdefs.h SROM/OTP definitions.
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright 2021 Broadcom
+ *
+ * This program is the proprietary software of Broadcom and/or
+ * its licensors, and may only be used, duplicated, modified or distributed
+ * pursuant to the terms and conditions of a separate, written license
+ * agreement executed between you and Broadcom (an "Authorized License").
+ * Except as set forth in an Authorized License, Broadcom grants no license
+ * (express or implied), right to use, or waiver of any kind with respect to
+ * the Software, and Broadcom expressly reserves all rights in and to the
+ * Software and all intellectual property rights therein. IF YOU HAVE NO
+ * AUTHORIZED LICENSE, THEN YOU HAVE NO RIGHT TO USE THIS SOFTWARE IN ANY
+ * WAY, AND SHOULD IMMEDIATELY NOTIFY BROADCOM AND DISCONTINUE ALL USE OF
+ * THE SOFTWARE.
+ *
+ * Except as expressly set forth in the Authorized License,
+ *
+ * 1. This program, including its structure, sequence and organization,
+ * constitutes the valuable trade secrets of Broadcom, and you shall use
+ * all reasonable efforts to protect the confidentiality thereof, and to
+ * use this information only in connection with your use of Broadcom
+ * integrated circuit products.
+ *
+ * 2. TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED
+ * "AS IS" AND WITH ALL FAULTS AND BROADCOM MAKES NO PROMISES,
+ * REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR
+ * OTHERWISE, WITH RESPECT TO THE SOFTWARE. BROADCOM SPECIFICALLY
+ * DISCLAIMS ANY AND ALL IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
+ * NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES,
+ * ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. YOU ASSUME THE ENTIRE RISK ARISING
+ * OUT OF USE OR PERFORMANCE OF THE SOFTWARE.
+ *
+ * 3. TO THE MAXIMUM EXTENT PERMITTED BY LAW, IN NO EVENT SHALL
+ * BROADCOM OR ITS LICENSORS BE LIABLE FOR (i) CONSEQUENTIAL, INCIDENTAL,
+ * SPECIAL, INDIRECT, OR EXEMPLARY DAMAGES WHATSOEVER ARISING OUT OF OR
+ * IN ANY WAY RELATING TO YOUR USE OF OR INABILITY TO USE THE SOFTWARE EVEN
+ * IF BROADCOM HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES; OR (ii)
+ * ANY AMOUNT IN EXCESS OF THE AMOUNT ACTUALLY PAID FOR THE SOFTWARE ITSELF
+ * OR U.S. $1, WHICHEVER IS GREATER. THESE LIMITATIONS SHALL APPLY
+ * NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _OTPDEFS_H_
+#define _OTPDEFS_H_
+
+/* SFLASH */
+#define SFLASH_ADDRESS_OFFSET_4368 0x1C000000u
+#define SFLASH_SKU_OFFSET_4368 0xEu
+#define SFLASH_MACADDR_OFFSET_4368 0x4u
+/*
+ * In sflash based chips, the first word in sflash gives the length,
+ * so only a default value is defined here. The actual length is read
+ * from sflash in dhdpcie_srom_sflash_health_chk.
+ * Default: 0x0521 words * 2 bytes/word = 0xA42 bytes (the length field
+ * counts words).
+ */
+#define SFLASH_LEN_4368 0xA42u
+
+#define SROM_ADDRESS_OFFSET_4355 0x0800u
+#define SROM_ADDRESS_OFFSET_4364 0xA000u
+#define SROM_ADDRESS_OFFSET_4377 0x0800u
+#define SROM_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
+#define SROM_MACADDR_OFFSET_4355 0x84u
+#define SROM_MACADDR_OFFSET_4364 0x82u
+#define SROM_MACADDR_OFFSET_4377 0xE2u
+#define SROM_SKU_OFFSET_4355 0x8Au
+#define SROM_SKU_OFFSET_4364 0x8Cu
+#define SROM_SKU_OFFSET_4377 0xECu
+#define SROM_CAL_SIG1_OFFSET_4355 0xB8u
+#define SROM_CAL_SIG2_OFFSET_4355 0xBAu
+#define SROM_CAL_SIG1_OFFSET_4364 0xA0u
+#define SROM_CAL_SIG2_OFFSET_4364 0xA2u
+#define SROM_CAL_SIG1 0x4c42u
+#define SROM_CAL_SIG2 0x424fu
+#define SROM_LEN_4355 512u
+#define SROM_LEN_4364 2048u
+#define SROM_LEN_4377 2048u
+
+#define OTP_USER_AREA_OFFSET_4355 0xC0u
+#define OTP_USER_AREA_OFFSET_4364 0xC0u
+#define OTP_USER_AREA_OFFSET_4368 0x120u
+#define OTP_USER_AREA_OFFSET_4377 0x120u
+#define OTP_OFFSET_4368 0x5000u
+#define OTP_OFFSET_4377 0x11000u
+#define OTP_CTRL1_VAL 0xFA0000
+#define OTP_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
+#define OTP_VERSION_TUPLE_ID 0x15
+#define OTP_VENDOR_TUPLE_ID 0x80
+#define OTP_CIS_REGION_END_TUPLE_ID 0XFF
+
+#define PCIE_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0x3000)
+#define SPROM_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_CTRL)
+#define SPROM_CTRL_OPCODE_READ_MASK 0x9FFFFFFF
+#define SPROM_CTRL_START_BUSY_MASK 0x80000000
+#define SPROM_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_ADDRESS)
+#define SPROM_DATA(sih) (SI_ENUM_BASE(sih) + CC_SROM_DATA)
+#define OTP_CTRL1_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0xF4)
+#define PMU_MINRESMASK_REG_ADDR(sih) (SI_ENUM_BASE(sih) + MINRESMASKREG)
+#define CHIP_COMMON_STATUS_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CHIPST)
+#define CHIP_COMMON_CLKDIV2_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CLKDIV2)
+
+#define CC_CLKDIV2_SPROMDIV_MASK 0x7u
+#define CC_CLKDIV2_SPROMDIV_VAL 0X4u
+#define CC_CHIPSTATUS_STRAP_BTUART_MASK 0x40u
+#define PMU_OTP_PWR_ON_MASK 0xC47
+#define PMU_PWRUP_DELAY 500 /* in us */
+#define DONGLE_TREFUP_PROGRAM_DELAY 5000 /* 5ms in us */
+#define SPROM_BUSY_POLL_DELAY 5 /* 5us */
+
+typedef enum {
+ BCM4355_IDX = 0,
+ BCM4364_IDX,
+ BCM4368_IDX,
+ BCM4377_IDX,
+ BCMMAX_IDX
+} chip_idx_t;
+
+typedef enum {
+ BCM4368_BTOP_IDX,
+ BCM4377_BTOP_IDX,
+ BCMMAX_BTOP_IDX
+} chip_idx_btop_t;
+
+typedef enum {
+ BCM4368_SFLASH_IDX,
+ BCMMAX_SFLASH_IDX
+} chip_idx_sflash_t;
+
+extern uint32 otp_addr_offsets[];
+extern uint32 otp_usrarea_offsets[];
+extern uint32 sku_offsets[];
+extern uint32 srf_addr_offsets[];
+extern uint32 supported_chips[];
+
+char *dhd_get_plat_sku(void);
+#endif /* _OTPDEFS_H */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/pcie_core.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/pcie_core.c
index d57246b..6fb018a 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/pcie_core.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/pcie_core.c
@@ -3,7 +3,9 @@
* Contains PCIe related functions that are shared between different driver models (e.g. firmware
* builds, DHD builds, BMAC builds), in order to avoid code duplication.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -56,8 +58,6 @@
/* wd_mask/wd_val is only for chipc_corerev >= 65 */
void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val)
{
-/* To avoid hang on FPGA, donot reset watchdog */
-#ifndef BCMFPGA_HW
uint32 val, i, lsc;
uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
@@ -69,6 +69,21 @@
uint32 origidx = si_coreidx(sih);
int32 bcmerror = BCME_ERROR;
+#ifdef BCMQT
+	/* To avoid hang on FPGA, do not reset watchdog */
+ if (CCREV(sih->ccrev) < 65) {
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+#endif // endif
+#ifdef BCMFPGA_HW
+ if (CCREV(sih->ccrev) < 67) {
+		/* To avoid hang on FPGA, do not reset watchdog */
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+#endif // endif
+
/* Switch to PCIE2 core */
pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
BCM_REFERENCE(pcieregs);
@@ -93,7 +108,11 @@
si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+#ifdef BCMQT
+ OSL_DELAY(10000000);
+#else
OSL_DELAY(100000);
+#endif // endif
/*
* CYW55560 - Once watchdog reset initiated, bootloader takes much time to be ready,
@@ -121,7 +140,6 @@
}
}
si_setcoreidx(sih, origidx);
-#endif /* BCMFPGA_HW */
}
/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/sbutils.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/sbutils.c
index 019c7cf..55c7aad 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/sbutils.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/sbutils.c
@@ -2,7 +2,9 @@
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils.c
index 36aa771..f983855 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils.c
@@ -2,7 +2,9 @@
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -385,13 +387,14 @@
/* get chipcommon rev */
sii->pub.ccrev = (int)si_corerev(&sii->pub);
-//tianyan@2021.7.27 modify for add wifi6 module start
-#if 0
/* get chipcommon chipstatus */
- if (CCREV(sii->pub.ccrev) >= 11)
- sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
-#endif
-//tianyan@2021.7.27 modify for add wifi6 module end
+ if (CCREV(sii->pub.ccrev) >= 11) {
+ /* TODO : We need a better approach to avoid this access in secure mode chips */
+ if (sii->pub.chip != CYW55500_CHIP_ID &&
+ sii->pub.chip != CYW55560_CHIP_ID) {
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+ }
+ }
/* get chipcommon capabilites */
sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
@@ -581,6 +584,12 @@
sii->chipnew = sih->chip; /* save it */
sii->pub.chip = BCM4347_CHIP_ID; /* chip class */
break;
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4377_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4369_CHIP_ID; /* chip class */
+ break;
+#endif /* CHIPS_CUSTOMER_HW6 */
default:
break;
}
@@ -744,6 +753,13 @@
sih->issim = IS_SIM(sih->chippkg);
+#ifdef CHIPS_CUSTOMER_HW6
+ if (MULTIBP_CAP(sih))
+ {
+ sih->_multibp_enable = TRUE;
+ }
+#endif // endif
+
/* scan for cores */
if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
SI_MSG(("Found chip type SB (0x%08x)\n", w));
@@ -841,6 +857,32 @@
if (bustype == PCI_BUS) {
+#ifdef BCMQT
+	/* Set OTPClkDiv to a smaller value, otherwise OTP always reads 0xFFFF.
+	 * For real chips we shouldn't set OTPClkDiv to 2 because 20/2 = 10 MHz > 9 MHz
+ */
+ {
+ uint otpclkdiv = 0;
+
+ if ((CHIPID(sih->chip) == BCM43131_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43217_CHIP_ID)) {
+ otpclkdiv = 4;
+ }
+
+ if (otpclkdiv != 0) {
+ uint clkdiv, savecore;
+ savecore = si_coreidx(sih);
+ si_setcore(sih, CC_CORE_ID, 0);
+
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ clkdiv = (clkdiv & ~CLKD_OTP) | (otpclkdiv << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+
+ SI_ERROR(("%s: set clkdiv to 0x%x for QT\n", __FUNCTION__, clkdiv));
+ si_setcoreidx(sih, savecore);
+ }
+ }
+#endif /* BCMQT */
}
#endif // endif
#ifdef BCM_SDRBL
@@ -1055,6 +1097,23 @@
ASSERT(0);
}
+uint32
+si_oobr_baseaddr(si_t *sih, bool second)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return 0;
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return (second ? sii->oob_router1 : sii->oob_router);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
uint
si_coreid(si_t *sih)
{
@@ -1620,6 +1679,12 @@
}
}
+uint
+si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ return ai_corereg_writeonly(sih, coreidx, regoff, mask, val);
+}
+
/** ILP sensitive register access needs special treatment to avoid backplane stalls */
bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
{
@@ -1732,10 +1797,11 @@
uint idx = si_findcoreidx(sih, coreid, 0);
uint num = 0;
- if (CHIPTYPE(sih->socitype) == SOCI_AI) {
- num = ai_num_slaveports(sih, idx);
+ if (idx != BADIDX) {
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ num = ai_num_slaveports(sih, idx);
+ }
}
-
return num;
}
@@ -1976,6 +2042,12 @@
else if (CST4369_CHIPMODE_PCIE(sih->chipst))
hosti = CHIP_HOSTIF_PCIEMODE;
break;
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4368_CHIP_GRPID:
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+#endif /* CHIPS_CUSTOMER_HW6 */
+
case BCM4350_CHIP_ID:
case BCM4354_CHIP_ID:
case BCM43556_CHIP_ID:
@@ -2033,7 +2105,8 @@
SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO),
PMU_MAX_TRANSITION_DLY);
}
- if (sih->chip == CYW55560_CHIP_ID) {
+ if (sih->chip == CYW55500_CHIP_ID ||
+ sih->chip == CYW55560_CHIP_ID) {
si_corereg(sih, si_findcoreidx(sih, PMU_CORE_ID, 0),
OFFSETOF(pmuregs_t, pmuwatchdog), ~0, ticks);
} else {
@@ -2875,6 +2948,7 @@
bool wasup = FALSE;
uint32 corecap;
uint memsize = 0;
+ uint banku_size = 0;
uint32 nab = 0;
uint32 nbb = 0;
uint32 totb = 0;
@@ -2896,8 +2970,8 @@
* but remain in halt
*/
if (!sih->secureboot) {
- if (!(wasup = si_iscoreup(sih)))
- si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
}
arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP);
@@ -2913,7 +2987,12 @@
W_REG(sii->osh, arm_bidx, idx);
bxinfo = R_REG(sii->osh, arm_binfo);
- memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+ if (bxinfo & ARMCR4_BUNITSZ_MASK) {
+ banku_size = ARMCR4_BSZ_1K;
+ } else {
+ banku_size = ARMCR4_BSZ_8K;
+ }
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size;
}
/* Return to previous state and core */
@@ -3229,6 +3308,10 @@
} else {
return (sih->chipst & CST4369_SPROM_PRESENT) != 0;
}
+#ifdef CHIPS_CUSTOMER_HW6
+ case BCM4368_CHIP_ID:
+ return FALSE;
+#endif /* CHIPS_CUSTOMER_HW6 */
case BCM4347_CHIP_GRPID:
return (sih->chipst & CST4347_SPROM_PRESENT) != 0;
break;
@@ -3540,11 +3623,7 @@
bool _bcmsrpwr = FALSE;
#endif // endif
-#ifndef BCMSDIO
-#define PWRREQ_OFFSET(sih) DAR_PCIE_PWR_CTRL((sih)->buscorerev)
-#else
#define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl)
-#endif // endif
static void
si_corereg_pciefast_write(si_t *sih, uint regoff, uint val)
@@ -3568,12 +3647,6 @@
ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
-#ifndef BCMSDIO
- if (PCIECOREREV(sih->buscorerev) == 66) {
- si_corereg_pciefast_write(sih, OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl), 0);
- }
-#endif // endif
-
r = (volatile uint32 *)((volatile char *)sii->curmap +
PCI_16KB0_PCIREGS_OFFSET + regoff);
@@ -3660,7 +3733,7 @@
ASSERT(r == val);
}
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
return r;
}
@@ -3678,7 +3751,7 @@
r = si_corereg_pciefast_read(sih, offset);
}
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
return r;
}
@@ -3696,11 +3769,26 @@
r = si_corereg_pciefast_read(sih, offset);
}
- r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK;
return r;
}
+uint32
+si_srpwr_domain_all_mask(si_t *sih)
+{
+ uint32 mask = SRPWR_DMN0_PCIE_MASK |
+ SRPWR_DMN1_ARMBPSD_MASK |
+ SRPWR_DMN2_MACAUX_MASK |
+ SRPWR_DMN3_MACMAIN_MASK;
+
+ if (si_scan_core_present(sih)) {
+ mask |= SRPWR_DMN4_MACSCAN_MASK;
+ }
+
+ return mask;
+}
+
/* Utility API to read/write the raw registers with absolute address.
* This function can be invoked from either FW or host driver.
*/
@@ -3775,3 +3863,10 @@
si_info_t *sii = SI_INFO(sih);
return sii->lhl_ps_mode;
}
+
+bool
+BCMRAMFN(si_scan_core_present)(si_t *sih)
+{
+ return ((si_numcoreunits(sih, D11_CORE_ID) >= 2) &&
+ (si_numcoreunits(sih, SR_CORE_ID) > 4));
+}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils_priv.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils_priv.h
index 7121329..fcb868d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils_priv.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -1,7 +1,9 @@
/*
* Include file private to the SOC Interconnect support files.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -157,6 +159,7 @@
void *curwrap; /**< current wrapper va */
uint32 oob_router; /**< oob router registers for axi */
+ uint32 oob_router1; /**< oob router registers for axi */
si_cores_info_t *cores_info;
gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */
@@ -296,6 +299,7 @@
extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
uint32 resetbits, void *p, volatile void *s);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.c
index 94ce274..2ebb419 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.c
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,9 +26,10 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.c 701844 2017-05-26 18:56:40Z $
+ * $Id: wl_android.c 814826 2019-04-15 05:25:59Z $
*/
+#include <linux/string.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/netlink.h>
@@ -36,6 +39,7 @@
#include <wlioctl.h>
#include <wlioctl_utils.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <linux_osl.h>
#include <dhd_dbg.h>
#include <dngl_stats.h>
@@ -49,6 +53,7 @@
#endif // endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
#endif // endif
#ifdef WL_NAN
#include <wl_cfgnan.h>
@@ -57,11 +62,27 @@
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmwifi_rspec.h>
+#include <bcmwifi_channels.h>
#include <dhd_linux.h>
#include <bcmiov.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
+#ifdef WL_BCNRECV
+#include <wl_cfgvendor.h>
+#include <brcm_nl80211.h>
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+#include <mbo.h>
+#endif /* WL_MBO */
+
+#ifdef DHD_BANDSTEER
+#include <dhd_bandsteer.h>
+#endif /* DHD_BANDSTEER */
+
+#ifdef WL_STATIC_IF
+#define WL_BSSIDX_MAX 16
+#endif /* WL_STATIC_IF */
/*
* Android private command strings, PLEASE define new private commands here
* so they can be updated easily in the future (if needed)
@@ -69,10 +90,14 @@
#define CMD_START "START"
#define CMD_STOP "STOP"
-#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
-#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
-#define CMD_RSSI "RSSI"
+
+#ifdef AUTOMOTIVE_FEATURE
+#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
+#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
+#define CMD_RSSI "RSSI"
#define CMD_LINKSPEED "LINKSPEED"
+#endif /* AUTOMOTIVE_FEATURE */
+
#define CMD_RXFILTER_START "RXFILTER-START"
#define CMD_RXFILTER_STOP "RXFILTER-STOP"
#define CMD_RXFILTER_ADD "RXFILTER-ADD"
@@ -82,12 +107,15 @@
#define CMD_BTCOEXMODE "BTCOEXMODE"
#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
#define CMD_SETSUSPENDMODE "SETSUSPENDMODE"
+#define CMD_SETDTIM_IN_SUSPEND "SET_DTIM_IN_SUSPEND"
#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND"
+#define CMD_DISDTIM_IN_SUSPEND "DISABLE_DTIM_IN_SUSPEND"
#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
#define CMD_SETFWPATH "SETFWPATH"
#define CMD_SETBAND "SETBAND"
#define CMD_GETBAND "GETBAND"
#define CMD_COUNTRY "COUNTRY"
+#define CMD_CHANNELS_IN_CC "CHANNELS_IN_CC"
#define CMD_P2P_SET_NOA "P2P_SET_NOA"
#if !defined WL_ENABLE_P2P_IF
#define CMD_P2P_GET_NOA "P2P_GET_NOA"
@@ -110,21 +138,19 @@
#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */
#define CMD_CHANSPEC "CHANSPEC"
+#ifdef AUTOMOTIVE_FEATURE
#define CMD_DATARATE "DATARATE"
+#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */
#define CMD_ASSOC_CLIENTS "ASSOCLIST"
+#endif /* AUTOMOTIVE_FEATURE */
#define CMD_SET_CSA "SETCSA"
+#define CMD_RSDB_MODE "RSDB_MODE"
#ifdef WL_SUPPORT_AUTO_CHANNEL
#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
#ifdef CUSTOMER_HW4_PRIVATE_CMD
-#ifdef SUPPORT_HIDDEN_AP
/* Hostapd private command */
-#define CMD_SET_HAPD_MAX_NUM_STA "HAPD_MAX_NUM_STA"
-#define CMD_SET_HAPD_SSID "HAPD_SSID"
-#define CMD_SET_HAPD_HIDE_SSID "HAPD_HIDE_SSID"
-#endif /* SUPPORT_HIDDEN_AP */
#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
#define CMD_HAPD_STA_DISASSOC "HAPD_STA_DISASSOC"
#endif /* SUPPORT_SOFTAP_SINGL_DISASSOC */
@@ -142,19 +168,17 @@
#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER"
#endif /* TEST_TX_POWER_CONTROL */
#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING"
+#ifdef SUPPORT_SET_TID
+#define CMD_SET_TID "SET_TID"
+#define CMD_GET_TID "GET_TID"
+#endif /* SUPPORT_SET_TID */
#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#define CMD_KEEP_ALIVE "KEEPALIVE"
-
-#ifdef BCMCCX
-/* CCX Private Commands */
-#define CMD_GETCCKM_RN "get cckm_rn"
-#define CMD_SETCCKM_KRK "set cckm_krk"
-#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies"
-
-#define CCKM_KRK_LEN 16
-#define CCKM_BTK_LEN 32
-#endif // endif
-
+#ifdef SUPPORT_HIDDEN_AP
+#define CMD_SET_HAPD_MAX_NUM_STA "MAX_NUM_STA"
+#define CMD_SET_HAPD_SSID "HAPD_SSID"
+#define CMD_SET_HAPD_HIDE_SSID "HIDE_SSID"
+#endif /* SUPPORT_HIDDEN_AP */
#ifdef PNO_SUPPORT
#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
#define CMD_PNOSETUP_SET "PNOSETUP "
@@ -163,9 +187,13 @@
#define CMD_WLS_BATCHING "WLS_BATCHING"
#endif /* PNO_SUPPORT */
+#ifdef AUTOMOTIVE_FEATURE
#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER"
+#endif /* AUTOMOTIVE_FEATURE */
+#define CMD_ADDIE "ADD_IE"
+#define CMD_DELIE "DEL_IE"
-#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#if defined(CUSTOMER_HW4_PRIVATE_CMD) || defined(IGUANA_LEGACY_CHIPS)
#ifdef ROAM_API
#define CMD_ROAMTRIGGER_SET "SETROAMTRIGGER"
@@ -176,8 +204,10 @@
#define CMD_ROAMSCANPERIOD_GET "GETROAMSCANPERIOD"
#define CMD_FULLROAMSCANPERIOD_SET "SETFULLROAMSCANPERIOD"
#define CMD_FULLROAMSCANPERIOD_GET "GETFULLROAMSCANPERIOD"
+#ifdef AUTOMOTIVE_FEATURE
#define CMD_COUNTRYREV_SET "SETCOUNTRYREV"
#define CMD_COUNTRYREV_GET "GETCOUNTRYREV"
+#endif /* AUTOMOTIVE_FEATURE */
#endif /* ROAM_API */
#if defined(SUPPORT_RANDOM_MAC_SCAN)
@@ -258,6 +288,14 @@
#define CMD_TDLS_RESET "TDLS_RESET"
#endif /* WLTDLS */
+#ifdef CONFIG_SILENT_ROAM
+#define CMD_SROAM_TURN_ON "SROAMTURNON"
+#define CMD_SROAM_SET_INFO "SROAMSETINFO"
+#define CMD_SROAM_GET_INFO "SROAMGETINFO"
+#endif /* CONFIG_SILENT_ROAM */
+
+#define CMD_SET_DISCONNECT_IES "SET_DISCONNECT_IES"
+
#ifdef FCC_PWR_LIMIT_2G
#define CMD_GET_FCC_PWR_LIMIT_2G "GET_FCC_CHANNEL"
#define CMD_SET_FCC_PWR_LIMIT_2G "SET_FCC_CHANNEL"
@@ -312,6 +350,59 @@
#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
#define CMD_WBTEXT_BTM_TIMER_THRESHOLD "WBTEXT_BTM_TIMER_THRESHOLD"
#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA"
+#define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE"
+
+#ifdef WBTEXT
+#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
+#define DEFAULT_WBTEXT_PROFILE_A_V2 "a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B_V2 "b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_A_V3 "a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B_V3 "b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_A "RSSI a 65"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_B "RSSI b 65"
+#define DEFAULT_WBTEXT_WEIGHT_CU_A "CU a 35"
+#define DEFAULT_WBTEXT_WEIGHT_CU_B "CU b 35"
+#define DEFAULT_WBTEXT_WEIGHT_ESTM_DL_A "ESTM_DL a 70"
+#define DEFAULT_WBTEXT_WEIGHT_ESTM_DL_B "ESTM_DL b 70"
+#ifdef WBTEXT_SCORE_V2
+#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
+60 70 60 70 80 20 80 128 20"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
+60 70 60 70 80 20 80 128 20"
+#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 80 20 \
+80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 70 20 \
+70 100 20"
+#else
+#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 50 90 \
+50 60 70 60 80 50 80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 25 90 \
+25 40 70 40 70 50 70 100 20"
+#endif /* WBTEXT_SCORE_V2 */
+#endif /* WBTEXT */
+
+#define BUFSZ 8
+#define BUFSZN (BUFSZ + 1)
+
+#define _S(x) #x
+#define S(x) _S(x)
+
+#define MAXBANDS 2 /**< Maximum #of bands */
+#define BAND_2G_INDEX 0
+#define BAND_5G_INDEX 1
+
+typedef union {
+ wl_roam_prof_band_v1_t v1;
+ wl_roam_prof_band_v2_t v2;
+ wl_roam_prof_band_v3_t v3;
+} wl_roamprof_band_t;
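Editor's note: wl_roamprof_band_t overlays the v1/v2/v3 roam-profile layouts so one buffer can be read back with whichever structure matches the version reported by firmware; the version tag itself travels outside the union (see the roam_prof_ver switch added later in this patch). The following is a compact, self-contained illustration of that tagged-union pattern; the structs are stand-ins, not the real wl_roam_prof_band_v* definitions from the firmware headers.

/* Stand-in structs only; the real wl_roam_prof_band_v* come from firmware headers. */
#include <stdio.h>

struct prof_v2 { int roam_trigger; int channel_usage; };
struct prof_v3 { int roam_trigger; int channel_usage; int estm_weight; };

typedef union {
	struct prof_v2 v2;
	struct prof_v3 v3;
} roamprof_sketch_t;

static int roam_trigger_of(const roamprof_sketch_t *rp, int ver)
{
	switch (ver) {
	case 2: return rp->v2.roam_trigger;
	case 3: return rp->v3.roam_trigger;
	default: return 0;	/* unknown version: caller treats as "not set" */
	}
}

int main(void)
{
	roamprof_sketch_t rp = { .v3 = { .roam_trigger = -70, .channel_usage = 0, .estm_weight = 70 } };

	printf("trigger=%d\n", roam_trigger_of(&rp, 3));
	return 0;
}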
#ifdef WLWFDS
#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH"
@@ -347,26 +438,21 @@
#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS"
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef WL_STATIC_IF
-#define WL_BSSIDX_MAX 16
-#endif /* WL_STATIC_IF */
-
+#ifdef DHD_BANDSTEER
+#define CMD_BANDSTEER "BANDSTEER"
+#define CMD_BANDSTEER_TRIGGER "TRIGGER_BANDSTEER"
+#endif /* DHD_BANDSTEER */
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
#define MIRACAST_MODE_SOURCE 1
#define MIRACAST_MODE_SINK 2
-#ifndef MIRACAST_AMPDU_SIZE
-#define MIRACAST_AMPDU_SIZE 8
-#endif // endif
+#define CMD_CHANNEL_WIDTH "CHANNEL_WIDTH"
+#define CMD_TRANSITION_DISABLE "TRANSITION_DISABLE"
-#ifndef MIRACAST_MCHAN_ALGO
-#define MIRACAST_MCHAN_ALGO 1
-#endif // endif
-
-#ifndef MIRACAST_MCHAN_BW
-#define MIRACAST_MCHAN_BW 25
-#endif // endif
+#ifdef ENABLE_HOGSQS
+#define CMD_AP_HOGSQS "HOGSQS"
+#endif /* ENABLE_HOGSQS */
#ifdef CONNECTION_STATISTICS
#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS"
@@ -403,6 +489,11 @@
void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_STATUS_LOGGING
+#define CMD_DUMP_STATUS_LOG "DUMP_STAT_LOG"
+#define CMD_QUERY_STATUS_LOG "QUERY_STAT_LOG"
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef DHD_HANG_SEND_UP_TEST
#define CMD_MAKE_HANG "MAKE_HANG"
#endif /* CMD_DHD_HANG_SEND_UP_TEST */
@@ -419,6 +510,18 @@
struct list_head list;
};
+typedef enum {
+ HEAD_SAR_BACKOFF_DISABLE = -1,
+ HEAD_SAR_BACKOFF_ENABLE = 0,
+ GRIP_SAR_BACKOFF_DISABLE,
+ GRIP_SAR_BACKOFF_ENABLE,
+ NR_mmWave_SAR_BACKOFF_DISABLE,
+ NR_mmWave_SAR_BACKOFF_ENABLE,
+ NR_Sub6_SAR_BACKOFF_DISABLE,
+ NR_Sub6_SAR_BACKOFF_ENABLE,
+ SAR_BACKOFF_DISABLE_ALL
+} sar_modes;
+
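Editor's note: sar_modes spans -1 (HEAD_SAR_BACKOFF_DISABLE) through SAR_BACKOFF_DISABLE_ALL, so a handler for the SET_TX_POWER_CALLING private command (CMD_SARLIMIT_TX_CONTROL above) would typically just range-check the parsed integer before forwarding it to firmware. The handler itself is not part of this hunk, so the command layout and the check below are assumptions for illustration only; the sketch is standalone C.

/* Illustrative sketch only; mirrors the sar_modes range above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { SAR_MIN = -1 /* HEAD_SAR_BACKOFF_DISABLE */,
       SAR_MAX = 7  /* SAR_BACKOFF_DISABLE_ALL */ };

static int parse_sar_mode(const char *cmd)
{
	/* e.g. "SET_TX_POWER_CALLING 3" parses to 3 (NR_mmWave_SAR_BACKOFF_DISABLE above) */
	int mode = (int)strtol(cmd + strlen("SET_TX_POWER_CALLING") + 1, NULL, 10);

	if (mode < SAR_MIN || mode > SAR_MAX)
		return -2;	/* reject out-of-range requests */
	return mode;
}

int main(void)
{
	printf("%d\n", parse_sar_mode("SET_TX_POWER_CALLING 3"));
	return 0;
}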
#if defined(BCMFW_ROAM_ENABLE)
#define CMD_SET_ROAMPREF "SET_ROAMPREF"
@@ -495,6 +598,9 @@
#endif /* WL_NATOE */
+static int
+wl_android_get_channel_list(struct net_device *dev, char *command, int total_len);
+
#ifdef SET_PCIE_IRQ_CPU_CORE
#define CMD_PCIE_IRQ_CORE "PCIE_IRQ_CORE"
#endif /* SET_PCIE_IRQ_CPU_CORE */
@@ -510,6 +616,7 @@
#define CMD_PKTLOG_FILTER_PATTERN_ENABLE "PKTLOG_FILTER_PATTERN_ENABLE"
#define CMD_PKTLOG_FILTER_PATTERN_DISABLE "PKTLOG_FILTER_PATTERN_DISABLE"
#define CMD_PKTLOG_FILTER_ADD "PKTLOG_FILTER_ADD"
+#define CMD_PKTLOG_FILTER_DEL "PKTLOG_FILTER_DEL"
#define CMD_PKTLOG_FILTER_INFO "PKTLOG_FILTER_INFO"
#define CMD_PKTLOG_START "PKTLOG_START"
#define CMD_PKTLOG_STOP "PKTLOG_STOP"
@@ -523,6 +630,63 @@
#define CMD_EWP_FILTER "EWP_FILTER"
#endif /* DHD_EVENT_LOG_FILTER */
+#ifdef WL_BCNRECV
+#define CMD_BEACON_RECV "BEACON_RECV"
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define CMD_CAC_TSPEC "CAC_TSPEC"
+#endif /* WL_CAC_TS */
+#ifdef WL_CHAN_UTIL
+#define CMD_GET_CHAN_UTIL "GET_CU"
+#endif /* WL_CHAN_UTIL */
+
+/* drv command info structure */
+typedef struct wl_drv_cmd_info {
+ uint8 *command; /* pointer to the actual command */
+ uint16 tot_len; /* total length of the command */
+ uint16 bytes_written; /* Bytes written for get response */
+} wl_drv_cmd_info_t;
+
+typedef struct wl_drv_sub_cmd wl_drv_sub_cmd_t;
+typedef int (drv_cmd_handler_t)(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+
+struct wl_drv_sub_cmd {
+ char *name;
+ uint8 version; /* cmd version */
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument */
+ drv_cmd_handler_t *handler; /* cmd handler */
+};
+
+#ifdef WL_MBO
+
+#define CMD_MBO "MBO"
+enum {
+ WL_MBO_CMD_NON_CHAN_PREF = 1,
+ WL_MBO_CMD_CELL_DATA_CAP = 2
+};
+#define WL_ANDROID_MBO_FUNC(suffix) wl_android_mbo_subcmd_ ##suffix
+
+static int wl_android_process_mbo_cmd(struct net_device *dev,
+ char *command, int total_len);
+static int wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+static int wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+
+static const wl_drv_sub_cmd_t mbo_cmd_list[] = {
+ {"non_pref_chan", 0x01, WL_MBO_CMD_NON_CHAN_PREF,
+ IOVT_BUFFER, WL_ANDROID_MBO_FUNC(non_pref_chan)
+ },
+ {"cell_data_cap", 0x01, WL_MBO_CMD_CELL_DATA_CAP,
+ IOVT_BUFFER, WL_ANDROID_MBO_FUNC(cell_data_cap)
+ },
+ {NULL, 0, 0, 0, NULL}
+};
+
+#endif /* WL_MBO */
+
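Editor's note: mbo_cmd_list pairs sub-command names with handlers, terminated by a NULL sentinel row; wl_android_process_mbo_cmd (declared above, presumably defined later in the file) walks the table and dispatches on the matching entry. Below is a minimal, self-contained sketch of that table-walk pattern with stand-in types, so it compiles outside the driver; it is not the driver's actual dispatcher.

/* Illustrative sketch of name-based sub-command dispatch; stand-in types. */
#include <stdio.h>
#include <string.h>

typedef int (handler_t)(const char *args);

struct sub_cmd {
	const char *name;
	handler_t *handler;
};

static int cell_data_cap(const char *args) { printf("cell_data_cap %s\n", args); return 0; }
static int non_pref_chan(const char *args) { printf("non_pref_chan %s\n", args); return 0; }

static const struct sub_cmd cmd_list[] = {
	{ "non_pref_chan", non_pref_chan },
	{ "cell_data_cap", cell_data_cap },
	{ NULL, NULL }		/* sentinel, as in mbo_cmd_list above */
};

static int dispatch(const char *sub, const char *args)
{
	const struct sub_cmd *c;

	for (c = cmd_list; c->name != NULL; c++)
		if (strncmp(c->name, sub, strlen(c->name)) == 0)
			return c->handler(args);
	return -1;		/* unknown sub-command */
}

int main(void)
{
	return dispatch("cell_data_cap", "1");
}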
#ifdef WL_GENL
static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
static int wl_genl_init(void);
@@ -538,6 +702,7 @@
[BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
};
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) */
+
#define WL_GENL_VER 1
/* family definition */
static struct genl_family wl_genl_family = {
@@ -597,6 +762,11 @@
#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */
#endif /* SUPPORT_LQCM */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+#define NUMBER_SEQUENTIAL_PRIVCMD_ERRORS 7
+static int priv_cmd_errors = 0;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
/**
* Extern function declarations (TODO: move them to dhd_linux.h)
*/
@@ -629,12 +799,17 @@
char *command, int total_len);
static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
char *command, int total_len);
+static int wl_cfg80211_wbtext_estm_enable(struct net_device *dev,
+ char *command, int total_len);
+static int wlc_wbtext_get_roam_prof(struct net_device *ndev, wl_roamprof_band_t *rp,
+ uint8 band, uint8 *roam_prof_ver, uint8 *roam_prof_size);
#endif /* WBTEXT */
#ifdef WES_SUPPORT
/* wl_roam.c */
extern int get_roamscan_mode(struct net_device *dev, int *mode);
extern int set_roamscan_mode(struct net_device *dev, int mode);
-extern int get_roamscan_channel_list(struct net_device *dev, unsigned char channels[]);
+extern int get_roamscan_channel_list(struct net_device *dev,
+ unsigned char channels[], int n_channels);
extern int set_roamscan_channel_list(struct net_device *dev, unsigned char n,
unsigned char channels[], int ioctl_ver);
#endif /* WES_SUPPORT */
@@ -654,6 +829,117 @@
extern bool g_pm_control;
#endif /* DHD_PM_CONTROL_FROM_FILE */
+/* private command support for restoring roam/scan parameters */
+#ifdef SUPPORT_RESTORE_SCAN_PARAMS
+#define CMD_RESTORE_SCAN_PARAMS "RESTORE_SCAN_PARAMS"
+
+typedef int (*PRIV_CMD_HANDLER) (struct net_device *dev, char *command);
+typedef int (*PRIV_CMD_HANDLER_WITH_LEN) (struct net_device *dev, char *command, int total_len);
+
+enum {
+ RESTORE_TYPE_UNSPECIFIED = 0,
+ RESTORE_TYPE_PRIV_CMD = 1,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN = 2
+};
+
+typedef struct android_restore_scan_params {
+ char command[64];
+ int parameter;
+ int cmd_type;
+ union {
+ PRIV_CMD_HANDLER cmd_handler;
+ PRIV_CMD_HANDLER_WITH_LEN cmd_handler_w_len;
+ };
+} android_restore_scan_params_t;
+
+/* function prototypes of private command handler */
+static int wl_android_set_roam_trigger(struct net_device *dev, char* command);
+int wl_android_set_roam_delta(struct net_device *dev, char* command);
+int wl_android_set_roam_scan_period(struct net_device *dev, char* command);
+int wl_android_set_full_roam_scan_period(struct net_device *dev, char* command, int total_len);
+int wl_android_set_roam_scan_control(struct net_device *dev, char *command);
+int wl_android_set_scan_channel_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_away_time(struct net_device *dev, char *command);
+int wl_android_set_scan_nprobes(struct net_device *dev, char *command);
+static int wl_android_set_band(struct net_device *dev, char *command);
+int wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command);
+int wl_android_set_wes_mode(struct net_device *dev, char *command);
+int wl_android_set_okc_mode(struct net_device *dev, char *command);
+
+/* default values */
+#ifdef ROAM_API
+#define DEFAULT_ROAM_TIRGGER -75
+#define DEFAULT_ROAM_DELTA 10
+#define DEFAULT_ROAMSCANPERIOD 10
+#define DEFAULT_FULLROAMSCANPERIOD_SET 120
+#endif /* ROAM_API */
+#ifdef WES_SUPPORT
+#define DEFAULT_ROAMSCANCONTROL 0
+#define DEFAULT_SCANCHANNELTIME 40
+#ifdef BCM4361_CHIP
+#define DEFAULT_SCANHOMETIME 60
+#else
+#define DEFAULT_SCANHOMETIME 45
+#endif /* BCM4361_CHIP */
+#define DEFAULT_SCANHOMEAWAYTIME 100
+#define DEFAULT_SCANPROBES 2
+#define DEFAULT_DFSSCANMODE 1
+#define DEFAULT_WESMODE 0
+#define DEFAULT_OKCMODE 1
+#endif /* WES_SUPPORT */
+#define DEFAULT_BAND 0
+#ifdef WBTEXT
+#define DEFAULT_WBTEXT_ENABLE 1
+#endif /* WBTEXT */
+
+/* restoring parameter list, please don't change order */
+static android_restore_scan_params_t restore_params[] =
+{
+/* wbtext needs to be disabled while updating roam/scan parameters */
+#ifdef WBTEXT
+ { CMD_WBTEXT_ENABLE, 0, RESTORE_TYPE_PRIV_CMD_WITH_LEN,
+ .cmd_handler_w_len = wl_android_wbtext},
+#endif /* WBTEXT */
+#ifdef ROAM_API
+ { CMD_ROAMTRIGGER_SET, DEFAULT_ROAM_TIRGGER,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_trigger},
+ { CMD_ROAMDELTA_SET, DEFAULT_ROAM_DELTA,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_delta},
+ { CMD_ROAMSCANPERIOD_SET, DEFAULT_ROAMSCANPERIOD,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_period},
+ { CMD_FULLROAMSCANPERIOD_SET, DEFAULT_FULLROAMSCANPERIOD_SET,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN,
+ .cmd_handler_w_len = wl_android_set_full_roam_scan_period},
+#endif /* ROAM_API */
+#ifdef WES_SUPPORT
+ { CMD_SETROAMSCANCONTROL, DEFAULT_ROAMSCANCONTROL,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_control},
+ { CMD_SETSCANCHANNELTIME, DEFAULT_SCANCHANNELTIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_channel_time},
+ { CMD_SETSCANHOMETIME, DEFAULT_SCANHOMETIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_home_time},
+ { CMD_GETSCANHOMEAWAYTIME, DEFAULT_SCANHOMEAWAYTIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_home_away_time},
+ { CMD_SETSCANNPROBES, DEFAULT_SCANPROBES,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_nprobes},
+ { CMD_SETDFSSCANMODE, DEFAULT_DFSSCANMODE,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_dfs_channel_mode},
+ { CMD_SETWESMODE, DEFAULT_WESMODE,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_wes_mode},
+ { CMD_SETOKCMODE, DEFAULT_OKCMODE,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_okc_mode},
+#endif /* WES_SUPPORT */
+ { CMD_SETBAND, DEFAULT_BAND,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_band},
+#ifdef WBTEXT
+ { CMD_WBTEXT_ENABLE, DEFAULT_WBTEXT_ENABLE,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN, .cmd_handler_w_len = wl_android_wbtext},
+#endif /* WBTEXT */
+ { "\0", 0, RESTORE_TYPE_UNSPECIFIED, .cmd_handler = NULL}
+};
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS */
+
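Editor's note: restore_params is a sentinel-terminated table where each row carries a default value, a cmd_type, and a union holding one of two handler signatures. The consumer is not shown in this hunk; presumably it formats "<command> <parameter>" and invokes the matching union member based on cmd_type. The sketch below shows that pattern with stand-in handlers and two representative rows; it compiles standalone and is not the driver's code.

/* Illustrative sketch of the restore-table walk; stand-in handlers only. */
#include <stdio.h>
#include <string.h>

typedef int (*cmd_fn)(char *command);
typedef int (*cmd_fn_len)(char *command, int total_len);

enum { TYPE_UNSPEC = 0, TYPE_CMD = 1, TYPE_CMD_LEN = 2 };

struct restore_entry {
	char command[64];
	int parameter;
	int cmd_type;
	union {
		cmd_fn handler;
		cmd_fn_len handler_w_len;
	};
};

static int set_roam_trigger(char *cmd) { printf("-> %s\n", cmd); return 0; }
static int set_wbtext(char *cmd, int len) { printf("-> %.*s\n", len, cmd); return 0; }

static const struct restore_entry table[] = {
	{ "WBTEXT_ENABLE", 0, TYPE_CMD_LEN, .handler_w_len = set_wbtext },
	{ "SETROAMTRIGGER", -75, TYPE_CMD, .handler = set_roam_trigger },
	{ "", 0, TYPE_UNSPEC, .handler = NULL },	/* sentinel row */
};

int main(void)
{
	char buf[96];
	const struct restore_entry *e;

	for (e = table; e->command[0] != '\0'; e++) {
		snprintf(buf, sizeof(buf), "%s %d", e->command, e->parameter);
		if (e->cmd_type == TYPE_CMD)
			e->handler(buf);
		else if (e->cmd_type == TYPE_CMD_LEN)
			e->handler_w_len(buf, (int)strlen(buf));
	}
	return 0;
}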
/**
* Local (static) functions and variables
*/
@@ -668,6 +954,204 @@
* Local (static) function definitions
*/
+static int
+wl_android_set_channel_width(struct net_device *dev, char *command, int total_len)
+{
+ u32 channel_width = 0;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+ command = (command + strlen(CMD_CHANNEL_WIDTH));
+ command++;
+ channel_width = bcm_atoi(command);
+ if (channel_width == 80)
+ wl_set_chanwidth_by_netdev(cfg, dev, WL_CHANSPEC_BW_80);
+ else if (channel_width == 40)
+ wl_set_chanwidth_by_netdev(cfg, dev, WL_CHANSPEC_BW_40);
+ else if (channel_width == 20)
+ wl_set_chanwidth_by_netdev(cfg, dev, WL_CHANSPEC_BW_20);
+ else
+ return 0;
+ DHD_INFO(("%s : channel width = %d\n", __FUNCTION__, channel_width));
+ return 0;
+}
+
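Editor's note: wl_android_set_channel_width skips past the command name plus one separator, reads a decimal width, and accepts only 20/40/80; anything else is silently ignored. The standalone sketch below mirrors that argument handling; the bandwidth codes are stand-ins, not the real WL_CHANSPEC_BW_* encodings from bcmwifi_channels.h.

/* Stand-in bandwidth codes; real values come from bcmwifi_channels.h. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { BW_20 = 1, BW_40 = 2, BW_80 = 3, BW_INVALID = 0 };

static int parse_channel_width(const char *command)
{
	int width = atoi(command + strlen("CHANNEL_WIDTH") + 1);

	switch (width) {
	case 80: return BW_80;
	case 40: return BW_40;
	case 20: return BW_20;
	default: return BW_INVALID;	/* unsupported width: ignored, as above */
	}
}

int main(void)
{
	printf("%d\n", parse_channel_width("CHANNEL_WIDTH 80"));
	return 0;
}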
+#ifdef ENABLE_HOGSQS
+#define M_HOGSQS_DURATION (M_HOGSQS_CFG + 0x2)
+#define M_HOGSQS_DUR_THR (M_HOGSQS_CFG + 0x4)
+#define M_HOGSQS_STAT (M_HOGSQS_CFG + 0x6)
+#define M_HOGSQS_TXCFE_DET_CNT (M_HOGSQS_CFG + 0xe)
+static int
+wl_android_hogsqs(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0, bytes_written = 0;
+ s32 value = 0;
+ uint32 reg = 0;
+ uint32 set_val = 0;
+ uint32 set_val2 = 0;
+ char *pos = command;
+ char *pos2 = NULL;
+
+ if (*(command + strlen(CMD_AP_HOGSQS)) == '\0') {
+		DHD_ERROR(("%s: Error: an argument is required for %s\n", __FUNCTION__, CMD_AP_HOGSQS));
+ return -EINVAL;
+ } else {
+ pos = pos + strlen(CMD_AP_HOGSQS) + 1;
+ if (!strncmp(pos, "cfg", strlen("cfg"))) {
+ reg = M_HOGSQS_CFG;
+ pos2 = pos + strlen("cfg");
+ } else if (!strncmp(pos, "duration", strlen("duration"))) {
+ reg = M_HOGSQS_DURATION;
+ pos2 = pos + strlen("duration");
+ } else if (!strncmp(pos, "durth", strlen("durth"))) {
+ reg = M_HOGSQS_DUR_THR;
+ pos2 = pos + strlen("durth");
+ } else if (!strncmp(pos, "count", strlen("count"))) {
+ reg = M_HOGSQS_TXCFE_DET_CNT;
+ pos2 = pos + strlen("count");
+ } else {
+			DHD_ERROR(("%s: Error: invalid argument for %s\n", __FUNCTION__,
+				CMD_AP_HOGSQS));
+ return -EINVAL;
+ }
+ value = reg;
+
+ if (*pos2 == '\0') {
+ /* Get operation */
+ ret = wldev_iovar_getint(dev, "hogsqs", &value);
+ if (ret) {
+ DHD_ERROR(("%s: Failed to get hogsqs\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (reg == M_HOGSQS_TXCFE_DET_CNT)
+ bytes_written = snprintf(command, total_len, " %s 0x%x/0x%x",
+ CMD_AP_HOGSQS, (value&0x00FF), ((value&0xFF00)>> 8));
+ else
+ bytes_written = snprintf(command, total_len, " %s 0x%x",
+ CMD_AP_HOGSQS, value);
+
+ return bytes_written;
+ } else {
+ /* Set operation */
+ pos2 = pos2 + 1;
+ set_val = (uint32)simple_strtol(pos2, NULL, 0);
+
+ set_val2 = (reg & 0xFFFF) << 16;
+ set_val2 |= set_val;
+
+ ret = wldev_iovar_setint(dev, "hogsqs", set_val2);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: hogsqs set returned (%d)\n", __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+ }
+ }
+ return 0;
+}
+#endif /* ENABLE_HOGSQS */
+
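Editor's note: in the set path above, the shared-memory register offset is packed into the upper 16 bits of the hogsqs iovar value and the new register value goes in the low half (set_val2 = (reg & 0xFFFF) << 16, then |= set_val). The standalone sketch below just demonstrates that pack/unpack, which can be handy when sanity-checking values seen in logs; it is illustrative only.

/* Illustrative pack/unpack of the hogsqs iovar payload used above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hogsqs_pack(uint32_t reg, uint32_t val)
{
	return ((reg & 0xFFFFu) << 16) | (val & 0xFFFFu);
}

int main(void)
{
	uint32_t packed = hogsqs_pack(0x1A2, 0x3);

	printf("packed=0x%08x reg=0x%x val=0x%x\n",
	       (unsigned)packed,
	       (unsigned)((packed >> 16) & 0xFFFFu),
	       (unsigned)(packed & 0xFFFFu));
	return 0;
}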
+#ifdef DHD_BANDSTEER
+static int
+wl_android_set_bandsteer(struct net_device *dev, char *command, int total_len)
+{
+ char *iftype;
+ char *token1, *context1 = NULL;
+ int val;
+ int ret = 0;
+
+ struct wireless_dev *__wdev = (struct wireless_dev *)(dev)->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(__wdev->wiphy);
+
+ command = (command + strlen(CMD_BANDSTEER));
+ command++;
+ token1 = command;
+
+ iftype = bcmstrtok(&token1, " ", context1);
+ val = bcm_atoi(token1);
+
+ if (val < 0 || val > 1) {
+ DHD_ERROR(("%s : invalid val\n", __FUNCTION__));
+ return -1;
+ }
+
+ if (!strncmp(iftype, "p2p", 3)) {
+ cfg->ap_bs = 0;
+ cfg->p2p_bs = 1;
+
+ if (val) {
+ ret = dhd_bandsteer_module_init(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Failed to enable %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully enabled %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ }
+ } else {
+ ret = dhd_bandsteer_module_deinit(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Failed to disable %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully disabled %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ }
+ }
+ } else if (!strncmp(iftype, "ap", 2)) {
+ cfg->ap_bs = 1;
+ cfg->p2p_bs = 0;
+
+ if (val) {
+ ret = dhd_bandsteer_module_init(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Failed to enable %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully enabled %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ }
+ } else {
+ ret = dhd_bandsteer_module_deinit(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Failed to disable %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully disabled %s bandsteer\n", __FUNCTION__,
+ cfg->ap_bs ? "ap":"p2p"));
+ }
+ }
+ } else if (!strncmp(iftype, "1", 1)) {
+ cfg->ap_bs = 1;
+ cfg->p2p_bs = 1;
+ ret = dhd_bandsteer_module_init(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Failed to enable bandsteer\n", __FUNCTION__));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully enabled bandsteer\n", __FUNCTION__));
+ }
+ } else if (!strncmp(iftype, "0", 1)) {
+ cfg->ap_bs = 1;
+ cfg->p2p_bs = 1;
+ ret = dhd_bandsteer_module_deinit(dev, cfg->ap_bs, cfg->p2p_bs);
+ if (ret == BCME_ERROR) {
+		DHD_ERROR(("%s: Failed to disable bandsteer\n", __FUNCTION__));
+ return ret;
+ } else {
+ DHD_ERROR(("%s: Successfully disabled bandsteer\n", __FUNCTION__));
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid bandsteer iftype\n", __FUNCTION__));
+ return -1;
+ }
+ return ret;
+}
+#endif /* DHD_BANDSTEER */
+
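Editor's note: the four branches of wl_android_set_bandsteer differ only in which of ap_bs/p2p_bs they set and whether init or deinit is called; the logging is duplicated verbatim in each branch. One possible consolidation is sketched below, using only the functions and fields visible in this patch. It is a sketch of a design alternative, not the author's implementation, and is not applied by this change.

/* Sketch only (assumes the same fields/functions as above); not applied. */
static int wl_android_set_bandsteer_one(struct net_device *dev,
	struct bcm_cfg80211 *cfg, int ap_bs, int p2p_bs, int enable)
{
	int ret;

	cfg->ap_bs = ap_bs;
	cfg->p2p_bs = p2p_bs;
	ret = enable ? dhd_bandsteer_module_init(dev, ap_bs, p2p_bs)
		     : dhd_bandsteer_module_deinit(dev, ap_bs, p2p_bs);
	if (ret == BCME_ERROR)
		DHD_ERROR(("%s: Failed to %s bandsteer\n", __FUNCTION__,
			enable ? "enable" : "disable"));
	else
		DHD_ERROR(("%s: Successfully %s bandsteer\n", __FUNCTION__,
			enable ? "enabled" : "disabled"));
	return ret;
}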
#ifdef WLWFDS
static int wl_android_set_wfds_hash(
struct net_device *dev, char *command, bool enable)
@@ -679,8 +1163,8 @@
smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
if (smbuf == NULL) {
- DHD_ERROR(("%s: failed to allocated memory %d bytes\n",
- __FUNCTION__, WLC_IOCTL_MAXLEN));
+		DHD_ERROR(("wl_android_set_wfds_hash: failed to allocate memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
return -ENOMEM;
}
@@ -696,7 +1180,7 @@
}
if (error) {
- DHD_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
+ DHD_ERROR(("wl_android_set_wfds_hash: failed to %s, error=%d\n", command, error));
}
if (smbuf) {
@@ -706,6 +1190,7 @@
}
#endif /* WLWFDS */
+#ifdef AUTOMOTIVE_FEATURE
static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
{
int link_speed;
@@ -721,7 +1206,7 @@
/* Convert Kbps to Android Mbps */
link_speed = link_speed / 1000;
bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
- DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ DHD_INFO(("wl_android_get_link_speed: command result is %s\n", command));
return bytes_written;
}
@@ -748,15 +1233,15 @@
/* Ap/GO mode
* driver rssi <sta_mac_addr>
*/
- DHD_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
+ DHD_TRACE(("wl_android_get_rssi: cmd:%s\n", delim));
/* skip space from delim after finding char */
delim++;
if (!(bcm_ether_atoe((delim), &scbval.ea))) {
- DHD_ERROR(("%s:address err\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_get_rssi: address err\n"));
return -1;
}
scbval.val = htod32(0);
- DHD_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
+ DHD_TRACE(("wl_android_get_rssi: address:"MACDBG, MAC2STRDBG(scbval.ea.octet)));
#ifdef WL_VIRTUAL_APSTA
/* RSDB AP may have another virtual interface
* In this case, format of private command is as following,
@@ -780,7 +1265,7 @@
}
else {
/* STA/GC mode */
- memset(&scbval, 0, sizeof(scb_val_t));
+ bzero(&scbval, sizeof(scb_val_t));
}
error = wldev_get_rssi(target_ndev, &scbval);
@@ -791,7 +1276,7 @@
if (error)
return -1;
if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
- DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_get_rssi: wldev_get_ssid failed\n"));
} else if (total_len <= ssid.SSID_len) {
return -ENOMEM;
} else {
@@ -805,9 +1290,10 @@
" rssi %d", scbval.val);
command[bytes_written] = '\0';
- DHD_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ DHD_TRACE(("wl_android_get_rssi: command result is %s (%d)\n", command, bytes_written));
return bytes_written;
}
+#endif /* AUTOMOTIVE_FEATURE */
static int wl_android_set_suspendopt(struct net_device *dev, char *command)
{
@@ -824,10 +1310,10 @@
if (ret_now != suspend_flag) {
if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
- DHD_INFO(("%s: Suspend Flag %d -> %d\n",
- __FUNCTION__, ret_now, suspend_flag));
+ DHD_INFO(("wl_android_set_suspendopt: Suspend Flag %d -> %d\n",
+ ret_now, suspend_flag));
} else {
- DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_suspendopt: failed %d\n", ret));
}
}
@@ -846,14 +1332,15 @@
suspend_flag = 1;
if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
- DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+ DHD_INFO(("wl_android_set_suspendmode: Suspend Mode %d\n", suspend_flag));
else
- DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_suspendmode: failed %d\n", ret));
#endif // endif
return ret;
}
+#ifdef AUTOMOTIVE_FEATURE
int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len)
{
uint8 mode[5];
@@ -864,12 +1351,13 @@
if (error)
return -1;
- DHD_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
+ DHD_INFO(("wl_android_get_80211_mode: mode:%s\n", mode));
bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
- DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ DHD_INFO(("wl_android_get_80211_mode: command:%s EXIT\n", command));
return bytes_written;
}
+#endif /* AUTOMOTIVE_FEATURE */
extern chanspec_t
wl_chspec_driver_to_host(chanspec_t chanspec);
@@ -892,13 +1380,14 @@
return -1;
chanspec = wl_chspec_driver_to_host(chsp);
- DHD_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
+	DHD_INFO(("wl_android_get_chanspec: return value of chanspec:%x\n", chanspec));
channel = chanspec & WL_CHANSPEC_CHAN_MASK;
band = chanspec & WL_CHANSPEC_BAND_MASK;
bw = chanspec & WL_CHANSPEC_BW_MASK;
- DHD_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw));
+	DHD_INFO(("wl_android_get_chanspec: channel:%d band:%d bandwidth:%d\n",
+ channel, band, bw));
if (bw == WL_CHANSPEC_BW_80)
bw = WL_CH_BANDWIDTH_80MHZ;
@@ -932,12 +1421,29 @@
bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
- DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ DHD_INFO(("wl_android_get_chanspec: command:%s EXIT\n", command));
return bytes_written;
}
+/* returns whether rsdb supported or not */
+int wl_android_get_rsdb_mode(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhd = wl_cfg80211_get_dhdp(dev);
+ int rsdb_mode = 0;
+
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ rsdb_mode = 1;
+ }
+ DHD_INFO(("wl_android_get_rsdb_mode: rsdb_mode:%d\n", rsdb_mode));
+
+ bytes_written = snprintf(command, total_len, "%d", rsdb_mode);
+ return bytes_written;
+}
+
/* returns current datarate datarate returned from firmware are in 500kbps */
+#ifdef AUTOMOTIVE_FEATURE
int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
{
int error = 0;
@@ -948,11 +1454,12 @@
if (error)
return -1;
- DHD_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
+ DHD_INFO(("wl_android_get_datarate: datarate:%d\n", datarate));
bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
return bytes_written;
}
+
int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len)
{
int error = 0;
@@ -963,7 +1470,7 @@
sizeof(struct ether_addr) + sizeof(uint)] = {0};
struct maclist *assoc_maclist = (struct maclist *)mac_buf;
- DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+ DHD_TRACE(("wl_android_get_assoclist: ENTER\n"));
assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
@@ -984,14 +1491,64 @@
if ((len > 0) && (len < (total_len - bytes_written))) {
bytes_written += len;
} else {
- DHD_ERROR(("%s: Insufficient buffer %d, bytes_written %d\n",
- __FUNCTION__, total_len, bytes_written));
+ DHD_ERROR(("wl_android_get_assoclist: Insufficient buffer %d,"
+ " bytes_written %d\n",
+ total_len, bytes_written));
bytes_written = -1;
break;
}
}
return bytes_written;
}
+#endif /* AUTOMOTIVE_FEATURE */
+static int wl_android_get_channel_list(struct net_device *dev, char *command, int total_len)
+{
+
+ int error = 0, len = 0, i;
+ char smbuf[WLC_IOCTL_SMLEN] = {0};
+ wl_channels_in_country_t *cic;
+ char band[2];
+ char *pos = command;
+
+ cic = (wl_channels_in_country_t *)smbuf;
+
+ pos = pos + strlen(CMD_CHANNELS_IN_CC) + 1;
+
+	sscanf(pos, "%3s %1s", cic->country_abbrev, band);
+	DHD_INFO(("%s: country %s and band %s\n", __FUNCTION__, cic->country_abbrev, band));
+	len = strlen(cic->country_abbrev);
+	if ((len > 3) || (len < 2)) {
+		DHD_ERROR(("%s: invalid country abbrev\n", __FUNCTION__));
+ return -1;
+ }
+
+ if (!strcmp(band, "a") || !strcmp(band, "A"))
+ cic->band = WLC_BAND_5G;
+ else if (!strcmp(band, "b") || !strcmp(band, "B"))
+ cic->band = WLC_BAND_2G;
+ else {
+		DHD_ERROR(("%s: unsupported band %s\n", __FUNCTION__, band));
+ return -1;
+ }
+
+ cic->count = 0;
+ cic->buflen = WL_EXTRA_BUF_MAX;
+
+ error = wldev_ioctl_get(dev, WLC_GET_CHANNELS_IN_COUNTRY, cic, sizeof(smbuf));
+ if (error) {
+ DHD_ERROR(("%s :Failed to get channels \n", __FUNCTION__));
+ return -1;
+ }
+
+ if (cic->count == 0)
+ return -1;
+
+ for (i = 0; i < (cic->count); i++) {
+		pos += snprintf(pos, total_len - (pos - command), " %d", (cic->channel[i]));
+ }
+ return (pos - command);
+}
+
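Editor's note: the CHANNELS_IN_CC response is built by appending one " %d" per channel, with each snprintf bounded by the space remaining in the caller's buffer (as in the loop above). The standalone sketch below demonstrates that bounded-append pattern with a fixed channel list; it is illustrative only.

/* Illustrative bounded append of a channel list into a fixed buffer. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int channels[] = { 36, 40, 44, 48, 149, 153 };
	char buf[32];
	char *pos = buf;
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
		size_t left = sizeof(buf) - (size_t)(pos - buf);
		int n = snprintf(pos, left, " %d", channels[i]);

		if (n < 0 || (size_t)n >= left)
			break;		/* buffer exhausted: stop, don't overflow */
		pos += n;
	}
	printf("channels:%s\n", buf);
	return 0;
}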
extern chanspec_t
wl_chspec_host_to_driver(chanspec_t chanspec);
static int wl_android_set_csa(struct net_device *dev, char *command)
@@ -1001,13 +1558,38 @@
wl_chan_switch_t csa_arg;
u32 chnsp = 0;
int err = 0;
+ char *str, str_chan[8];
+ uint default_bw = WL_CHANSPEC_BW_20;
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+#endif // endif
- DHD_INFO(("%s: command:%s\n", __FUNCTION__, command));
+ DHD_INFO(("wl_android_set_csa: command:%s\n", command));
+
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ if (!(wdev->iftype == NL80211_IFTYPE_AP || wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+		DHD_ERROR(("%s: error: csa is only for AP/AGO mode (%d)\n", __FUNCTION__,
+ wdev->iftype));
+ return -1;
+ }
+#endif // endif
+
+ /*
+	 * The SETCSA driver command lets an AP/AGO switch its own channel and
+	 * move its connected STAs along with it: a CSA frame is sent, and the
+	 * STAs switch to the channel carried in that frame.
+ * Usage:
+ * > IFNAME=<group_iface_name> DRIVER SETCSA mode count channel frame_type
+ * > IFNAME=<group_iface_name> DRIVER SETCSA 0 10 1 u
+ * If no frame type is specified, frame_type=0 (Broadcast frame type)
+ */
command = (command + strlen(CMD_SET_CSA));
/* Order is mode, count channel */
if (!*++command) {
- DHD_ERROR(("%s:error missing arguments\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_csa:error missing arguments\n"));
return -1;
}
csa_arg.mode = bcm_atoi(command);
@@ -1018,7 +1600,7 @@
}
if (!*++command) {
- DHD_ERROR(("%s:error missing count\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_csa: error missing count\n"));
return -1;
}
command++;
@@ -1026,22 +1608,53 @@
csa_arg.reg = 0;
csa_arg.chspec = 0;
- command += 2;
- if (!*command) {
- DHD_ERROR(("%s:error missing channel\n", __FUNCTION__));
+
+ str = strchr(command, ' ');
+ if (str == NULL) {
+ DHD_ERROR(("wl_android_set_csa: error missing channel\n"));
return -1;
}
+ command = ++str;
- chnsp = wf_chspec_aton(command);
- if (chnsp == 0) {
- DHD_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
+	str = strchr(command, ' ');
+	memset(str_chan, 0, sizeof(str_chan));
+	if (str != NULL) {
+		strncpy(str_chan, command,
+			((size_t)(str - command) < sizeof(str_chan) - 1) ?
+			(size_t)(str - command) : sizeof(str_chan) - 1);
+	} else {
+		strncpy(str_chan, command, sizeof(str_chan) - 1);
+	}
+
+ /* Get current chanspec to retrieve the current bandwidth */
+ error = wldev_iovar_getint(dev, "chanspec", &chnsp);
+ if (error == BCME_OK) {
+ chnsp = wl_chspec_driver_to_host(chnsp);
+ /* Use current bandwidth as default if it is not specified in cmd string */
+ default_bw = chnsp & WL_CHANSPEC_BW_MASK;
+ }
+
+ chnsp = wf_chspec_aton_ex(str_chan, default_bw);
+
+ if (chnsp == 0) {
+ DHD_ERROR(("wl_android_set_csa:chsp is not correct\n"));
return -1;
}
chnsp = wl_chspec_host_to_driver(chnsp);
csa_arg.chspec = chnsp;
+ /* csa action frame type */
+	if (str != NULL) {
+		if (strcmp(++str, "u") == 0) {
+			csa_arg.frame_type = CSA_UNICAST_ACTION_FRAME;
+		} else {
+			DHD_ERROR(("%s: error: invalid frame type: %s\n",
+				__FUNCTION__, str));
+ return -1;
+ }
+ } else {
+ csa_arg.frame_type = CSA_BROADCAST_ACTION_FRAME;
+ }
+
if (chnsp & WL_CHANSPEC_BAND_5G) {
- u32 chanspec = chnsp;
+ u32 chanspec = wf_chspec_ctlchan(chnsp);
err = wldev_iovar_getint(dev, "per_chan_info", &chanspec);
if (!err) {
if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) {
@@ -1059,15 +1672,44 @@
DHD_INFO(("non radar sensitivity\n"));
}
error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
- smbuf, sizeof(smbuf), NULL);
+ smbuf, sizeof(smbuf), NULL);
if (error) {
- DHD_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_set_csa:set csa failed:%d\n", error));
return -1;
}
+
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ cfg->in_csa = TRUE;
+ mod_timer(&cfg->csa_timeout, jiffies + msecs_to_jiffies(100 * (csa_arg.count+2)));
+#endif // endif
return 0;
}
static int
+wl_android_set_bcn_li_dtim(struct net_device *dev, char *command)
+{
+ int ret = 0;
+ int dtim;
+
+ dtim = *(command + strlen(CMD_SETDTIM_IN_SUSPEND) + 1) - '0';
+
+ if (dtim > (MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL)) {
+ DHD_ERROR(("%s: failed, invalid dtim %d\n",
+ __FUNCTION__, dtim));
+ return BCME_ERROR;
+ }
+
+ if (!(ret = net_os_set_suspend_bcn_li_dtim(dev, dtim))) {
+ DHD_TRACE(("%s: SET bcn_li_dtim in suspend %d\n",
+ __FUNCTION__, dtim));
+ } else {
+ DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
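Editor's note: wl_android_set_bcn_li_dtim derives the DTIM value from a single ASCII digit (*(command + strlen(...) + 1) - '0'), so a multi-digit argument would be truncated to its first digit. Whether multi-digit DTIM values are ever expected here is an assumption; the standalone snippet below only demonstrates the difference between that parse and a full strtol parse.

/* Illustrative comparison of single-digit vs. full integer parsing. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *cmd = "SET_DTIM_IN_SUSPEND 12";
	const char *arg = cmd + strlen("SET_DTIM_IN_SUSPEND") + 1;

	int single = *arg - '0';		/* parses only the first digit: 1 */
	int full = (int)strtol(arg, NULL, 10);	/* parses the full value: 12 */

	printf("single-digit parse: %d, strtol parse: %d\n", single, full);
	return 0;
}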
+static int
wl_android_set_max_dtim(struct net_device *dev, char *command)
{
int ret = 0;
@@ -1076,15 +1718,36 @@
dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
- DHD_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n",
- __FUNCTION__, (dtim_flag ? "Enable" : "Disable")));
+ DHD_TRACE(("wl_android_set_max_dtim: use Max bcn_li_dtim in suspend %s\n",
+ (dtim_flag ? "Enable" : "Disable")));
} else {
- DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_max_dtim: failed %d\n", ret));
}
return ret;
}
+#ifdef DISABLE_DTIM_IN_SUSPEND
+static int
+wl_android_set_disable_dtim_in_suspend(struct net_device *dev, char *command)
+{
+ int ret = 0;
+ int dtim_flag;
+
+ dtim_flag = *(command + strlen(CMD_DISDTIM_IN_SUSPEND) + 1) - '0';
+
+ if (!(ret = net_os_set_disable_dtim_in_suspend(dev, dtim_flag))) {
+ DHD_TRACE(("wl_android_set_disable_dtim_in_suspend: "
+ "use Disable bcn_li_dtim in suspend %s\n",
+ (dtim_flag ? "Enable" : "Disable")));
+ } else {
+ DHD_ERROR(("wl_android_set_disable_dtim_in_suspend: failed %d\n", ret));
+ }
+
+ return ret;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
{
uint band;
@@ -1098,7 +1761,257 @@
return bytes_written;
}
-#ifdef CUSTOMER_HW4_PRIVATE_CMD
+static int
+wl_android_set_band(struct net_device *dev, char *command)
+{
+ int error = 0;
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+#ifdef WL_HOST_BAND_MGMT
+ int ret = 0;
+ if ((ret = wl_cfg80211_set_band(dev, band)) < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* If roam_var is unsupported, fallback to the original method */
+ WL_ERR(("WL_HOST_BAND_MGMT defined, "
+ "but roam_band iovar unsupported in the firmware\n"));
+ } else {
+ error = -1;
+ }
+ }
+ if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
+ /* Apply if roam_band iovar is not supported or band setting is AUTO */
+ error = wldev_set_band(dev, band);
+ }
+#else
+ error = wl_cfg80211_set_if_band(dev, band);
+#endif /* WL_HOST_BAND_MGMT */
+#ifdef ROAM_CHANNEL_CACHE
+ wl_update_roamscan_cache_by_band(dev, band);
+#endif /* ROAM_CHANNEL_CACHE */
+ return error;
+}
+
+static int wl_android_add_vendor_ie(struct net_device *dev, char *command, int total_len)
+{
+ char ie_buf[VNDR_IE_MAX_LEN];
+ char *ioctl_buf = NULL;
+ char hex[] = "XX";
+ char *pcmd = NULL;
+ int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ s32 iecount;
+ uint32 pktflag;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ s32 err = BCME_OK;
+
+ /*
+ * ADD_IE driver command provides support for addition of vendor elements
+ * to different management frames via wpa_cli
+ * Usage:
+ * Create softap/AGO
+ * wpa_cli> IFNAME=<group_iface_name> DRIVER ADD_IE <flag> <OUI> <DATA>
+	 * Here <flag> selects the 802.11 management frame the IE is added to:
+ * Beacon: 0
+ * Probe Rsp: 1
+ * Assoc Rsp: 2
+ * Auth Rsp: 4
+ * Probe Req: 8
+ * Assoc Req: 16
+ * E.g
+ * wpa_cli> IFNAME=bcm0 DRIVER ADD_IE 1 998877 1122334455667788
+ */
+ pcmd = command + strlen(CMD_ADDIE) + 1;
+ pktflag = simple_strtoul(pcmd, &pcmd, 16);
+ pcmd = pcmd + 1;
+
+ for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ pcmd++;
+ while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16);
+ datalen++;
+ }
+
+ tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1);
+
+ if (tot_len > VNDR_IE_MAX_LEN) {
+ WL_ERR(("Invalid IE total length %d\n", tot_len));
+ return -ENOMEM;
+ }
+
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+ if (!vndr_ie) {
+ WL_ERR(("IE memory alloc failed\n"));
+ return -ENOMEM;
+ }
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* Set packet flag to indicate the appropriate frame will contain this IE */
+ pktflag = htod32(1<<pktflag);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+ /* Set the OUI */
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+ DOT11_OUI_LEN);
+ /* Set the Data */
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+ &ie_buf[DOT11_OUI_LEN], datalen);
+
+ ielen = DOT11_OUI_LEN + datalen;
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+ ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (!ioctl_buf) {
+ WL_ERR(("ioctl memory alloc failed\n"));
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+ return -ENOMEM;
+ }
+ memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */
+ err = wldev_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf,
+ WLC_IOCTL_MEDLEN, NULL);
+
+ if (err != BCME_OK) {
+ err = -EINVAL;
+ }
+
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+
+ if (ioctl_buf) {
+ kfree(ioctl_buf);
+ }
+
+ return err;
+}
+
+static int wl_android_del_vendor_ie(struct net_device *dev, char *command, int total_len)
+{
+ char ie_buf[VNDR_IE_MAX_LEN];
+ char *ioctl_buf = NULL;
+ char hex[] = "XX";
+ char *pcmd = NULL;
+ int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ s32 iecount;
+ uint32 pktflag;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ s32 err = BCME_OK;
+
+ /*
+	 * DEL_IE driver command provides support for deletion of vendor elements
+ * from different management frames via wpa_cli
+ * Usage:
+ * Create softap/AGO
+ * wpa_cli> IFNAME=<group_iface_name> DRIVER DEL_IE <flag> <OUI> <DATA>
+	 * Here <flag> selects the 802.11 management frame the IE is removed from:
+ * Beacon: 1
+ * Probe Rsp: 2
+ * Assoc Rsp: 4
+ * Auth Rsp: 8
+ * Probe Req: 16
+ * Assoc Req: 32
+ * E.g
+ * wpa_cli> IFNAME=bcm0 DRIVER DEL_IE 1 998877 1122334455667788
+ */
+ pcmd = command + strlen(CMD_DELIE) + 1;
+
+ pktflag = simple_strtoul(pcmd, &pcmd, 16);
+ pcmd = pcmd + 1;
+
+ for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ pcmd++;
+ while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16);
+ datalen++;
+ }
+
+ tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1);
+ if (tot_len > VNDR_IE_MAX_LEN) {
+ WL_ERR(("Invalid IE total length %d\n", tot_len));
+ return -ENOMEM;
+ }
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+ if (!vndr_ie) {
+ WL_ERR(("IE memory alloc failed\n"));
+ return -ENOMEM;
+ }
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strncpy(vndr_ie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* Set packet flag to indicate the appropriate frame will contain this IE */
+ pktflag = htod32(1<<(pktflag-1));
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+ /* Set the OUI */
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+ DOT11_OUI_LEN);
+
+ /* Set the Data */
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+ &ie_buf[DOT11_OUI_LEN], datalen);
+
+ ielen = DOT11_OUI_LEN + datalen;
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+ ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (!ioctl_buf) {
+ WL_ERR(("ioctl memory alloc failed\n"));
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+ return -ENOMEM;
+ }
+ memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */
+ err = wldev_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf,
+ WLC_IOCTL_MEDLEN, NULL);
+
+ if (err != BCME_OK) {
+ err = -EINVAL;
+ }
+
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+
+ if (ioctl_buf) {
+ kfree(ioctl_buf);
+ }
+ return err;
+}
+
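Editor's note: both ADD_IE and DEL_IE convert the OUI and data arguments two hex characters at a time through a throwaway "XX" buffer and simple_strtoul. The standalone sketch below reproduces that conversion with the example arguments from the usage comments ("998877 1122334455667788"), which is handy when checking what a command string turns into on the wire; it is illustrative only.

/* Illustrative hex-string-to-bytes conversion, as used by ADD_IE/DEL_IE. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t hex2bytes(const char *hexstr, unsigned char *out, size_t out_sz)
{
	char hex[3] = "XX";
	size_t n = 0;

	while (hexstr[0] != '\0' && hexstr[1] != '\0' && n < out_sz) {
		hex[0] = *hexstr++;
		hex[1] = *hexstr++;
		out[n++] = (unsigned char)strtoul(hex, NULL, 16);
	}
	return n;
}

int main(void)
{
	unsigned char oui[3], data[16];
	size_t i, dlen;

	hex2bytes("998877", oui, sizeof(oui));
	dlen = hex2bytes("1122334455667788", data, sizeof(data));

	printf("OUI %02x:%02x:%02x, %zu data bytes:", oui[0], oui[1], oui[2], dlen);
	for (i = 0; i < dlen; i++)
		printf(" %02x", data[i]);
	printf("\n");
	return 0;
}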
+#if defined(CUSTOMER_HW4_PRIVATE_CMD) || defined(IGUANA_LEGACY_CHIPS)
#ifdef ROAM_API
static bool wl_android_check_wbtext(struct net_device *dev)
{
@@ -1144,9 +2057,11 @@
uint16 band = 0;
int chsp = {0};
chanspec_t chanspec;
+#ifdef WBTEXT
int i;
- struct wl_roam_prof_band_v2 rp;
- char smbuf[WLC_IOCTL_SMLEN];
+ wl_roamprof_band_t rp;
+ uint8 roam_prof_ver = 0, roam_prof_size = 0;
+#endif /* WBTEXT */
error = wldev_iovar_getint(dev, "chanspec", &chsp);
if (error != BCME_OK) {
@@ -1162,22 +2077,39 @@
band = WLC_BAND_2G;
if (wl_android_check_wbtext(dev)) {
- rp.ver = WL_MAX_ROAM_PROF_VER;
- rp.len = 0;
- rp.band = band;
- error = wldev_iovar_getbuf(dev, "roam_prof", &rp, sizeof(rp),
- smbuf, sizeof(smbuf), NULL);
- if (error != BCME_OK) {
- WL_ERR(("failed to get roam profile (%d)\n", error));
- return BCME_ERROR;
+#ifdef WBTEXT
+ memset_s(&rp, sizeof(rp), 0, sizeof(rp));
+ if ((error = wlc_wbtext_get_roam_prof(dev, &rp, band, &roam_prof_ver,
+ &roam_prof_size))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", error));
+ return -EINVAL;
}
- memcpy(&rp, smbuf, sizeof(struct wl_roam_prof_band_v2));
- for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
- if (rp.roam_prof[i].channel_usage == 0) {
- roam_trigger[0] = rp.roam_prof[i].roam_trigger;
- break;
+ switch (roam_prof_ver) {
+ case WL_ROAM_PROF_VER_1:
+ {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ if (rp.v2.roam_prof[i].channel_usage == 0) {
+ roam_trigger[0] = rp.v2.roam_prof[i].roam_trigger;
+ break;
+ }
+ }
}
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ if (rp.v3.roam_prof[i].channel_usage == 0) {
+ roam_trigger[0] = rp.v3.roam_prof[i].roam_trigger;
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ WL_ERR(("bad version = %d \n", roam_prof_ver));
+ return BCME_VERSION;
}
+#endif /* WBTEXT */
if (roam_trigger[0] == 0) {
WL_ERR(("roam trigger was not set properly\n"));
return BCME_ERROR;
@@ -1299,6 +2231,7 @@
return bytes_written;
}
+#ifdef AUTOMOTIVE_FEATURE
int wl_android_set_country_rev(
struct net_device *dev, char* command)
{
@@ -1308,7 +2241,16 @@
char smbuf[WLC_IOCTL_SMLEN];
int rev = 0;
- memset(country_code, 0, sizeof(country_code));
+ /*
+	 * SETCOUNTRYREV driver command provides support for setting the country
+	 * code (e.g. US, DE, JP) via the supplicant. Once set, the band and
+	 * channel list are updated automatically for that country.
+ * Usage:
+ * > IFNAME=wlan0 DRIVER SETCOUNTRYREV JP
+ * OK
+ */
+
+ bzero(country_code, sizeof(country_code));
sscanf(command+sizeof("SETCOUNTRYREV"), "%3s %10d", country_code, &rev);
WL_TRACE(("country_code = %s, rev = %d\n", country_code, rev));
@@ -1320,12 +2262,12 @@
sizeof(cspec), smbuf, sizeof(smbuf), NULL);
if (error) {
- DHD_ERROR(("%s: set country '%s/%d' failed code %d\n",
- __FUNCTION__, cspec.ccode, cspec.rev, error));
+ DHD_ERROR(("wl_android_set_country_rev: set country '%s/%d' failed code %d\n",
+ cspec.ccode, cspec.rev, error));
} else {
dhd_bus_country_set(dev, &cspec, true);
- DHD_INFO(("%s: set country '%s/%d'\n",
- __FUNCTION__, cspec.ccode, cspec.rev));
+ DHD_INFO(("wl_android_set_country_rev: set country '%s/%d'\n",
+ cspec.ccode, cspec.rev));
}
return error;
@@ -1339,17 +2281,25 @@
char smbuf[WLC_IOCTL_SMLEN];
wl_country_t cspec;
+ /*
+	 * GETCOUNTRYREV driver command provides support for reading back the
+	 * current country code (e.g. US, DE, JP) via the supplicant.
+ * Usage:
+ * > IFNAME=wlan0 DRIVER GETCOUNTRYREV
+ * GETCOUNTRYREV JP 0
+ */
+
error = wldev_iovar_getbuf(dev, "country", NULL, 0, smbuf,
sizeof(smbuf), NULL);
if (error) {
- DHD_ERROR(("%s: get country failed code %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_country_rev: get country failed code %d\n",
+ error));
return -1;
} else {
memcpy(&cspec, smbuf, sizeof(cspec));
- DHD_INFO(("%s: get country '%c%c %d'\n",
- __FUNCTION__, cspec.ccode[0], cspec.ccode[1], cspec.rev));
+ DHD_INFO(("wl_android_get_country_rev: get country '%c%c %d'\n",
+ cspec.ccode[0], cspec.ccode[1], cspec.rev));
}
bytes_written = snprintf(command, total_len, "%s %c%c %d",
@@ -1357,6 +2307,7 @@
return bytes_written;
}
+#endif /* AUTOMOTIVE_FEATURE */
#endif /* ROAM_API */
#ifdef WES_SUPPORT
@@ -1368,7 +2319,8 @@
error = get_roamscan_mode(dev, &mode);
if (error) {
- DHD_ERROR(("%s: Failed to get Scan Control, error = %d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_roam_scan_control: Failed to get Scan Control,"
+ " error = %d\n", error));
return -1;
}
@@ -1383,14 +2335,15 @@
int mode = 0;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_roam_scan_control: Failed to get Parameter\n"));
return -1;
}
error = set_roamscan_mode(dev, mode);
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Control %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_roam_scan_control: Failed to set Scan Control %d,"
+ " error = %d\n",
+ mode, error));
return -1;
}
@@ -1405,7 +2358,7 @@
int i = 0;
int buf_avail, len;
- channel_cnt = get_roamscan_channel_list(dev, channels);
+ channel_cnt = get_roamscan_channel_list(dev, channels, MAX_ROAM_CHANNEL);
bytes_written = snprintf(command, total_len, "%s %d",
CMD_GETROAMSCANCHANNELS, channel_cnt);
buf_avail = total_len - bytes_written;
@@ -1413,7 +2366,9 @@
/* A return value of 'buf_avail' or more means that the output was truncated */
len = snprintf(command + bytes_written, buf_avail, " %d", channels[i]);
if (len >= buf_avail) {
- WL_ERR(("%s: Insufficient memory, %d bytes\n", __FUNCTION__, total_len));
+ WL_ERR(("wl_android_get_roam_scan_channels: Insufficient memory,"
+ " %d bytes\n",
+ total_len));
bytes_written = -1;
break;
}
@@ -1421,7 +2376,7 @@
buf_avail -= len;
bytes_written += len;
}
- WL_INFORM(("%s: %s\n", __FUNCTION__, command));
+ WL_INFORM(("wl_android_get_roam_scan_channels: %s\n", command));
return bytes_written;
}
@@ -1432,8 +2387,9 @@
int get_ioctl_version = wl_cfg80211_get_ioctl_version();
error = set_roamscan_channel_list(dev, p[0], &p[1], get_ioctl_version);
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Channels %d, error = %d\n",
- __FUNCTION__, p[0], error));
+ DHD_ERROR(("wl_android_set_roam_scan_channels: Failed to set Scan Channels %d,"
+ " error = %d\n",
+ p[0], error));
return -1;
}
@@ -1448,8 +2404,9 @@
error = wldev_ioctl_get(dev, WLC_GET_SCAN_CHANNEL_TIME, &time, sizeof(time));
if (error) {
- DHD_ERROR(("%s: Failed to get Scan Channel Time, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_channel_time: Failed to get Scan Channel Time,"
+ " error = %d\n",
+ error));
return -1;
}
@@ -1464,7 +2421,7 @@
int time = 0;
if (sscanf(command, "%*s %d", &time) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_channel_time: Failed to get Parameter\n"));
return -1;
}
#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
@@ -1472,8 +2429,9 @@
error = wldev_ioctl_set(dev, WLC_SET_SCAN_CHANNEL_TIME, &time, sizeof(time));
#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Channel Time %d, error = %d\n",
- __FUNCTION__, time, error));
+ DHD_ERROR(("wl_android_set_scan_channel_time: Failed to set Scan Channel Time %d,"
+ " error = %d\n",
+ time, error));
return -1;
}
@@ -1489,8 +2447,9 @@
error = wldev_ioctl_get(dev, WLC_GET_SCAN_UNASSOC_TIME, &time, sizeof(time));
if (error) {
- DHD_ERROR(("%s: Failed to get Scan Unassoc Time, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_unassoc_time: Failed to get Scan Unassoc"
+ " Time, error = %d\n",
+ error));
return -1;
}
@@ -1506,7 +2465,7 @@
int time = 0;
if (sscanf(command, "%*s %d", &time) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_unassoc_time: Failed to get Parameter\n"));
return -1;
}
#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
@@ -1514,8 +2473,9 @@
error = wldev_ioctl_set(dev, WLC_SET_SCAN_UNASSOC_TIME, &time, sizeof(time));
#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Unassoc Time %d, error = %d\n",
- __FUNCTION__, time, error));
+ DHD_ERROR(("wl_android_set_scan_unassoc_time: Failed to set Scan Unassoc Time %d,"
+ " error = %d\n",
+ time, error));
return -1;
}
@@ -1531,8 +2491,9 @@
error = wldev_ioctl_get(dev, WLC_GET_SCAN_PASSIVE_TIME, &time, sizeof(time));
if (error) {
- DHD_ERROR(("%s: Failed to get Scan Passive Time, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_passive_time: Failed to get Scan Passive Time,"
+ " error = %d\n",
+ error));
return -1;
}
@@ -1548,7 +2509,7 @@
int time = 0;
if (sscanf(command, "%*s %d", &time) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_passive_time: Failed to get Parameter\n"));
return -1;
}
#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
@@ -1556,8 +2517,9 @@
error = wldev_ioctl_set(dev, WLC_SET_SCAN_PASSIVE_TIME, &time, sizeof(time));
#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Passive Time %d, error = %d\n",
- __FUNCTION__, time, error));
+ DHD_ERROR(("wl_android_set_scan_passive_time: Failed to set Scan Passive Time %d,"
+ " error = %d\n",
+ time, error));
return -1;
}
@@ -1587,7 +2549,7 @@
int time = 0;
if (sscanf(command, "%*s %d", &time) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_home_time: Failed to get Parameter\n"));
return -1;
}
#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
@@ -1595,8 +2557,9 @@
error = wldev_ioctl_set(dev, WLC_SET_SCAN_HOME_TIME, &time, sizeof(time));
#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Home Time %d, error = %d\n",
- __FUNCTION__, time, error));
+ DHD_ERROR(("wl_android_set_scan_home_time: Failed to set Scan Home Time %d,"
+ " error = %d\n",
+ time, error));
return -1;
}
@@ -1611,8 +2574,9 @@
error = wldev_iovar_getint(dev, "scan_home_away_time", &time);
if (error) {
- DHD_ERROR(("%s: Failed to get Scan Home Away Time, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_home_away_time: Failed to get Scan Home Away Time,"
+ " error = %d\n",
+ error));
return -1;
}
@@ -1627,7 +2591,7 @@
int time = 0;
if (sscanf(command, "%*s %d", &time) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_home_away_time: Failed to get Parameter\n"));
return -1;
}
#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
@@ -1635,8 +2599,9 @@
error = wldev_iovar_setint(dev, "scan_home_away_time", time);
#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Home Away Time %d, error = %d\n",
- __FUNCTION__, time, error));
+ DHD_ERROR(("wl_android_set_scan_home_away_time: Failed to set Scan Home Away"
+ " Time %d, error = %d\n",
+ time, error));
return -1;
}
@@ -1651,7 +2616,8 @@
error = wldev_ioctl_get(dev, WLC_GET_SCAN_NPROBES, &num, sizeof(num));
if (error) {
- DHD_ERROR(("%s: Failed to get Scan NProbes, error = %d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_nprobes: Failed to get Scan NProbes,"
+ " error = %d\n", error));
return -1;
}
@@ -1666,14 +2632,15 @@
int num = 0;
if (sscanf(command, "%*s %d", &num) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_nprobes: Failed to get Parameter\n"));
return -1;
}
error = wldev_ioctl_set(dev, WLC_SET_SCAN_NPROBES, &num, sizeof(num));
if (error) {
- DHD_ERROR(("%s: Failed to set Scan NProbes %d, error = %d\n",
- __FUNCTION__, num, error));
+ DHD_ERROR(("wl_android_set_scan_nprobes: Failed to set Scan NProbes %d,"
+ " error = %d\n",
+ num, error));
return -1;
}
@@ -1689,7 +2656,8 @@
error = wldev_iovar_getint(dev, "scan_passive_time", &scan_passive_time);
if (error) {
- DHD_ERROR(("%s: Failed to get Passive Time, error = %d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_scan_dfs_channel_mode: Failed to get Passive Time,"
+ " error = %d\n", error));
return -1;
}
@@ -1711,7 +2679,7 @@
int scan_passive_time = 0;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_scan_dfs_channel_mode: Failed to get Parameter\n"));
return -1;
}
@@ -1720,14 +2688,16 @@
} else if (mode == 0) {
scan_passive_time = 0;
} else {
- DHD_ERROR(("%s: Failed to set Scan DFS channel mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_scan_dfs_channel_mode: Failed to set Scan DFS"
+ " channel mode %d, error = %d\n",
+ mode, error));
return -1;
}
error = wldev_iovar_setint(dev, "scan_passive_time", scan_passive_time);
if (error) {
- DHD_ERROR(("%s: Failed to set Scan Passive Time %d, error = %d\n",
- __FUNCTION__, scan_passive_time, error));
+ DHD_ERROR(("wl_android_set_scan_dfs_channel_mode: Failed to set Scan"
+ " Passive Time %d, error = %d\n",
+ scan_passive_time, error));
return -1;
}
@@ -1754,10 +2724,10 @@
pcmd = command + strlen(CMD_SETJOINPREFER) + 1;
total_len_left = strlen(pcmd);
- memset(buf, 0, sizeof(buf));
+ bzero(buf, sizeof(buf));
if (total_len_left != JOINPREFFER_BUF_SIZE << 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_join_prefer: Failed to get Parameter\n"));
return BCME_ERROR;
}
@@ -1771,7 +2741,7 @@
#ifdef WBTEXT
/* No coexistance between 11kv and join pref */
if (wl_android_check_wbtext(dev)) {
- memset(commandp, 0, sizeof(commandp));
+ bzero(commandp, sizeof(commandp));
if (memcmp(buf, clear, sizeof(buf)) == 0) {
snprintf(commandp, WLC_IOCTL_SMLEN, "WBTEXT_ENABLE 1");
} else {
@@ -1807,49 +2777,52 @@
if (total_len <
(strlen(CMD_SENDACTIONFRAME) + 1 + sizeof(android_wifi_af_params_t))) {
- DHD_ERROR(("%s: Invalid parameters \n", __FUNCTION__));
+ DHD_ERROR(("wl_android_send_action_frame: Invalid parameters \n"));
goto send_action_frame_out;
}
params = (android_wifi_af_params_t *)(command + strlen(CMD_SENDACTIONFRAME) + 1);
if ((uint16)params->len > ANDROID_WIFI_ACTION_FRAME_SIZE) {
- DHD_ERROR(("%s: Requested action frame len was out of range(%d)\n",
- __FUNCTION__, params->len));
+ DHD_ERROR(("wl_android_send_action_frame: Requested action frame len"
+ " was out of range(%d)\n",
+ params->len));
goto send_action_frame_out;
}
smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
if (smbuf == NULL) {
- DHD_ERROR(("%s: failed to allocated memory %d bytes\n",
- __FUNCTION__, WLC_IOCTL_MAXLEN));
+		DHD_ERROR(("wl_android_send_action_frame: failed to allocate memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
goto send_action_frame_out;
}
af_params = (wl_af_params_t *)MALLOCZ(cfg->osh, WL_WIFI_AF_PARAMS_SIZE);
if (af_params == NULL) {
- DHD_ERROR(("%s: unable to allocate frame\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_send_action_frame: unable to allocate frame\n"));
goto send_action_frame_out;
}
- memset(&tmp_bssid, 0, ETHER_ADDR_LEN);
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
if (bcm_ether_atoe((const char *)params->bssid, (struct ether_addr *)&tmp_bssid) == 0) {
- memset(&tmp_bssid, 0, ETHER_ADDR_LEN);
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
error = wldev_ioctl_get(dev, WLC_GET_BSSID, &tmp_bssid, ETHER_ADDR_LEN);
if (error) {
- memset(&tmp_bssid, 0, ETHER_ADDR_LEN);
- DHD_ERROR(("%s: failed to get bssid, error=%d\n", __FUNCTION__, error));
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
+ DHD_ERROR(("wl_android_send_action_frame: failed to get bssid,"
+ " error=%d\n", error));
goto send_action_frame_out;
}
}
if (params->channel < 0) {
struct channel_info ci;
- memset(&ci, 0, sizeof(ci));
+ bzero(&ci, sizeof(ci));
error = wldev_ioctl_get(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
if (error) {
- DHD_ERROR(("%s: failed to get channel, error=%d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_send_action_frame: failed to get channel,"
+ " error=%d\n", error));
goto send_action_frame_out;
}
@@ -1872,7 +2845,8 @@
error = wldev_iovar_setbuf(dev, "actframe", af_params,
sizeof(wl_af_params_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
if (error) {
- DHD_ERROR(("%s: failed to set action frame, error=%d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_send_action_frame: failed to set action frame,"
+ " error=%d\n", error));
}
send_action_frame_out:
@@ -1901,21 +2875,21 @@
if (total_len <
(strlen(CMD_REASSOC) + 1 + sizeof(android_wifi_reassoc_params_t))) {
- DHD_ERROR(("%s: Invalid parameters \n", __FUNCTION__));
+ DHD_ERROR(("wl_android_reassoc: Invalid parameters \n"));
return -1;
}
params = (android_wifi_reassoc_params_t *)(command + strlen(CMD_REASSOC) + 1);
- memset(&reassoc_params, 0, WL_REASSOC_PARAMS_FIXED_SIZE);
+ bzero(&reassoc_params, WL_REASSOC_PARAMS_FIXED_SIZE);
if (bcm_ether_atoe((const char *)params->bssid,
(struct ether_addr *)&reassoc_params.bssid) == 0) {
- DHD_ERROR(("%s: Invalid bssid \n", __FUNCTION__));
+ DHD_ERROR(("wl_android_reassoc: Invalid bssid \n"));
return -1;
}
if (params->channel < 0) {
- DHD_ERROR(("%s: Invalid Channel \n", __FUNCTION__));
+ DHD_ERROR(("wl_android_reassoc: Invalid Channel \n"));
return -1;
}
@@ -1942,7 +2916,7 @@
error = wldev_ioctl_set(dev, WLC_REASSOC, &reassoc_params, params_size);
if (error) {
- DHD_ERROR(("%s: failed to reassoc, error=%d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_reassoc: failed to reassoc, error=%d\n", error));
return -1;
}
return 0;
@@ -1969,21 +2943,21 @@
#endif /* WBTEXT */
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_wes_mode: Failed to get Parameter\n"));
return -1;
}
error = wl_cfg80211_set_wes_mode(mode);
if (error) {
- DHD_ERROR(("%s: Failed to set WES Mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_wes_mode: Failed to set WES Mode %d, error = %d\n",
+ mode, error));
return -1;
}
#ifdef WBTEXT
/* No coexistance between 11kv and FMC */
if (wl_android_check_wbtext(dev)) {
- memset(commandp, 0, sizeof(commandp));
+ bzero(commandp, sizeof(commandp));
if (!mode) {
snprintf(commandp, WLC_IOCTL_SMLEN, "WBTEXT_ENABLE 1");
} else {
@@ -2007,7 +2981,7 @@
error = wldev_iovar_getint(dev, "okc_enable", &mode);
if (error) {
- DHD_ERROR(("%s: Failed to get OKC Mode, error = %d\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_okc_mode: Failed to get OKC Mode, error = %d\n", error));
return -1;
}
@@ -2022,14 +2996,14 @@
int mode = 0;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_okc_mode: Failed to get Parameter\n"));
return -1;
}
error = wldev_iovar_setint(dev, "okc_enable", mode);
if (error) {
- DHD_ERROR(("%s: Failed to set OKC Mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_okc_mode: Failed to set OKC Mode %d, error = %d\n",
+ mode, error));
return -1;
}
@@ -2041,15 +3015,24 @@
uchar pmk[33];
int error = 0;
char smbuf[WLC_IOCTL_SMLEN];
+ dhd_pub_t *dhdp;
#ifdef OKC_DEBUG
int i = 0;
#endif // endif
if (total_len < (strlen("SET_PMK ") + 32)) {
- DHD_ERROR(("%s: Invalid argument\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_pmk: Invalid argument\n"));
return -1;
}
+
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return -1;
+ }
+
bzero(pmk, sizeof(pmk));
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_OKC_PMK), dhd_net2idx(dhdp->info, dev), 0);
memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
if (error) {
@@ -2081,6 +3064,44 @@
return error;
}
#endif /* WES_SUPPORT */
+
+#ifdef SUPPORT_RESTORE_SCAN_PARAMS
+ static int
+wl_android_restore_scan_params(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ uint error_cnt = 0;
+ int cnt = 0;
+ char restore_command[WLC_IOCTL_SMLEN];
+
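+ /*
+ * Walk the restore_params table (terminated by an entry with an empty
+ * command string or a NULL handler), rebuild each "<command> <parameter>"
+ * string and replay it through the matching private-command handler.
+ */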
+ while (strlen(restore_params[cnt].command) > 0 && restore_params[cnt].cmd_handler) {
+ sprintf(restore_command, "%s %d", restore_params[cnt].command,
+ restore_params[cnt].parameter);
+ if (restore_params[cnt].cmd_type == RESTORE_TYPE_PRIV_CMD) {
+ error = restore_params[cnt].cmd_handler(dev, restore_command);
+ } else if (restore_params[cnt].cmd_type == RESTORE_TYPE_PRIV_CMD_WITH_LEN) {
+ error = restore_params[cnt].cmd_handler_w_len(dev,
+ restore_command, total_len);
+ } else {
+ DHD_ERROR(("Unknown restore command handler\n"));
+ error = -1;
+ }
+ if (error) {
+ DHD_ERROR(("Failed to restore scan parameters %s, error : %d\n",
+ restore_command, error));
+ error_cnt++;
+ }
+ cnt++;
+ }
+ if (error_cnt > 0) {
+ DHD_ERROR(("Got %d error(s) while restoring scan parameters\n",
+ error_cnt));
+ error = -1;
+ }
+ return error;
+}
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS */
+
#ifdef WLTDLS
int wl_android_tdls_reset(struct net_device *dev)
{
@@ -2098,6 +3119,253 @@
return 0;
}
#endif /* WLTDLS */
+
+#ifdef CONFIG_SILENT_ROAM
+int
+wl_android_sroam_turn_on(struct net_device *dev, const char* turn)
+{
+ int ret = BCME_OK, sroam_mode;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ sroam_mode = bcm_atoi(turn);
+ dhdp->sroam_turn_on = sroam_mode;
+ DHD_INFO(("%s Silent mode %s\n", __FUNCTION__,
+ sroam_mode ? "enable" : "disable"));
+
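+ /* Turning silent roam off also stops the sroam monitor via dhd_sroam_set_mon(). */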
+ if (!sroam_mode) {
+ ret = dhd_sroam_set_mon(dhdp, FALSE);
+ if (ret) {
+ DHD_ERROR(("%s Failed to Set sroam %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+
+ return ret;
+}
+
+int
+wl_android_sroam_set_info(struct net_device *dev, char *data,
+ char *command, int total_len)
+{
+ int ret = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ size_t slen = strlen(data);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ data[slen] = '\0';
+ psroam = (wlc_sroam_t *)MALLOCZ(dhdp->osh, sroamlen);
+ if (!psroam) {
+ WL_ERR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ psroam->ver = WLC_SILENT_ROAM_CUR_VER;
+ psroam->len = sizeof(*sroam);
+ sroam = (wlc_sroam_info_t *)psroam->data;
+
+ sroam->sroam_on = FALSE;
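+ /*
+ * Parse up to six delimiter-separated decimal fields in this order:
+ * min RSSI, RSSI range, score delta, period, band and inactivity count;
+ * any unparsed trailing characters are rejected as BCME_BADARG below.
+ */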
+ if (*data && *data != '\0') {
+ sroam->sroam_min_rssi = simple_strtol(data, &data, 10);
+ WL_DBG(("1.Minimum RSSI %d\n", sroam->sroam_min_rssi));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_rssi_range = simple_strtol(data, &data, 10);
+ WL_DBG(("2.RSSI Range %d\n", sroam->sroam_rssi_range));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_score_delta = simple_strtol(data, &data, 10);
+ WL_DBG(("3.Score Delta %d\n", sroam->sroam_score_delta));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_period_time = simple_strtol(data, &data, 10);
+ WL_DBG(("4.Sroam period %d\n", sroam->sroam_period_time));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_band = simple_strtol(data, &data, 10);
+ WL_DBG(("5.Sroam Band %d\n", sroam->sroam_band));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_inact_cnt = simple_strtol(data, &data, 10);
+ WL_DBG(("6.Inactivity Count %d\n", sroam->sroam_inact_cnt));
+ data++;
+ }
+
+ if (*data != '\0') {
+ ret = BCME_BADARG;
+ goto done;
+ }
+
+ ret = wldev_iovar_setbuf(dev, "sroam", psroam, sroamlen, ioctl_buf,
+ sizeof(ioctl_buf), NULL);
+ if (ret) {
+ WL_ERR(("Failed to set silent roam info(%d)\n", ret));
+ goto done;
+ }
+done:
+ if (psroam) {
+ MFREE(dhdp->osh, psroam, sroamlen);
+ }
+
+ return ret;
+}
+
+int
+wl_android_sroam_get_info(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ psroam = (wlc_sroam_t *)MALLOCZ(dhdp->osh, sroamlen);
+ if (!psroam) {
+ WL_ERR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ ret = wldev_iovar_getbuf(dev, "sroam", NULL, 0, psroam, sroamlen, NULL);
+ if (ret) {
+ WL_ERR(("Failed to get silent roam info(%d)\n", ret));
+ goto done;
+ }
+
+ if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ WL_ERR(("Ver(%d:%d). mismatch silent roam info(%d)\n",
+ psroam->ver, WLC_SILENT_ROAM_CUR_VER, ret));
+ goto done;
+ }
+
+ sroam = (wlc_sroam_info_t *)psroam->data;
+ bytes_written = snprintf(command, total_len,
+ "%s %d %d %d %d %d %d %d\n",
+ CMD_SROAM_GET_INFO, sroam->sroam_on, sroam->sroam_min_rssi, sroam->sroam_rssi_range,
+ sroam->sroam_score_delta, sroam->sroam_period_time, sroam->sroam_band,
+ sroam->sroam_inact_cnt);
+ ret = bytes_written;
+
+ WL_DBG(("%s", command));
+done:
+ if (psroam) {
+ MFREE(dhdp->osh, psroam, sroamlen);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SILENT_ROAM */
+
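+/*
+ * Convert an ASCII hex string into raw bytes (two hex characters per
+ * output byte), writing at most 'len' bytes and stopping early at the
+ * end of the string.
+ */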
+static int
+get_int_bytes(uchar *oui_str, uchar *oui, int len)
+{
+ int idx;
+ uchar val;
+ uchar *src, *dest;
+ char hexstr[3];
+
+ if ((oui_str == NULL) || (oui == NULL) || (len == 0)) {
+ return BCME_BADARG;
+ }
+ src = oui_str;
+ dest = oui;
+
+ for (idx = 0; idx < len; idx++) {
+ if (*src == '\0') {
+ *dest = '\0';
+ break;
+ }
+ hexstr[0] = src[0];
+ hexstr[1] = src[1];
+ hexstr[2] = '\0';
+
+ val = (uchar)bcm_strtoul(hexstr, NULL, 16);
+ if (val == (uchar)-1) {
+ return BCME_ERROR;
+ }
+ *dest++ = val;
+ src += 2;
+ }
+ return BCME_OK;
+}
+
+#define TAG_BYTE 0
+static int
+wl_android_set_disconnect_ies(struct net_device *dev, char *command)
+{
+ int cmd_prefix_len = 0;
+ char ie_len = 0;
+ int hex_ie_len = 0;
+ int total_len = 0;
+ int max_len = 0;
+ int cmd_len = 0;
+ uchar disassoc_ie[VNDR_IE_MAX_LEN] = {0};
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+ s32 ret = 0;
+ cfg = wl_get_cfg(dev);
+
+ cmd_prefix_len = strlen("SET_DISCONNECT_IES ");
+ cmd_len = strlen(command);
+ /*
+ * <CMD> + <IEs in hex format>
+ * The IEs in hex format must follow this layout: first byte = tag,
+ * second byte = len, and the remaining bytes are the value.
+ * For example: SET_DISCONNECT_IES dd0411223344
+ * has tag = dd, len = 04. Total IE len = len + 2
+ */
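+ /*
+ * Worked example for the command above: ie_len = 0x04, so
+ * hex_ie_len = (4 * 2) + 4 = 12 hex chars ("dd" + "04" + 8 value chars)
+ * and the binary IE is ie_len + 2 = 6 bytes.
+ */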
+ WL_DBG(("cmd recv = %s\n", command));
+ max_len = MIN(cmd_len, VNDR_IE_MAX_LEN);
+ /* Validate IEs len */
+ get_int_bytes(&command[cmd_prefix_len + 2], &ie_len, 1);
+ WL_INFORM_MEM(("ie_len = %d \n", ie_len));
+ if (ie_len <= 0 || ie_len > max_len) {
+ ret = BCME_BADLEN;
+ return ret;
+ }
+
+ /* Total hex length: two hex chars per IE value byte plus two each for the tag and len bytes */
+ hex_ie_len = (ie_len * 2) + 4;
+ total_len = cmd_prefix_len + hex_ie_len;
+ if (command[total_len] != '\0' || (cmd_len != total_len)) {
+ WL_ERR(("command recv not matching with len, command = %s"
+ "total_len = %d, cmd_len = %d\n", command, total_len, cmd_len));
+ ret = BCME_BADARG;
+ return ret;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* Tag and len bytes are also part of total len of ies in binary */
+ ie_len = ie_len + 2;
+ /* Convert IEs in binary */
+ get_int_bytes(&command[cmd_prefix_len], disassoc_ie, ie_len);
+ if (disassoc_ie[TAG_BYTE] != 0xdd) {
+ WL_ERR(("Wrong tag recv, tag = 0x%02x\n", disassoc_ie[TAG_BYTE]));
+ ret = BCME_UNSUPPORTED;
+ return ret;
+ }
+
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, disassoc_ie, ie_len);
+
+ return ret;
+}
+
#ifdef FCC_PWR_LIMIT_2G
int
wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command)
@@ -2108,16 +3376,17 @@
sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable);
if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) {
- DHD_ERROR(("%s: Invalid data\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_fcc_pwr_limit_2g: Invalid data\n"));
return BCME_ERROR;
}
CUSTOMER_HW4_EN_CONVERT(enable);
- DHD_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable));
+ DHD_ERROR(("wl_android_set_fcc_pwr_limit_2g: fccpwrlimit2g set (%d)\n", enable));
error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable);
if (error) {
- DHD_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_set_fcc_pwr_limit_2g: fccpwrlimit2g"
+ " set returned (%d)\n", error));
return BCME_ERROR;
}
@@ -2133,10 +3402,11 @@
error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable);
if (error) {
- DHD_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_fcc_pwr_limit_2g: fccpwrlimit2g get"
+ " error (%d)\n", error));
return BCME_ERROR;
}
- DHD_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable));
+ DHD_ERROR(("wl_android_get_fcc_pwr_limit_2g: fccpwrlimit2g get (%d)\n", enable));
bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable);
@@ -2171,8 +3441,8 @@
iovar_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
if (iovar_buf == NULL) {
- DHD_ERROR(("%s: failed to allocated memory %d bytes\n",
- __FUNCTION__, WLC_IOCTL_MAXLEN));
+ DHD_ERROR(("wl_cfg80211_get_sta_info: failed to allocated memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
goto error;
}
@@ -2181,11 +3451,11 @@
str = bcmstrtok(&pcmd, " ", NULL);
/* If GETSTAINFO subcmd name is not provided, return error */
if (str == NULL) {
- WL_ERR(("GETSTAINFO subcmd not provided %s\n", __FUNCTION__));
+ WL_ERR(("GETSTAINFO subcmd not provided wl_cfg80211_get_sta_info\n"));
goto error;
}
- memset(&mac, 0, ETHER_ADDR_LEN);
+ bzero(&mac, ETHER_ADDR_LEN);
if ((bcm_ether_atoe((str), &mac))) {
/* get the sta info */
ret = wldev_iovar_getbuf(dev, "sta_info",
@@ -2291,8 +3561,8 @@
if (!argc) {
error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data);
if (error) {
- DHD_ERROR(("%s: Failed to set wbtext error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_wbtext: Failed to set wbtext error = %d\n",
+ error));
return error;
}
bytes_written = snprintf(command, total_len, "WBTEXT %s\n",
@@ -2305,8 +3575,8 @@
}
if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data)) != BCME_OK) {
- DHD_ERROR(("%s: Failed to set wbtext error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_wbtext: Failed to set wbtext error = %d\n",
+ error));
return error;
}
@@ -2322,8 +3592,8 @@
roam_trigger[1] = WLC_BAND_ALL;
if ((error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
sizeof(roam_trigger))) != BCME_OK) {
- DHD_ERROR(("%s: Failed to reset roam trigger = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_wbtext: Failed to reset roam trigger = %d\n",
+ error));
return error;
}
dhdp->wbtext_support = FALSE;
@@ -2382,6 +3652,41 @@
return error;
}
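+/*
+ * GET reports whether WL_WNM_ESTM is set in the "wnm" mask; any other
+ * argument sets or clears that bit and writes the updated mask back
+ * through the "wnm" iovar.
+ */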
+static int wl_cfg80211_wbtext_estm_enable(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK;
+ int data = 0, bytes_written = 0;
+ int wnmmask = 0;
+ char *pcmd = command;
+
+ bcmstrtok(&pcmd, " ", NULL);
+
+ error = wldev_iovar_getint(dev, "wnm", &wnmmask);
+ if (error) {
+ WL_ERR(("Failed to get wnm_btmdelta (%d)\n", error));
+ return error;
+ }
+ WL_DBG(("wnmmask %x\n", wnmmask));
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ bytes_written = snprintf(command, total_len, "wbtext_estm_enable %d\n",
+ (wnmmask & WL_WNM_ESTM) ? 1:0);
+ return bytes_written;
+ } else {
+ data = bcm_atoi(pcmd);
+ if (data == 0) {
+ wnmmask &= ~WL_WNM_ESTM;
+ } else {
+ wnmmask |= WL_WNM_ESTM;
+ }
+ WL_DBG(("wnmmask %x\n", wnmmask));
+ if ((error = wldev_iovar_setint(dev, "wnm", wnmmask)) != BCME_OK) {
+ WL_ERR(("Failed to set wnm mask (%d)\n", error));
+ return error;
+ }
+ }
+ return error;
+}
#endif /* WBTEXT */
#ifdef PNO_SUPPORT
@@ -2397,29 +3702,29 @@
char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1];
struct dhd_pno_batch_params batch_params;
- DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ DHD_PNO(("wls_parse_batching_cmd: command=%s, len=%d\n", command, total_len));
len_remain = total_len;
if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) {
pos = command + strlen(CMD_WLS_BATCHING) + 1;
len_remain -= strlen(CMD_WLS_BATCHING) + 1;
} else {
- WL_ERR(("%s: No arguments, total_len %d\n", __FUNCTION__, total_len));
+ WL_ERR(("wls_parse_batching_cmd: No arguments, total_len %d\n", total_len));
err = BCME_ERROR;
goto exit;
}
- memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+ bzero(&batch_params, sizeof(struct dhd_pno_batch_params));
if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) {
pos += strlen(PNO_BATCHING_SET) + 1;
} else {
- WL_ERR(("%s: %s missing arguments, total_len %d\n",
- __FUNCTION__, PNO_BATCHING_SET, total_len));
+ WL_ERR(("wls_parse_batching_cmd: %s missing arguments, total_len %d\n",
+ PNO_BATCHING_SET, total_len));
err = BCME_ERROR;
goto exit;
}
while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
- memset(param, 0, sizeof(param));
- memset(value, 0, sizeof(value));
+ bzero(param, sizeof(param));
+ bzero(value, sizeof(value));
if (token == NULL || !*token)
break;
if (*token == '\0')
@@ -2444,8 +3749,9 @@
tokens = sscanf(value, "<%s>", value);
if (tokens != 1) {
err = BCME_ERROR;
- DHD_ERROR(("%s : invalid format for channel"
- " <> params\n", __FUNCTION__));
+ DHD_ERROR(("wls_parse_batching_cmd: invalid format"
+ " for channel"
+ " <> params\n"));
goto exit;
}
while ((token2 = strsep(&pos2,
@@ -2478,7 +3784,7 @@
batch_params.rtt = simple_strtol(value, NULL, 0);
DHD_PNO(("rtt : %d\n", batch_params.rtt));
} else {
- DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+ DHD_ERROR(("wls_parse_batching_cmd : unknown param: %s\n", param));
err = BCME_ERROR;
goto exit;
}
@@ -2487,7 +3793,7 @@
if (err < 0) {
DHD_ERROR(("failed to configure batch scan\n"));
} else {
- memset(command, 0, total_len);
+ bzero(command, total_len);
err = snprintf(command, total_len, "%d", err);
}
} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
@@ -2502,11 +3808,11 @@
if (err < 0) {
DHD_ERROR(("failed to stop batching scan\n"));
} else {
- memset(command, 0, total_len);
+ bzero(command, total_len);
err = snprintf(command, total_len, "OK");
}
} else {
- DHD_ERROR(("%s : unknown command\n", __FUNCTION__));
+ DHD_ERROR(("wls_parse_batching_cmd : unknown command\n"));
err = BCME_ERROR;
goto exit;
}
@@ -2546,10 +3852,10 @@
0x00
};
#endif /* PNO_SET_DEBUG */
- DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ DHD_PNO(("wl_android_set_pno_setup: command=%s, len=%d\n", command, total_len));
if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
- DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+ DHD_ERROR(("wl_android_set_pno_setup: argument=%d less min size\n", total_len));
goto exit_proc;
}
#ifdef PNO_SET_DEBUG
@@ -2560,7 +3866,7 @@
tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
- memset(ssids_local, 0, sizeof(ssids_local));
+ bzero(ssids_local, sizeof(ssids_local));
if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
@@ -2575,36 +3881,38 @@
goto exit_proc;
} else {
if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
- DHD_ERROR(("%s scan duration corrupted field size %d\n",
- __FUNCTION__, tlv_size_left));
+ DHD_ERROR(("wl_android_set_pno_setup: scan duration corrupted"
+ " field size %d\n",
+ tlv_size_left));
goto exit_proc;
}
str_ptr++;
pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
- DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+ DHD_PNO(("wl_android_set_pno_setup: pno_time=%d\n", pno_time));
if (str_ptr[0] != 0) {
if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
- DHD_ERROR(("%s pno repeat : corrupted field\n",
- __FUNCTION__));
+ DHD_ERROR(("wl_android_set_pno_setup: pno repeat:"
+ " corrupted field\n"));
goto exit_proc;
}
str_ptr++;
pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
- DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ DHD_PNO(("wl_android_set_pno_setup: got pno_repeat=%d\n",
+ pno_repeat));
if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
- DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
- __FUNCTION__));
+ DHD_ERROR(("wl_android_set_pno_setup: FREQ_EXPO_MAX"
+ " corrupted field size\n"));
goto exit_proc;
}
str_ptr++;
pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
- DHD_PNO(("%s: pno_freq_expo_max=%d\n",
- __FUNCTION__, pno_freq_expo_max));
+ DHD_PNO(("wl_android_set_pno_setup: pno_freq_expo_max=%d\n",
+ pno_freq_expo_max));
}
}
} else {
- DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_pno_setup: get wrong TLV command\n"));
goto exit_proc;
}
@@ -2623,125 +3931,19 @@
#define MAC_ADDR_STR_LEN 18
if (total_len < MAC_ADDR_STR_LEN) {
- DHD_ERROR(("%s: buflen %d is less than p2p dev addr\n",
- __FUNCTION__, total_len));
+ DHD_ERROR(("wl_android_get_p2p_dev_addr: buflen %d is less than p2p dev addr\n",
+ total_len));
return -1;
}
ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr);
if (ret) {
- DHD_ERROR(("%s Failed to get p2p dev addr\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_get_p2p_dev_addr: Failed to get p2p dev addr\n"));
return -1;
}
return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr)));
}
-#ifdef BCMCCX
-static int wl_android_get_cckm_rn(struct net_device *dev, char *command)
-{
- int error, rn;
-
- WL_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name));
-
- error = wldev_iovar_getint(dev, "cckm_rn", &rn);
- if (unlikely(error)) {
- WL_ERR(("wl_android_get_cckm_rn error (%d)\n", error));
- return -1;
- }
- memcpy(command, &rn, sizeof(int));
-
- return sizeof(int);
-}
-
-static int
-wl_android_set_cckm_krk(struct net_device *dev, char *command, int total_len)
-{
- int error, key_len, skip_len;
- unsigned char key[CCKM_KRK_LEN + CCKM_BTK_LEN];
- char iovar_buf[WLC_IOCTL_SMLEN];
-
- WL_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name));
-
- skip_len = strlen("set cckm_krk")+1;
-
- if (total_len < (skip_len + CCKM_KRK_LEN)) {
- return BCME_BADLEN;
- }
-
- if (total_len >= skip_len + CCKM_KRK_LEN + CCKM_BTK_LEN) {
- key_len = CCKM_KRK_LEN + CCKM_BTK_LEN;
- } else {
- key_len = CCKM_KRK_LEN;
- }
-
- memset(iovar_buf, 0, sizeof(iovar_buf));
- memcpy(key, command+skip_len, key_len);
-
- WL_DBG(("CCKM KRK-BTK (%d/%d) :\n", key_len, total_len));
- if (wl_dbg_level & WL_DBG_DBG) {
- prhex(NULL, key, key_len);
- }
-
- error = wldev_iovar_setbuf(dev, "cckm_krk", key, key_len,
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (unlikely(error)) {
- WL_ERR((" cckm_krk set error (%d)\n", error));
- return -1;
- }
- return 0;
-}
-
-static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command, int total_len)
-{
- int error;
- u8 buf[WL_ASSOC_INFO_MAX];
- wl_assoc_info_t assoc_info;
- u32 resp_ies_len = 0;
- int bytes_written = 0;
-
- WL_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name));
-
- error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- WL_ERR(("could not get assoc info (%d)\n", error));
- return -1;
- }
-
- memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t));
- assoc_info.req_len = htod32(assoc_info.req_len);
- assoc_info.resp_len = htod32(assoc_info.resp_len);
- assoc_info.flags = htod32(assoc_info.flags);
-
- if (assoc_info.resp_len) {
- resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
- }
-
- if (total_len < (sizeof(u32) + resp_ies_len)) {
- WL_ERR(("%s: Insufficient memory, %d bytes\n",
- __FUNCTION__, total_len));
- return -1;
- }
- /* first 4 bytes are ie len */
- memcpy(command, &resp_ies_len, sizeof(u32));
- bytes_written = sizeof(u32);
-
- /* get the association resp IE's if there are any */
- if (resp_ies_len) {
- error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0,
- buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- WL_ERR(("could not get assoc resp_ies (%d)\n", error));
- return -1;
- }
-
- memcpy(command+sizeof(u32), buf, resp_ies_len);
- bytes_written += resp_ies_len;
- }
- return bytes_written;
-}
-
-#endif /* BCMCCX */
-
int
wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
{
@@ -2753,21 +3955,22 @@
/* set filtering mode */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) {
- DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACMODE error=%d\n", ret));
return ret;
}
if (macmode != MACLIST_MODE_DISABLED) {
/* set the MAC filter list */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist,
sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
- DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACLIST error=%d\n", ret));
return ret;
}
/* get the current list of associated STAs */
assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist,
sizeof(mac_buf))) != 0) {
- DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_ap_mac_list: WLC_GET_ASSOCLIST error=%d\n",
+ ret));
return ret;
}
/* do we have any STA associated? */
@@ -2777,8 +3980,9 @@
match = 0;
/* compare with each entry */
for (j = 0; j < maclist->count; j++) {
- DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+ DHD_INFO(("wl_android_set_ap_mac_list: associated="MACDBG
+ "list = "MACDBG "\n",
+ MAC2STRDBG(assoc_maclist->ea[i].octet),
MAC2STRDBG(maclist->ea[j].octet)));
if (memcmp(assoc_maclist->ea[i].octet,
maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
@@ -2798,8 +4002,10 @@
if ((ret = wldev_ioctl_set(dev,
WLC_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scb_val_t))) != 0)
- DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
- __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_ap_mac_list:"
+ " WLC_SCB_DEAUTHENTICATE"
+ " error=%d\n",
+ ret));
}
}
}
@@ -2811,6 +4017,7 @@
* HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2
*
*/
+#ifdef AUTOMOTIVE_FEATURE
static int
wl_android_set_mac_address_filter(struct net_device *dev, char* str)
{
@@ -2834,7 +4041,7 @@
macmode = bcm_atoi(token);
if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
- DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+ DHD_ERROR(("wl_android_set_mac_address_filter: invalid macmode %d\n", macmode));
return -1;
}
@@ -2844,15 +4051,16 @@
}
macnum = bcm_atoi(token);
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- DHD_ERROR(("%s : invalid number of MAC address entries %d\n",
- __FUNCTION__, macnum));
+ DHD_ERROR(("wl_android_set_mac_address_filter: invalid number of MAC"
+ " address entries %d\n",
+ macnum));
return -1;
}
/* allocate memory for the MAC list */
list = (struct maclist*) MALLOCZ(cfg->osh, sizeof(int) +
sizeof(struct ether_addr) * macnum);
if (!list) {
- DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_mac_address_filter : failed to allocate memory\n"));
return -1;
}
/* prepare the MAC list */
@@ -2861,32 +4069,35 @@
for (i = 0; i < list->count; i++) {
token = strsep((char**)&str, " ");
if (token == NULL) {
- DHD_ERROR(("%s : No mac address present\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_mac_address_filter : No mac address present\n"));
ret = -EINVAL;
goto exit;
}
- strncpy(eabuf, token, ETHER_ADDR_STR_LEN - 1);
+ strlcpy(eabuf, token, sizeof(eabuf));
if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
- DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
- __FUNCTION__, i, eabuf));
+ DHD_ERROR(("wl_android_set_mac_address_filter : mac parsing err index=%d,"
+ " addr=%s\n",
+ i, eabuf));
list->count = i;
break;
}
- DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+ DHD_INFO(("wl_android_set_mac_address_filter : %d/%d MACADDR=%s",
+ i, list->count, eabuf));
}
if (i == 0)
goto exit;
/* set the list */
if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
- DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_mac_address_filter: Setting MAC list failed error=%d\n",
+ ret));
exit:
MFREE(cfg->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum);
return ret;
}
-
+#endif /* AUTOMOTIVE_FEATURE */
/**
* Global function definitions (declared in wl_android.h)
*/
@@ -2896,9 +4107,9 @@
int ret = 0;
int retry = POWERUP_MAX_RETRY;
- DHD_ERROR(("%s in\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_wifi_on in\n"));
if (!dev) {
- DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_wifi_on: dev is null\n"));
return -EINVAL;
}
@@ -2924,6 +4135,9 @@
} while (retry-- > 0);
if (ret != 0) {
DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+#ifdef BCM_DETECT_TURN_ON_FAILURE
+ BUG_ON(1);
+#endif /* BCM_DETECT_TURN_ON_FAILURE */
goto exit;
}
#ifdef BCMSDIO
@@ -2951,16 +4165,16 @@
{
int ret = 0;
- DHD_ERROR(("%s in\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_wifi_off in\n"));
if (!dev) {
- DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+ DHD_TRACE(("wl_android_wifi_off: dev is null\n"));
return -EINVAL;
}
#if defined(BCMPCIE) && defined(DHD_DEBUG_UART)
ret = dhd_debug_uart_is_running(dev);
if (ret) {
- DHD_ERROR(("%s - Debug UART App is running\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_wifi_off: - Debug UART App is running\n"));
return -EBUSY;
}
#endif /* BCMPCIE && DHD_DEBUG_UART */
@@ -2998,7 +4212,7 @@
u8 result[WLC_IOCTL_SMLEN];
chanim_stats_t *stats;
- memset(&param, 0, sizeof(param));
+ bzero(&param, sizeof(param));
param.buflen = htod32(sizeof(wl_chanim_stats_t));
param.count = htod32(WL_CHANIM_COUNT_ONE);
@@ -3050,6 +4264,7 @@
#ifndef DISABLE_IF_COUNTERS
wl_if_stats_t* if_stats = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
#endif /* DISABLE_IF_COUNTERS */
int link_speed = 0;
@@ -3058,17 +4273,18 @@
int bytes_written = -1;
int ret = 0;
- WL_INFORM(("%s: enter Get Connection Stats\n", __FUNCTION__));
+ WL_INFORM(("wl_android_get_connection_stats: enter Get Connection Stats\n"));
if (total_len <= 0) {
- WL_ERR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
+ WL_ERR(("wl_android_get_connection_stats: invalid buffer size %d\n", total_len));
goto error;
}
bufsize = total_len;
if (bufsize < sizeof(struct connection_stats)) {
- WL_ERR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
- __FUNCTION__, bufsize,
+ WL_ERR(("wl_android_get_connection_stats: not enough buffer size, provided=%u,"
+ " requires=%zu\n",
+ bufsize,
sizeof(struct connection_stats)));
goto error;
}
@@ -3078,21 +4294,24 @@
#ifndef DISABLE_IF_COUNTERS
if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats));
if (if_stats == NULL) {
- WL_ERR(("%s(%d): MALLOCZ failed\n", __FUNCTION__, __LINE__));
+ WL_ERR(("wl_android_get_connection_stats: MALLOCZ failed\n"));
goto error;
}
- memset(if_stats, 0, sizeof(*if_stats));
+ bzero(if_stats, sizeof(*if_stats));
if (FW_SUPPORTED(dhdp, ifst)) {
ret = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else {
+ } else
+ {
ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
(char *)if_stats, sizeof(*if_stats), NULL);
}
+ ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
if (ret) {
- WL_ERR(("%s: if_counters not supported ret=%d\n",
- __FUNCTION__, ret));
+ WL_ERR(("wl_android_get_connection_stats: if_counters not supported ret=%d\n",
+ ret));
/* In case if_stats IOVAR is not supported, get information from counters. */
#endif /* DISABLE_IF_COUNTERS */
@@ -3104,13 +4323,14 @@
}
ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
if (ret != BCME_OK) {
- WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n",
- __FUNCTION__, ret));
+ WL_ERR(("wl_android_get_connection_stats:"
+ " wl_cntbuf_to_xtlv_format ERR %d\n",
+ ret));
goto error;
}
if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
- WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__));
+ WL_ERR(("wl_android_get_connection_stats: wlc_cnt NULL!\n"));
goto error;
}
@@ -3129,8 +4349,10 @@
} else {
/* Populate from if_stats. */
if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
- WL_ERR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n",
- __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version));
+ WL_ERR(("wl_android_get_connection_stats: incorrect version of"
+ " wl_if_stats_t,"
+ " expected=%u got=%u\n",
+ WL_IF_STATS_T_VERSION, if_stats->version));
goto error;
}
@@ -3157,8 +4379,9 @@
/* link_speed is in kbps */
ret = wldev_get_link_speed(dev, &link_speed);
if (ret || link_speed < 0) {
- WL_ERR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
- __FUNCTION__, ret, link_speed));
+ WL_ERR(("wl_android_get_connection_stats: wldev_get_link_speed()"
+ " failed, ret=%d, speed=%d\n",
+ ret, link_speed));
goto error;
}
@@ -3197,7 +4420,7 @@
/* If natoe subcmd name is not provided, return error */
if (*pcmd == '\0') {
- WL_ERR(("natoe subcmd not provided %s\n", __FUNCTION__));
+ WL_ERR(("natoe subcmd not provided wl_android_process_natoe_cmd\n"));
ret = -EINVAL;
return ret;
}
@@ -3333,7 +4556,7 @@
}
else
{
- DHD_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res));
+ DHD_ERROR(("wl_natoe_get_ioctl: get command failed code %d\n", res));
res = BCME_ERROR;
}
@@ -3378,7 +4601,7 @@
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- WL_ERR(("Fail to get iovar %s\n", __FUNCTION__));
+ WL_ERR(("Fail to get iovar wl_android_natoe_subcmd_enable\n"));
ret = -EINVAL;
}
} else { /* set */
@@ -3456,7 +4679,7 @@
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- WL_ERR(("Fail to get iovar %s\n", __FUNCTION__));
+ WL_ERR(("Fail to get iovar wl_android_natoe_subcmd_config_ips\n"));
ret = -EINVAL;
}
} else { /* set */
@@ -3464,7 +4687,7 @@
/* save buflen at start */
uint16 buflen_at_start = buflen;
- memset(&config_ips, 0, sizeof(config_ips));
+ bzero(&config_ips, sizeof(config_ips));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) {
@@ -3576,7 +4799,7 @@
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- WL_ERR(("Fail to get iovar %s\n", __FUNCTION__));
+ WL_ERR(("Fail to get iovar wl_android_natoe_subcmd_config_ports\n"));
ret = -EINVAL;
}
} else { /* set */
@@ -3584,7 +4807,7 @@
/* save buflen at start */
uint16 buflen_at_start = buflen;
- memset(&ports_config, 0, sizeof(ports_config));
+ bzero(&ports_config, sizeof(ports_config));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str) {
@@ -3643,7 +4866,7 @@
int ret = BCME_OK;
wl_natoe_ioc_t *natoe_ioc;
char *pcmd = command;
- uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ;
uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
@@ -3675,7 +4898,7 @@
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MAXLEN, cmd_info);
if (ret != BCME_OK) {
- WL_ERR(("Fail to get iovar %s\n", __FUNCTION__));
+ WL_ERR(("Fail to get iovar wl_android_natoe_subcmd_dbg_stats\n"));
ret = -EINVAL;
}
} else { /* set */
@@ -3720,7 +4943,7 @@
int ret = BCME_OK;
wl_natoe_ioc_t *natoe_ioc;
char *pcmd = command;
- uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
uint16 buflen = WL_NATOE_IOC_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
@@ -3752,7 +4975,7 @@
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- WL_ERR(("Fail to get iovar %s\n", __FUNCTION__));
+ WL_ERR(("Fail to get iovar wl_android_natoe_subcmd_tbl_cnt\n"));
ret = -EINVAL;
}
} else { /* set */
@@ -3792,6 +5015,439 @@
#endif /* WL_NATOE */
+#ifdef WL_MBO
+static int
+wl_android_process_mbo_cmd(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_ERROR;
+ char *pcmd = command;
+ char *str = NULL;
+ wl_drv_cmd_info_t cmd_info;
+ const wl_drv_sub_cmd_t *mbo_cmd = &mbo_cmd_list[0];
+
+ /* skip to cmd name after "mbo" */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* If mbo subcmd name is not provided, return error */
+ if (*pcmd == '\0') {
+ WL_ERR(("mbo subcmd not provided %s\n", __FUNCTION__));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* get the mbo command name to str */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ while (mbo_cmd->name != NULL) {
+ if (strnicmp(mbo_cmd->name, str, strlen(mbo_cmd->name)) == 0) {
+ /* dispatch cmd to appropriate handler */
+ if (mbo_cmd->handler) {
+ cmd_info.command = command;
+ cmd_info.tot_len = total_len;
+ ret = mbo_cmd->handler(dev, mbo_cmd, pcmd, &cmd_info);
+ }
+ return ret;
+ }
+ mbo_cmd++;
+ }
+ return ret;
+}
+
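+/*
+ * Pack the given MBO sub-element type as a single XTLV into a
+ * WL_MBO_CMD_SEND_NOTIF request and issue it via the "mbo" iovar so a
+ * WNM notification is sent to the associated AP.
+ */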
+static int
+wl_android_send_wnm_notif(struct net_device *dev, bcm_iov_buf_t *iov_buf,
+ uint16 iov_buf_len, uint8 *iov_resp, uint16 iov_resp_len, uint8 sub_elem_type)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 iovlen = 0;
+ uint16 buflen = 0, buflen_start = 0;
+
+ memset_s(iov_buf, iov_buf_len, 0, iov_buf_len);
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_SEND_NOTIF;
+ buflen = buflen_start = iov_buf_len - sizeof(bcm_iov_buf_t);
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_SUB_ELEM_TYPE,
+ sizeof(sub_elem_type), &sub_elem_type, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to sent wnm notif %d\n", ret));
+ }
+ return ret;
+}
+
+static int
+wl_android_mbo_resp_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
+ uint8 *command = cmd_info->command;
+ uint16 total_len = cmd_info->tot_len;
+ uint16 bytes_written = 0;
+
+ UNUSED_PARAMETER(len);
+ /* TODO: validate data value */
+ if (data == NULL) {
+ WL_ERR(("%s: Bad argument !!\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ switch (type) {
+ case WL_MBO_XTLV_CELL_DATA_CAP:
+ {
+ bytes_written = snprintf(command, total_len, "cell_data_cap: %u\n", *data);
+ cmd_info->bytes_written = bytes_written;
+ }
+ break;
+ default:
+ WL_ERR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
+ }
+ return BCME_OK;
+}
+
+static int
+wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, const wl_drv_sub_cmd_t *cmd,
+ char *command, wl_drv_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 buflen = 0, buflen_start = 0;
+ uint16 iovlen = 0;
+ char *pcmd = command;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_iov_buf_t *p_resp = NULL;
+ uint8 *iov_resp = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint16 version;
+
+ /* first get the configured value */
+ iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iov_buf == NULL) {
+ ret = -ENOMEM;
+ WL_ERR(("iov buf memory alloc exited\n"));
+ goto exit;
+ }
+ iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (iov_resp == NULL) {
+ ret = -ENOMEM;
+ WL_ERR(("iov resp memory alloc exited\n"));
+ goto exit;
+ }
+
+ /* fill header */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_CELLULAR_DATA_CAP;
+
+ ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
+ WLC_IOCTL_MAXLEN,
+ NULL);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ p_resp = (bcm_iov_buf_t *)iov_resp;
+
+ /* get */
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ /* Check for version */
+ version = dtoh16(*(uint16 *)iov_resp);
+ if (version != WL_MBO_IOV_VERSION) {
+ ret = -EINVAL;
+ }
+ if (p_resp->id == WL_MBO_CMD_CELLULAR_DATA_CAP) {
+ ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
+ p_resp->len, BCM_XTLV_OPTION_ALIGN32,
+ wl_android_mbo_resp_parse_cbfn);
+ if (ret == BCME_OK) {
+ ret = cmd_info->bytes_written;
+ }
+ } else {
+ ret = -EINVAL;
+ WL_ERR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
+ goto exit;
+ }
+ } else {
+ uint8 cell_cap = bcm_atoi(pcmd);
+ const uint8* old_cell_cap = NULL;
+ uint16 len = 0;
+
+ old_cell_cap = bcm_get_data_from_xtlv_buf((uint8 *)p_resp->data, p_resp->len,
+ WL_MBO_XTLV_CELL_DATA_CAP, &len, BCM_XTLV_OPTION_ALIGN32);
+ if (old_cell_cap && *old_cell_cap == cell_cap) {
+ WL_ERR(("No change is cellular data capability\n"));
+ /* No change in value */
+ goto exit;
+ }
+
+ buflen = buflen_start = WLC_IOCTL_MEDLEN - sizeof(bcm_iov_buf_t);
+
+ if (cell_cap < MBO_CELL_DATA_CONN_AVAILABLE ||
+ cell_cap > MBO_CELL_DATA_CONN_NOT_CAPABLE) {
+ WL_ERR(("wrong value %u\n", cell_cap));
+ ret = -EINVAL;
+ goto exit;
+ }
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CELL_DATA_CAP,
+ sizeof(cell_cap), &cell_cap, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* Skip for CUSTOMER_HW4 - WNM notification
+ * for cellular data capability is handled by host
+ */
+ /* send a WNM notification request to associated AP */
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ WL_INFORM(("Sending WNM Notif\n"));
+ ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
+ iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_CELL_DATA_CAP);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to send WNM notification %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+ }
+exit:
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
+ }
+ if (iov_resp) {
+ MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
+ }
+ return ret;
+}
+
+static int
+wl_android_mbo_non_pref_chan_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
+ uint8 *command = cmd_info->command + cmd_info->bytes_written;
+ uint16 total_len = cmd_info->tot_len;
+ uint16 bytes_written = 0;
+
+ WL_DBG(("Total bytes written at begining %u\n", cmd_info->bytes_written));
+ UNUSED_PARAMETER(len);
+ if (data == NULL) {
+ WL_ERR(("%s: Bad argument !!\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ switch (type) {
+ case WL_MBO_XTLV_OPCLASS:
+ {
+ bytes_written = snprintf(command, total_len, "%u:", *data);
+ WL_ERR(("wr %u %u\n", bytes_written, *data));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_CHAN:
+ {
+ bytes_written = snprintf(command, total_len, "%u:", *data);
+ WL_ERR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_PREFERENCE:
+ {
+ bytes_written = snprintf(command, total_len, "%u:", *data);
+ WL_ERR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_REASON_CODE:
+ {
+ bytes_written = snprintf(command, total_len, "%u ", *data);
+ WL_ERR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ default:
+ WL_ERR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
+ }
+ WL_DBG(("Total bytes written %u\n", cmd_info->bytes_written));
+ return BCME_OK;
+}
+
+static int
+wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command,
+ wl_drv_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 buflen = 0, buflen_start = 0;
+ uint16 iovlen = 0;
+ char *pcmd = command;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_iov_buf_t *p_resp = NULL;
+ uint8 *iov_resp = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint16 version;
+
+ WL_ERR(("%s:%d\n", __FUNCTION__, __LINE__));
+ iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iov_buf == NULL) {
+ ret = -ENOMEM;
+ WL_ERR(("iov buf memory alloc exited\n"));
+ goto exit;
+ }
+ iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (iov_resp == NULL) {
+ ret = -ENOMEM;
+ WL_ERR(("iov resp memory alloc exited\n"));
+ goto exit;
+ }
+ /* get */
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ /* fill header */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_LIST_CHAN_PREF;
+
+ ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
+ WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ p_resp = (bcm_iov_buf_t *)iov_resp;
+ /* Check for version */
+ version = dtoh16(*(uint16 *)iov_resp);
+ if (version != WL_MBO_IOV_VERSION) {
+ WL_ERR(("Version mismatch. returned ver %u expected %u\n",
+ version, WL_MBO_IOV_VERSION));
+ ret = -EINVAL;
+ }
+ if (p_resp->id == WL_MBO_CMD_LIST_CHAN_PREF) {
+ ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
+ p_resp->len, BCM_XTLV_OPTION_ALIGN32,
+ wl_android_mbo_non_pref_chan_parse_cbfn);
+ if (ret == BCME_OK) {
+ ret = cmd_info->bytes_written;
+ }
+ } else {
+ ret = -EINVAL;
+ WL_ERR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
+ goto exit;
+ }
+ } else {
+ char *str = pcmd;
+ uint opcl = 0, ch = 0, pref = 0, rc = 0;
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!(strnicmp(str, "set", 3)) || (!strnicmp(str, "clear", 5))) {
+ /* delete all configurations */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_DEL_CHAN_PREF;
+ iov_buf->len = 0;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ WL_ERR(("Unknown command %s\n", str));
+ goto exit;
+ }
+ /* parse non pref channel list */
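+ /*
+ * Each entry is "opclass:channel:preference:reason" (e.g. a hypothetical
+ * "81:1:0:19"); every parsed entry is packed and issued as its own
+ * WL_MBO_CMD_ADD_CHAN_PREF request, up to MBO_MAX_CHAN_PREF_ENTRIES.
+ */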
+ if (strnicmp(str, "set", 3) == 0) {
+ uint8 cnt = 0;
+ str = bcmstrtok(&pcmd, " ", NULL);
+ while (str != NULL) {
+ ret = sscanf(str, "%u:%u:%u:%u", &opcl, &ch, &pref, &rc);
+ WL_ERR(("buflen %u op %u, ch %u, pref %u rc %u\n",
+ buflen, opcl, ch, pref, rc));
+ if (ret != 4) {
+ WL_ERR(("Not all parameter presents\n"));
+ ret = -EINVAL;
+ }
+ /* TODO: add a validation check here */
+ memset_s(iov_buf, WLC_IOCTL_MEDLEN, 0, WLC_IOCTL_MEDLEN);
+ buflen = buflen_start = WLC_IOCTL_MEDLEN;
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ /* opclass */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_OPCLASS,
+ sizeof(uint8), (uint8 *)&opcl, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* channel */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CHAN,
+ sizeof(uint8), (uint8 *)&ch, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* preference */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_PREFERENCE,
+ sizeof(uint8), (uint8 *)&pref, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* reason */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_REASON_CODE,
+ sizeof(uint8), (uint8 *)&rc, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ WL_ERR(("len %u\n", (buflen_start - buflen)));
+ /* Now set the new non pref channels */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_ADD_CHAN_PREF;
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cnt++;
+ if (cnt >= MBO_MAX_CHAN_PREF_ENTRIES) {
+ break;
+ }
+ WL_ERR(("%d cnt %u\n", __LINE__, cnt));
+ str = bcmstrtok(&pcmd, " ", NULL);
+ }
+ }
+ /* send a WNM notification request to associated AP */
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ WL_INFORM(("Sending WNM Notif\n"));
+ ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
+ iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_NON_PREF_CHAN_REPORT);
+ if (ret != BCME_OK) {
+ WL_ERR(("Fail to send WNM notification %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+ }
+exit:
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
+ }
+ if (iov_resp) {
+ MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
+ }
+ return ret;
+}
+#endif /* WL_MBO */
+
#ifdef CUSTOMER_HW4_PRIVATE_CMD
#ifdef SUPPORT_AMPDU_MPDU_CMD
/* CMD_AMPDU_MPDU */
@@ -3804,14 +5460,14 @@
ampdu_mpdu = bcm_atoi(string_num);
if (ampdu_mpdu > 32) {
- DHD_ERROR(("%s : ampdu_mpdu MAX value is 32.\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu MAX value is 32.\n"));
return -1;
}
- DHD_ERROR(("%s : ampdu_mpdu = %d\n", __FUNCTION__, ampdu_mpdu));
+ DHD_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu = %d\n", ampdu_mpdu));
err = wldev_iovar_setint(dev, "ampdu_mpdu", ampdu_mpdu);
if (err < 0) {
- DHD_ERROR(("%s : ampdu_mpdu set error. %d\n", __FUNCTION__, err));
+ DHD_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu set error. %d\n", err));
return -1;
}
@@ -3820,6 +5476,13 @@
#endif /* SUPPORT_AMPDU_MPDU_CMD */
#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+extern int wl_cfg80211_send_msg_to_ril(void);
+extern void wl_cfg80211_register_dev_ril_bridge_event_notifier(void);
+extern void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void);
+extern int g_mhs_chan_for_cpcoex;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
#if defined(WL_SUPPORT_AUTO_CHANNEL)
/* SoftAP feature */
#define APCS_BAND_2G_LEGACY1 20
@@ -3849,9 +5512,9 @@
WL_INFORM(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
band = WLC_BAND_AUTO;
- } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ } else if (strnicmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
band = WLC_BAND_5G;
- } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ } else if (strnicmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
band = WLC_BAND_2G;
} else {
/*
@@ -3874,6 +5537,20 @@
}
WL_INFORM(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
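+ /*
+ * For a 2.4 GHz SoftAP request, query the RIL bridge for a CP-coexistence
+ * channel; if one is reported through g_mhs_chan_for_cpcoex, use it and
+ * skip auto channel selection.
+ */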
+ wl_cfg80211_register_dev_ril_bridge_event_notifier();
+ if (band == WLC_BAND_2G) {
+ wl_cfg80211_send_msg_to_ril();
+
+ if (g_mhs_chan_for_cpcoex) {
+ channel = g_mhs_chan_for_cpcoex;
+ g_mhs_chan_for_cpcoex = 0;
+ goto done2;
+ }
+ }
+ wl_cfg80211_unregister_dev_ril_bridge_event_notifier();
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
/* If STA is connected, return is STA channel, else ACS can be issued,
* set spect to 0 and proceed with ACS
*/
@@ -3985,7 +5662,7 @@
done:
if ((retry == 0) || (ret < 0)) {
/* On failure, fallback to a default channel */
- if ((band == WLC_BAND_5G)) {
+ if (band == WLC_BAND_5G) {
channel = APCS_DEFAULT_5G_CH;
} else {
channel = APCS_DEFAULT_2G_CH;
@@ -4012,17 +5689,22 @@
}
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-#ifdef CUSTOMER_HW4_PRIVATE_CMD
#ifdef SUPPORT_HIDDEN_AP
static int
wl_android_set_max_num_sta(struct net_device *dev, const char* string_num)
{
+ int err = BCME_ERROR;
int max_assoc;
max_assoc = bcm_atoi(string_num);
- DHD_INFO(("%s : HAPD_MAX_NUM_STA = %d\n", __FUNCTION__, max_assoc));
- wldev_iovar_setint(dev, "maxassoc", max_assoc);
- return 1;
+ DHD_INFO(("wl_android_set_max_num_sta : HAPD_MAX_NUM_STA = %d\n", max_assoc));
+
+ err = wldev_iovar_setint(dev, "maxassoc", max_assoc);
+ if (err < 0) {
+ WL_ERR(("failed to set maxassoc, error:%d\n", err));
+ }
+
+ return err;
}
static int
@@ -4033,18 +5715,18 @@
ssid.SSID_len = strlen(hapd_ssid);
if (ssid.SSID_len == 0) {
- WL_ERR(("%s : No SSID\n", __FUNCTION__));
+ WL_ERR(("wl_android_set_ssids : No SSID\n"));
return -1;
}
if (ssid.SSID_len > DOT11_MAX_SSID_LEN) {
ssid.SSID_len = DOT11_MAX_SSID_LEN;
- WL_ERR(("%s : Too long SSID Length %zu\n", __FUNCTION__, strlen(hapd_ssid)));
+ WL_ERR(("wl_android_set_ssid : Too long SSID Length %zu\n", strlen(hapd_ssid)));
}
bcm_strncpy_s(ssid.SSID, sizeof(ssid.SSID), hapd_ssid, ssid.SSID_len);
- DHD_INFO(("%s: HAPD_SSID = %s\n", __FUNCTION__, ssid.SSID));
+ DHD_INFO(("wl_android_set_ssid: HAPD_SSID = %s\n", ssid.SSID));
ret = wldev_ioctl_set(dev, WLC_SET_SSID, &ssid, sizeof(wlc_ssid_t));
if (ret < 0) {
- WL_ERR(("%s : WLC_SET_SSID Error:%d\n", __FUNCTION__, ret));
+ WL_ERR(("wl_android_set_ssid : WLC_SET_SSID Error:%d\n", ret));
}
return 1;
@@ -4055,16 +5737,24 @@
{
int hide_ssid;
int enable = 0;
+ int err = BCME_ERROR;
hide_ssid = bcm_atoi(string_num);
- DHD_INFO(("%s: HAPD_HIDE_SSID = %d\n", __FUNCTION__, hide_ssid));
- if (hide_ssid)
+ DHD_INFO(("wl_android_set_hide_ssid: HIDE_SSID = %d\n", hide_ssid));
+ if (hide_ssid) {
enable = 1;
- wldev_iovar_setint(dev, "closednet", enable);
- return 1;
+ }
+
+ err = wldev_iovar_setint(dev, "closednet", enable);
+ if (err < 0) {
+ WL_ERR(("failed to set closednet, error:%d\n", err));
+ }
+
+ return err;
}
#endif /* SUPPORT_HIDDEN_AP */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
static int
wl_android_sta_diassoc(struct net_device *dev, const char* straddr)
@@ -4072,17 +5762,17 @@
scb_val_t scbval;
int error = 0;
- DHD_INFO(("%s: deauth STA %s\n", __FUNCTION__, straddr));
+ DHD_INFO(("wl_android_sta_diassoc: deauth STA %s\n", straddr));
/* Unspecified reason */
scbval.val = htod32(1);
if (bcm_ether_atoe(straddr, &scbval.ea) == 0) {
- DHD_ERROR(("%s: Invalid station MAC Address!!!\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_sta_diassoc: Invalid station MAC Address!!!\n"));
return -1;
}
- DHD_ERROR(("%s: deauth STA: "MACDBG " scb_val.val %d\n", __FUNCTION__,
+ DHD_ERROR(("wl_android_sta_diassoc: deauth STA: "MACDBG " scb_val.val %d\n",
MAC2STRDBG(scbval.ea.octet), scbval.val));
error = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
@@ -4103,7 +5793,7 @@
s32 val = 1;
lpc_enabled = bcm_atoi(string_num);
- DHD_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled));
+ DHD_INFO(("wl_android_set_lpc: HAPD_LPC_ENABLED = %d\n", lpc_enabled));
ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
if (ret < 0)
@@ -4125,19 +5815,24 @@
int error = 0;
s32 srl = 7;
s32 lrl = 4;
- printk("%s enter\n", __FUNCTION__);
+ printk("wl_android_ch_res_rl: enter\n");
if (change) {
srl = 4;
lrl = 2;
}
+
+ BCM_REFERENCE(lrl);
+
error = wldev_ioctl_set(dev, WLC_SET_SRL, &srl, sizeof(s32));
if (error) {
DHD_ERROR(("Failed to set SRL, error = %d\n", error));
}
+#ifndef CUSTOM_LONG_RETRY_LIMIT
error = wldev_ioctl_set(dev, WLC_SET_LRL, &lrl, sizeof(s32));
if (error) {
DHD_ERROR(("Failed to set LRL, error = %d\n", error));
}
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
return error;
}
@@ -4154,7 +5849,7 @@
chan_bitmap = bcm_strtoul(string_num, NULL, 16);
- DHD_INFO(("%s : LTECOEX 0x%x\n", __FUNCTION__, chan_bitmap));
+ DHD_INFO(("wl_android_set_ltecx: LTECOEX 0x%x\n", chan_bitmap));
if (chan_bitmap) {
ret = wldev_iovar_setint(dev, "mws_coex_bitmap", chan_bitmap);
@@ -4198,7 +5893,7 @@
err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable);
if (err != BCME_OK) {
- DHD_ERROR(("%s: rmc_ackreq, error = %d\n", __FUNCTION__, err));
+ DHD_ERROR(("wl_android_rmc_enable: rmc_ackreq, error = %d\n", err));
}
return err;
}
@@ -4209,16 +5904,15 @@
int error = BCME_OK;
char smbuf[WLC_IOCTL_SMLEN];
wl_rmc_entry_t rmc_entry;
- DHD_INFO(("%s: Set new RMC leader %s\n", __FUNCTION__, straddr));
+ DHD_INFO(("wl_android_rmc_set_leader: Set new RMC leader %s\n", straddr));
- memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ bzero(&rmc_entry, sizeof(wl_rmc_entry_t));
if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) {
if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) {
- DHD_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__));
- memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ DHD_INFO(("wl_android_rmc_set_leader: Set auto leader selection mode\n"));
+ bzero(&rmc_entry, sizeof(wl_rmc_entry_t));
} else {
- DHD_ERROR(("%s: No valid mac address provided\n",
- __FUNCTION__));
+ DHD_ERROR(("wl_android_rmc_set_leader: No valid mac address provided\n"));
return BCME_ERROR;
}
}
@@ -4227,8 +5921,8 @@
smbuf, sizeof(smbuf), NULL);
if (error != BCME_OK) {
- DHD_ERROR(("%s: Unable to set RMC leader, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_rmc_set_leader: Unable to set RMC leader, error = %d\n",
+ error));
}
return error;
@@ -4261,8 +5955,9 @@
error = wldev_iovar_getint(dev, "scan_ps", &mode);
if (error) {
- DHD_ERROR(("%s: Failed to get single core scan Mode, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_singlecore_scan: Failed to get single core scan Mode,"
+ " error = %d\n",
+ error));
return -1;
}
@@ -4277,14 +5972,14 @@
int mode = 0;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_singlecore_scan: Failed to get Parameter\n"));
return -1;
}
error = wldev_iovar_setint(dev, "scan_ps", mode);
if (error) {
- DHD_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_singlecore_scan[1]: Failed to set Mode %d, error = %d\n",
+ mode, error));
return -1;
}
@@ -4301,7 +5996,7 @@
dbm = bcm_atoi(string_num);
if (dbm < -1) {
- DHD_ERROR(("%s: dbm is negative...\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_tx_power: dbm is negative...\n"));
return -EINVAL;
}
@@ -4312,7 +6007,7 @@
err = wl_set_tx_power(dev, type, dbm);
if (unlikely(err)) {
- DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ DHD_ERROR(("wl_android_set_tx_power: error (%d)\n", err));
return err;
}
@@ -4328,14 +6023,14 @@
err = wl_get_tx_power(dev, &dbm);
if (unlikely(err)) {
- DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ DHD_ERROR(("wl_android_get_tx_power: error (%d)\n", err));
return err;
}
bytes_written = snprintf(command, total_len, "%s %d",
CMD_TEST_GET_TX_POWER, dbm);
- DHD_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm));
+ DHD_ERROR(("wl_android_get_tx_power: GET_TX_POWER: dBm=%d\n", dbm));
return bytes_written;
}
@@ -4344,30 +6039,154 @@
static int
wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num)
{
- int err = 0;
+ int err = BCME_ERROR;
int setval = 0;
s32 mode = bcm_atoi(string_num);
+ s32 mode_bit = 0;
+ int enab = 0;
- /* As Samsung specific and their requirement, '0' means activate sarlimit
- * and '-1' means back to normal state (deactivate sarlimit)
+ /* Per Samsung-specific requirements,
+ * the mode is encoded as follows:
+ * -1 : HEAD SAR disabled
+ * 0 : HEAD SAR enabled
+ * 1 : GRIP SAR disabled
+ * 2 : GRIP SAR enabled
+ * 3 : NR mmWave SAR disabled
+ * 4 : NR mmWave SAR enabled
+ * 5 : NR Sub6 SAR disabled
+ * 6 : NR Sub6 SAR enabled
+ * 7 : SAR BACKOFF disabled all
+ * 'SAR BACKOFF disabled all' must be the last mode index.
*/
- if (mode >= 0 && mode < 3) {
- DHD_INFO(("%s: SAR limit control activated mode = %d\n", __FUNCTION__, mode));
- setval = mode + 1;
- } else if (mode == -1) {
- DHD_INFO(("%s: SAR limit control deactivated\n", __FUNCTION__));
+ if ((mode < HEAD_SAR_BACKOFF_DISABLE) || (mode > SAR_BACKOFF_DISABLE_ALL)) {
+ DHD_ERROR(("%s: Request for Unsupported:%d\n", __FUNCTION__, bcm_atoi(string_num)));
+ err = BCME_RANGE;
+ goto error;
+ }
+
+ mode_bit = mode + 1;
+ enab = mode_bit % 2;
+ mode_bit = mode_bit / 2;
+
+ err = wldev_iovar_getint(dev, "sar_enable", &setval);
+ if (unlikely(err)) {
+ DHD_ERROR(("%s: Failed to get sar_enable - error (%d)\n", __FUNCTION__, err));
+ goto error;
+ }
+
+ if (mode == SAR_BACKOFF_DISABLE_ALL) {
+ DHD_ERROR(("%s: SAR limit control all mode disable!\n", __FUNCTION__));
setval = 0;
} else {
- return -EINVAL;
+ DHD_ERROR(("%s: SAR limit control mode %d enab %d\n",
+ __FUNCTION__, mode_bit, enab));
+ if (enab) {
+ setval |= (1 << mode_bit);
+ } else {
+ setval &= ~(1 << mode_bit);
+ }
}
err = wldev_iovar_setint(dev, "sar_enable", setval);
if (unlikely(err)) {
- DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: Failed to set sar_enable - error (%d)\n", __FUNCTION__, err));
+ goto error;
+ }
+error:
+ return err;
+}
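The mode-to-bit arithmetic above is compact but easy to misread, so here is an editor's illustration (not part of the patch) restating it as a standalone helper. The enum values HEAD_SAR_BACKOFF_DISABLE (-1) and SAR_BACKOFF_DISABLE_ALL (7) are assumed from the comment block; the helper name is hypothetical, everything else follows wl_android_set_sarlimit_txctrl() directly.

/* Editor's sketch: modes -1/0 toggle bit 0 (HEAD), 1/2 bit 1 (GRIP),
 * 3/4 bit 2 (NR mmWave), 5/6 bit 3 (NR Sub6); mode 7 clears everything.
 */
static int sar_mode_to_setval(s32 mode, int cur_setval)
{
	s32 mode_bit = (mode + 1) / 2;	/* which SAR domain bit */
	int enab = (mode + 1) % 2;	/* odd result enables, even disables */

	if (mode == SAR_BACKOFF_DISABLE_ALL)
		return 0;		/* all backoff bits cleared */

	return enab ? (cur_setval | (1 << mode_bit)) :
		(cur_setval & ~(1 << mode_bit));
}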
+
+#ifdef SUPPORT_SET_TID
+static int
+wl_android_set_tid(struct net_device *dev, char* command)
+{
+ int err = BCME_ERROR;
+ char *pos = command;
+ char *token = NULL;
+ uint8 mode = 0;
+ uint32 uid = 0;
+ uint8 prio = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp) {
+ WL_ERR(("dhd is NULL\n"));
return err;
}
- return 1;
+
+ WL_DBG(("%s: command[%s]\n", __FUNCTION__, command));
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ WL_ERR(("Invalid arguments\n"));
+ return err;
+ }
+
+ mode = bcm_atoi(token);
+
+ if (mode < SET_TID_OFF || mode > SET_TID_BASED_ON_UID) {
+ WL_ERR(("Invalid arguments, mode %d\n", mode));
+ return err;
+ }
+
+ if (mode) {
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ WL_ERR(("Invalid arguments for target uid\n"));
+ return err;
+ }
+
+ uid = bcm_atoi(token);
+
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ WL_ERR(("Invalid arguments for target tid\n"));
+ return err;
+ }
+
+ prio = bcm_atoi(token);
+ if (prio <= MAXPRIO) { /* prio is unsigned, so only the upper bound matters */
+ dhdp->tid_mode = mode;
+ dhdp->target_uid = uid;
+ dhdp->target_tid = prio;
+ } else {
+ WL_ERR(("Invalid arguments, prio %d\n", prio));
+ return err;
+ }
+ } else {
+ dhdp->tid_mode = SET_TID_OFF;
+ dhdp->target_uid = 0;
+ dhdp->target_tid = 0;
+ }
+
+ WL_DBG(("%s mode [%d], uid [%d], tid [%d]\n", __FUNCTION__,
+ dhdp->tid_mode, dhdp->target_uid, dhdp->target_tid));
+
+ err = BCME_OK;
+ return err;
}
+
+static int
+wl_android_get_tid(struct net_device *dev, char* command, int total_len)
+{
+ int bytes_written = BCME_ERROR;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp) {
+ WL_ERR(("dhd is NULL\n"));
+ return bytes_written;
+ }
+
+ bytes_written = snprintf(command, total_len, "mode %d uid %d tid %d",
+ dhdp->tid_mode, dhdp->target_uid, dhdp->target_tid);
+
+ WL_DBG(("%s: command results %s\n", __FUNCTION__, command));
+
+ return bytes_written;
+}
+#endif /* SUPPORT_SET_TID */
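For reference, a hypothetical command exchange for the two handlers above. The literal strings behind CMD_SET_TID/CMD_GET_TID and the Android "DRIVER" prefix are assumptions; the argument order follows the bcmstrtok() parsing in wl_android_set_tid().

/* Editor's usage sketch, assuming CMD_SET_TID == "SET_TID" and
 * CMD_GET_TID == "GET_TID":
 *
 *   DRIVER SET_TID 0                  - disable TID override (uid/tid reset to 0)
 *   DRIVER SET_TID <mode> <uid> <tid> - a nonzero mode needs a target uid and a
 *                                       priority in [0, MAXPRIO]
 *   DRIVER GET_TID                    - replies "mode <m> uid <u> tid <t>"
 */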
#endif /* CUSTOMER_HW4_PRIVATE_CMD */
int wl_android_set_roam_mode(struct net_device *dev, char *command)
@@ -4376,19 +6195,20 @@
int mode = 0;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_roam_mode: Failed to get Parameter\n"));
return -1;
}
error = wldev_iovar_setint(dev, "roam_off", mode);
if (error) {
- DHD_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_roam_mode: Failed to set roaming Mode %d, error = %d\n",
+ mode, error));
return -1;
}
else
- DHD_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n",
- __FUNCTION__, mode, error));
+ DHD_ERROR(("wl_android_set_roam_mode: succeeded to set roaming Mode %d,"
+ " error = %d\n",
+ mode, error));
return 0;
}
@@ -4447,8 +6267,7 @@
return -ENOMEM;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
- vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strlcpy(vndr_ie->cmd, "add", sizeof(vndr_ie->cmd));
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
@@ -4477,7 +6296,7 @@
}
return -ENOMEM;
}
- memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */
+ bzero(ioctl_buf, WLC_IOCTL_MEDLEN); /* init the buffer */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find index failed\n"));
err = BCME_ERROR;
@@ -4540,9 +6359,9 @@
if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
return -1;
- memset(buf, 0, sizeof(buf));
- memset(akm_suites, 0, sizeof(akm_suites));
- memset(ucipher_suites, 0, sizeof(ucipher_suites));
+ bzero(buf, sizeof(buf));
+ bzero(akm_suites, sizeof(akm_suites));
+ bzero(ucipher_suites, sizeof(ucipher_suites));
/* Save the AKM suites passed in the command */
for (i = 0; i < num_akm_suites; i++) {
@@ -4608,7 +6427,8 @@
total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
(JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
} else {
- DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_roampref: Too many wpa configs"
+ " for join_pref \n"));
return -1;
}
} else {
@@ -4624,7 +6444,7 @@
memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
/* Set to 0 to match any available multicast cipher */
- memset(pref, 0, WPA_SUITE_LEN);
+ bzero(pref, WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
}
}
@@ -4652,14 +6472,14 @@
if (config->iovar) {
ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
if (ret) {
- DHD_ERROR(("%s: Failed to get current %s value\n",
- __FUNCTION__, config->iovar));
+ DHD_ERROR(("wl_android_iolist_add: Failed to get current %s value\n",
+ config->iovar));
goto error;
}
ret = wldev_iovar_setint(dev, config->iovar, config->param);
if (ret) {
- DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+ DHD_ERROR(("wl_android_iolist_add: Failed to set %s to %d\n",
config->iovar, config->param));
goto error;
}
@@ -4673,13 +6493,13 @@
}
ret = wldev_ioctl_get(dev, config->ioctl, resume_cfg->arg, config->len);
if (ret) {
- DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+ DHD_ERROR(("wl_android_iolist_add: Failed to get ioctl %d\n",
config->ioctl));
goto error;
}
ret = wldev_ioctl_set(dev, config->ioctl + 1, config->arg, config->len);
if (ret) {
- DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+ DHD_ERROR(("wl_android_iolist_add: Failed to set %s to %d\n",
config->iovar, config->param));
goto error;
}
@@ -4705,16 +6525,10 @@
struct list_head *cur, *q;
s32 ret = 0;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_safe(cur, q, head) {
config = list_entry(cur, struct io_cfg, list);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (config->iovar) {
if (!ret)
ret = wldev_iovar_setint(dev, config->iovar,
@@ -4739,11 +6553,11 @@
struct io_cfg config;
if (sscanf(command, "%*s %d", &mode) != 1) {
- DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_set_miracasts: Failed to get Parameter\n"));
return -1;
}
- DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+ DHD_INFO(("wl_android_set_miracast: enter miracast mode %d\n", mode));
if (miracast_cur_mode == mode) {
return 0;
@@ -4751,18 +6565,20 @@
wl_android_iolist_resume(dev, &miracast_resume_list);
miracast_cur_mode = MIRACAST_MODE_OFF;
- memset((void *)&config, 0, sizeof(config));
+
+ bzero((void *)&config, sizeof(config));
switch (mode) {
case MIRACAST_MODE_SOURCE:
+#ifdef MIRACAST_MCHAN_ALGO
/* setting mchan_algo to platform specific value */
config.iovar = "mchan_algo";
ret = wldev_ioctl_get(dev, WLC_GET_BCNPRD, &val, sizeof(int));
if (!ret && val > 100) {
config.param = 0;
- DHD_ERROR(("%s: Connected station's beacon interval: "
+ DHD_ERROR(("wl_android_set_miracast: Connected station's beacon interval: "
"%d and set mchan_algo to %d \n",
- __FUNCTION__, val, config.param));
+ val, config.param));
} else {
config.param = MIRACAST_MCHAN_ALGO;
}
@@ -4770,7 +6586,9 @@
if (ret) {
goto resume;
}
+#endif /* MIRACAST_MCHAN_ALGO */
+#ifdef MIRACAST_MCHAN_BW
/* setting mchan_bw to platform specific value */
config.iovar = "mchan_bw";
config.param = MIRACAST_MCHAN_BW;
@@ -4778,7 +6596,9 @@
if (ret) {
goto resume;
}
+#endif /* MIRACAST_MCHAN_BW */
+#ifdef MIRACAST_AMPDU_SIZE
/* setting apmdu to platform specific value */
config.iovar = "ampdu_mpdu";
config.param = MIRACAST_AMPDU_SIZE;
@@ -4786,6 +6606,8 @@
if (ret) {
goto resume;
}
+#endif /* MIRACAST_AMPDU_SIZE */
+
/* Source mode shares most configurations with sink mode.
* Fall through here to avoid code duplication
*/
@@ -4828,7 +6650,7 @@
return 0;
resume:
- DHD_ERROR(("%s: turnoff miracast mode because of err%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_android_set_miracast: turnoff miracast mode because of err%d\n", ret));
wl_android_iolist_resume(dev, &miracast_resume_list);
return ret;
}
@@ -4984,9 +6806,8 @@
}
}
- buf = MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
- if (buf == NULL) {
- WL_ERR(("MALLOC failed\n"));
+ if ((buf = MALLOC(cfg->osh, WLC_IOCTL_MAXLEN)) == NULL) {
+ WL_ERR(("kmalloc failed\n"));
return -1;
}
@@ -5028,8 +6849,9 @@
MACF" %u %d ", ETHER_TO_MACF(peer_info->ea),
peer_info->tx_rate/1000, peer_info->rssi);
if (bytes_written >= total_len) {
- WL_ERR(("%s: Insufficient memory, %d bytes\n",
- __FUNCTION__, total_len));
+ WL_ERR(("wl_android_get_ibss_peer_info: Insufficient"
+ " memory, %d bytes\n",
+ total_len));
bytes_written = -1;
break;
}
@@ -5082,7 +6904,7 @@
}
return -ENOMEM;
}
- memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);
+ bzero(ioctl_buf, WLC_IOCTL_MEDLEN);
/* drop command */
str = bcmstrtok(&pcmd, " ", NULL);
@@ -5165,7 +6987,7 @@
WL_DBG(("set ibss ampdu:%s\n", command));
- memset(&aggr, 0, sizeof(aggr));
+ bzero(&aggr, sizeof(aggr));
/* Cofigure all priorities */
aggr.conf_TID_bmap = NBITMASK(NUMPRIO);
@@ -5243,16 +7065,16 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
if (extra == NULL) {
- DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+ DHD_ERROR(("wl_keep_alive_set: extra is NULL\n"));
return -1;
}
if (sscanf(extra, "%d", &period_msec) != 1) {
- DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+ DHD_ERROR(("wl_keep_alive_set: sscanf error. check period_msec value\n"));
return -EINVAL;
}
- DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+ DHD_ERROR(("wl_keep_alive_set: period_msec is %d\n", period_msec));
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+ bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
mkeep_alive_pkt.period_msec = period_msec;
mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
@@ -5264,15 +7086,15 @@
buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_SMLEN);
if (!buf) {
- DHD_ERROR(("%s: buffer alloc failed\n", __FUNCTION__));
+ DHD_ERROR(("wl_keep_alive_set: buffer alloc failed\n"));
return BCME_NOMEM;
}
ret = wldev_iovar_setbuf(dev, "mkeep_alive", (char *)&mkeep_alive_pkt,
WL_MKEEP_ALIVE_FIXED_LEN, buf, WLC_IOCTL_SMLEN, NULL);
if (ret < 0)
- DHD_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret));
+ DHD_ERROR(("wl_keep_alive_set:keep_alive set failed:%d\n", ret));
else
- DHD_TRACE(("%s:keep_alive set ok\n", __FUNCTION__));
+ DHD_TRACE(("wl_keep_alive_set: keep_alive set ok\n"));
MFREE(cfg->osh, buf, WLC_IOCTL_SMLEN);
return ret;
}
@@ -5285,8 +7107,9 @@
error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc);
if (error) {
- DHD_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n",
- __FUNCTION__, error));
+ DHD_ERROR(("wl_android_get_wfdie_resp: Failed to get the mode"
+ " for only_resp_wfdsrc, error = %d\n",
+ error));
return -1;
}
@@ -5302,8 +7125,9 @@
error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc);
if (error) {
- DHD_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n",
- __FUNCTION__, only_resp_wfdsrc, error));
+ DHD_ERROR(("wl_android_set_wfdie_resp: Failed to set only_resp_wfdsrc %d,"
+ " error = %d\n",
+ only_resp_wfdsrc, error));
return -1;
}
@@ -5318,7 +7142,7 @@
int err = BCME_OK;
char buf[WLC_IOCTL_SMLEN];
tbow_setup_netinfo_t netinfo;
- memset(&netinfo, 0, sizeof(netinfo));
+ bzero(&netinfo, sizeof(netinfo));
netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
@@ -5344,10 +7168,11 @@
if (!error) {
void *dhdp = wl_cfg80211_get_dhdp(net);
if (enable) {
- DHD_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__));
+ DHD_TRACE(("wl_android_set_rps_cpus: set ack suppress."
+ " TCPACK_SUP_HOLD.\n"));
dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
} else {
- DHD_TRACE(("%s : clear ack suppress.\n", __FUNCTION__));
+ DHD_TRACE(("wl_android_set_rps_cpus: clear ack suppress.\n"));
dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
}
}
@@ -5369,7 +7194,7 @@
datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
- memset(buf, 0, datalen);
+ bzero(buf, datalen);
/* get BSS information */
*(u32 *) buf = htod32(datalen);
error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen);
@@ -5492,12 +7317,14 @@
s32 bssidx;
int ret = 0;
int p2plo_pause = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd_pub_t *dhd = NULL;
if (!cfg || !cfg->p2p) {
WL_ERR(("Wl %p or cfg->p2p %p is null\n",
cfg, cfg ? cfg->p2p : 0));
return 0;
}
+
+ dhd = (dhd_pub_t *)(cfg->pub);
if (!dhd->up) {
WL_ERR(("bus is already down\n"));
return ret;
@@ -5626,6 +7453,35 @@
}
return ret;
}
+void
+wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev;
+ if (!cfg) {
+ return;
+ }
+
+ wdev = bcmcfg_to_p2p_wdev(cfg);
+
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_INFORM_MEM(("P2P_FIND: Discovery offload is already in progress."
+ "it aborted\n"));
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+ if (wdev != NULL) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ cfg80211_remain_on_channel_expired(wdev,
+ cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+#else
+ cfg80211_remain_on_channel_expired(wdev,
+ cfg->last_roc_id,
+ &cfg->remain_on_chan,
+ cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+ wl_cfg80211_p2plo_deinit(cfg);
+ }
+}
#endif /* P2P_LISTEN_OFFLOADING */
#ifdef WL_MURX
@@ -5651,7 +7507,7 @@
}
/* If successful intiate a reassoc */
- memset(&bssid, 0, ETHER_ADDR_LEN);
+ bzero(&bssid, ETHER_ADDR_LEN);
if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
WL_ERR(("Failed to get bssid, error=%d\n", err));
return err;
@@ -5684,7 +7540,7 @@
int bytes_written = 0;
bool mimo_rssi = FALSE;
- memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t));
+ bzero(&rssi_ant_mimo, sizeof(wl_rssi_ant_mimo_t));
/*
* STA I/F: DRIVER GET_RSSI_PER_ANT <ifname> <mimo>
* AP/GO I/F: DRIVER GET_RSSI_PER_ANT <ifname> <Peer MAC addr> <mimo>
@@ -5752,7 +7608,7 @@
char *pos, *token;
int err = BCME_OK;
- memset(&set_param, 0, sizeof(rssilog_set_param_t));
+ bzero(&set_param, sizeof(rssilog_set_param_t));
/*
* DRIVER SET_RSSI_LOGGING <enable/disable> <RSSI Threshold> <Time Threshold>
*/
@@ -5852,14 +7708,20 @@
#ifdef SET_PCIE_IRQ_CPU_CORE
void
-wl_android_set_irq_cpucore(struct net_device *net, int set)
+wl_android_set_irq_cpucore(struct net_device *net, int affinity_cmd)
{
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
if (!dhdp) {
WL_ERR(("dhd is NULL\n"));
return;
}
- dhd_set_irq_cpucore(dhdp, set);
+
+ if (affinity_cmd < PCIE_IRQ_AFFINITY_OFF || affinity_cmd > PCIE_IRQ_AFFINITY_LAST) {
+ WL_ERR(("Wrong Affinity cmds:%d, %s\n", affinity_cmd, __FUNCTION__));
+ return;
+ }
+
+ dhd_set_irq_cpucore(dhdp, affinity_cmd);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
@@ -5893,7 +7755,7 @@
tx_lqcm_idx = (lqcm_report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
rx_lqcm_idx = (lqcm_report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
- WL_ERR(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
+ WL_DBG(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
bytes_written = snprintf(command, total_len, "%s %d",
CMD_GET_LQCM_REPORT, lqcm_report);
@@ -5910,13 +7772,13 @@
error = wldev_iovar_getint(dev, "snr", &snr);
if (error) {
- DHD_ERROR(("%s: Failed to get SNR %d, error = %d\n",
- __FUNCTION__, snr, error));
+ DHD_ERROR(("wl_android_get_snr: Failed to get SNR %d, error = %d\n",
+ snr, error));
return -EIO;
}
bytes_written = snprintf(command, total_len, "snr %d", snr);
- DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ DHD_INFO(("wl_android_get_snr: command result is %s\n", command));
return bytes_written;
}
@@ -6012,8 +7874,7 @@
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
WL_DBG(("ifacename %s\n", name));
bytes_written = wl_get_ap_rps(dev, command, name, total_len);
@@ -6055,8 +7916,7 @@
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
WL_DBG(("enable %d, ifacename %s\n", enable, name));
err = wl_set_ap_rps(dev, enable? TRUE: FALSE, name);
@@ -6076,7 +7936,7 @@
int err = BCME_OK;
char name[IFNAMSIZ];
- memset(&rps, 0, sizeof(rps));
+ bzero(&rps, sizeof(rps));
/*
* DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
*/
@@ -6114,8 +7974,7 @@
if (!token)
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
WL_DBG(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
"ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
@@ -6140,6 +7999,56 @@
}
#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+static void
+wl_android_check_priv_cmd_errors(struct net_device *dev)
+{
+ dhd_pub_t *dhdp;
+ int memdump_mode;
+
+ if (!dev) {
+ WL_ERR(("dev is NULL\n"));
+ return;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (!dhdp) {
+ WL_ERR(("dhdp is NULL\n"));
+ return;
+ }
+
+#ifdef DHD_FW_COREDUMP
+ memdump_mode = dhdp->memdump_enabled;
+#else
+ /* Default enable if DHD doesn't support SOCRAM dump */
+ memdump_mode = 1;
+#endif /* DHD_FW_COREDUMP */
+
+ if (report_hang_privcmd_err) {
+ priv_cmd_errors++;
+ } else {
+ priv_cmd_errors = 0;
+ }
+
+ /* Trigger HANG event only if memdump mode is enabled
+ * due to customer's request
+ */
+ if (memdump_mode == DUMP_MEMFILE_BUGON &&
+ (priv_cmd_errors > NUMBER_SEQUENTIAL_PRIVCMD_ERRORS)) {
+ WL_ERR(("Send HANG event due to sequential private cmd errors\n"));
+ priv_cmd_errors = 0;
+#ifdef DHD_FW_COREDUMP
+ /* Take a SOCRAM dump */
+ dhdp->memdump_type = DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_common_socram_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+ /* Send the HANG event to upper layer */
+ dhdp->hang_reason = HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+ }
+}
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
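Taken together with the exit path added to wl_android_priv_cmd() later in this patch, the error accounting reduces to the policy sketched below. This is an editor's illustration only; the helper name and signature are hypothetical, the constants and globals are the driver's own.

static void track_priv_cmd_result(int ret, int memdump_mode)
{
	if (ret == 0) {
		priv_cmd_errors = 0;		/* success resets the streak */
		return;
	}
	if (ret == BCME_UNSUPPORTED)
		return;				/* unsupported commands are not counted */
	if (!report_hang_privcmd_err) {
		priv_cmd_errors = 0;		/* feature gated off: keep the counter at zero */
		return;
	}
	if (++priv_cmd_errors > NUMBER_SEQUENTIAL_PRIVCMD_ERRORS &&
		memdump_mode == DUMP_MEMFILE_BUGON) {
		/* optional SOCRAM dump, HANG event, and the streak restarts */
		priv_cmd_errors = 0;
	}
}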
+
#ifdef DHD_PKT_LOGGING
static int
wl_android_pktlog_filter_enable(struct net_device *dev, char *command, int total_len)
@@ -6305,6 +8214,40 @@
}
static int
+wl_android_pktlog_filter_del(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_ERROR(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_DEL) + 1 > total_len) {
+ DHD_PKT_LOG(("%s(): wrong cmd length %d found\n",
+ __FUNCTION__, (int)strlen(CMD_PKTLOG_FILTER_DEL)));
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_filter_del(filter, command + strlen(CMD_PKTLOG_FILTER_DEL) + 1);
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ DHD_ERROR(("%s: pktlog filter del success\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: pktlog filter del fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
wl_android_pktlog_filter_info(struct net_device *dev, char *command, int total_len)
{
int bytes_written = 0;
@@ -6345,14 +8288,13 @@
return -EINVAL;
}
- if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
- DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
- __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
return -EINVAL;
}
- dhdp->pktlog->tx_pktlog_ring->start = TRUE;
- dhdp->pktlog->rx_pktlog_ring->start = TRUE;
+ atomic_set(&dhdp->pktlog->pktlog_ring->start, TRUE);
bytes_written = snprintf(command, total_len, "OK");
@@ -6373,14 +8315,13 @@
return -EINVAL;
}
- if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
- DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
- __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): _pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
return -EINVAL;
}
- dhdp->pktlog->tx_pktlog_ring->start = FALSE;
- dhdp->pktlog->rx_pktlog_ring->start = FALSE;
+ atomic_set(&dhdp->pktlog->pktlog_ring->start, FALSE);
bytes_written = snprintf(command, total_len, "OK");
@@ -6436,14 +8377,13 @@
return -EINVAL;
}
- if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
- DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
- __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
return -EINVAL;
}
- dhdp->pktlog->tx_pktlog_ring->pktlog_minmize = TRUE;
- dhdp->pktlog->rx_pktlog_ring->pktlog_minmize = TRUE;
+ dhdp->pktlog->pktlog_ring->pktlog_minmize = TRUE;
bytes_written = snprintf(command, total_len, "OK");
@@ -6464,14 +8404,13 @@
return -EINVAL;
}
- if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
- DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
- __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
return -EINVAL;
}
- dhdp->pktlog->tx_pktlog_ring->pktlog_minmize = FALSE;
- dhdp->pktlog->rx_pktlog_ring->pktlog_minmize = FALSE;
+ dhdp->pktlog->pktlog_ring->pktlog_minmize = FALSE;
bytes_written = snprintf(command, total_len, "OK");
@@ -6500,15 +8439,9 @@
size = bcm_strtoul(command + strlen(CMD_PKTLOG_CHANGE_SIZE) + 1, NULL, 0);
- dhdp->pktlog->tx_pktlog_ring =
- dhd_pktlog_ring_change_size(dhdp->pktlog->tx_pktlog_ring, size);
- if (!dhdp->pktlog->tx_pktlog_ring) {
- err = BCME_ERROR;
- }
-
- dhdp->pktlog->rx_pktlog_ring =
- dhd_pktlog_ring_change_size(dhdp->pktlog->rx_pktlog_ring, size);
- if (!dhdp->pktlog->tx_pktlog_ring) {
+ dhdp->pktlog->pktlog_ring =
+ dhd_pktlog_ring_change_size(dhdp->pktlog->pktlog_ring, size);
+ if (!dhdp->pktlog->pktlog_ring) {
err = BCME_ERROR;
}
@@ -6526,23 +8459,45 @@
#ifdef DHD_EVENT_LOG_FILTER
uint32 dhd_event_log_filter_serialize(dhd_pub_t *dhdp, char *buf, uint32 tot_len, int type);
+
+#ifdef DHD_EWPR_VER2
+uint32 dhd_event_log_filter_serialize_bit(dhd_pub_t *dhdp, char *buf, uint32 tot_len,
+ int index1, int index2, int index3);
+#endif // endif
+
static int
wl_android_ewp_filter(struct net_device *dev, char *command, uint32 tot_len)
{
uint32 bytes_written = 0;
- int type = 0;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+#ifdef DHD_EWPR_VER2
+ int index1 = 0, index2 = 0, index3 = 0;
+ unsigned char *index_str = (unsigned char *)(command +
+ strlen(CMD_EWP_FILTER) + 1);
+#else
+ int type = 0;
+#endif // endif
if (!dhdp || !command) {
DHD_ERROR(("%s(): dhdp=%p \n", __FUNCTION__, dhdp));
return -EINVAL;
}
+#ifdef DHD_EWPR_VER2
+ if (strlen(command) > strlen(CMD_EWP_FILTER) + 1) {
+ sscanf(index_str, "%10d %10d %10d", &index1, &index2, &index3);
+ DHD_TRACE(("%s(): get index request: %d %d %d\n", __FUNCTION__,
+ index1, index2, index3));
+ }
+ bytes_written += dhd_event_log_filter_serialize_bit(dhdp,
+ &command[bytes_written], tot_len - bytes_written, index1, index2, index3);
+#else
/* NEED TO GET TYPE if EXIST */
type = 0;
bytes_written += dhd_event_log_filter_serialize(dhdp,
&command[bytes_written], tot_len - bytes_written, type);
+#endif // endif
return (int)bytes_written;
}
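A hypothetical invocation of the DHD_EWPR_VER2 path added above, assuming CMD_EWP_FILTER is the literal command keyword (the actual string is defined elsewhere in the driver):

/* Editor's usage sketch:
 *   "<CMD_EWP_FILTER> 1 2 3"  -> sscanf("%10d %10d %10d") yields
 *                                index1=1, index2=2, index3=3
 *   "<CMD_EWP_FILTER>"        -> no index string, all three stay 0
 * The serialized filter output is then written back into the command
 * buffer by dhd_event_log_filter_serialize_bit().
 */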
@@ -6578,7 +8533,7 @@
}
}
if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
- DHD_ERROR(("%s: buf length invalid:%d\n", __FUNCTION__,
+ DHD_ERROR(("wl_android_priv_cmd: buf length invalid:%d\n",
priv_cmd.total_len));
ret = -EINVAL;
goto exit;
@@ -6587,7 +8542,7 @@
buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
command = (char *)MALLOC(cfg->osh, (buf_size + 1));
if (!command) {
- DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_priv_cmd: failed to allocate memory\n"));
ret = -ENOMEM;
goto exit;
}
@@ -6597,7 +8552,8 @@
}
command[priv_cmd.total_len] = '\0';
- DHD_ERROR(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+ DHD_ERROR(("wl_android_priv_cmd: Android private cmd \"%s\" on %s\n",
+ command, ifr->ifr_name));
bytes_written = wl_handle_private_cmd(net, command, priv_cmd.total_len);
if (bytes_written >= 0) {
@@ -6605,16 +8561,15 @@
command[0] = '\0';
}
if (bytes_written >= priv_cmd.total_len) {
- DHD_ERROR(("%s: err. bytes_written:%d >= total_len:%d, buf_size:%d\n",
- __FUNCTION__, bytes_written, priv_cmd.total_len, buf_size));
-
+ DHD_ERROR(("wl_android_priv_cmd: err. bytes_written:%d >= total_len:%d,"
+ " buf_size:%d \n", bytes_written, priv_cmd.total_len, buf_size));
ret = BCME_BUFTOOSHORT;
goto exit;
}
bytes_written++;
priv_cmd.used_len = bytes_written;
if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
- DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+ DHD_ERROR(("wl_android_priv_cmd: failed to copy data to user buffer\n"));
ret = -EFAULT;
}
}
@@ -6624,6 +8579,16 @@
}
exit:
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+ if (ret) {
+ /* Avoid incrementing priv_cmd_errors in case of unsupported feature */
+ if (ret != BCME_UNSUPPORTED) {
+ wl_android_check_priv_cmd_errors(net);
+ }
+ } else {
+ priv_cmd_errors = 0;
+ }
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
net_os_wake_unlock(net);
MFREE(cfg->osh, command, (buf_size + 1));
return ret;
@@ -6644,7 +8609,7 @@
WL_ERR(("%s: SET_ADPS %d\n", __FUNCTION__, adps_mode));
if ((adps_mode < 0) && (1 < adps_mode)) {
- WL_ERR(("%s: Invalid value %d.\n", __FUNCTION__, adps_mode));
+ WL_ERR(("wl_android_set_adps_mode: Invalid value %d.\n", adps_mode));
return -EINVAL;
}
@@ -6660,7 +8625,7 @@
struct net_device *dev, char *command, int total_len)
{
int bytes_written, err = 0;
- int len;
+ uint len;
char buf[WLC_IOCTL_SMLEN];
bcm_iov_buf_t iov_buf;
@@ -6670,9 +8635,9 @@
uint8 *pdata = NULL;
uint8 band, mode = 0;
- memset(&iov_buf, 0, sizeof(iov_buf));
+ bzero(&iov_buf, sizeof(iov_buf));
- len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(band);
iov_buf.version = WL_ADPS_IOV_VER;
iov_buf.len = sizeof(band);
@@ -6685,8 +8650,8 @@
err = wldev_iovar_getbuf(dev, "adps", &iov_buf, len,
buf, WLC_IOCTL_SMLEN, NULL);
if (err != BCME_OK) {
- WL_ERR(("%s fail to get adps band %d(%d).\n",
- __FUNCTION__, band, err));
+ WL_ERR(("wl_android_get_adps_mode fail to get adps band %d(%d).\n",
+ band, err));
return -EIO;
}
ptr = (bcm_iov_buf_t *) buf;
@@ -6703,6 +8668,407 @@
}
#endif /* WLADPS_PRIVATE_CMD */
+#ifdef WL_BCNRECV
+#define BCNRECV_ATTR_HDR_LEN 30
+int
+wl_android_bcnrecv_event(struct net_device *ndev, uint attr_type,
+ uint status, uint reason, uint8 *data, uint data_len)
+{
+ s32 err = BCME_OK;
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+
+ len = BCNRECV_ATTR_HDR_LEN + data_len;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_BEACON_RECV, kflags);
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+ if ((attr_type == BCNRECV_ATTR_BCNINFO) && (data)) {
+ /* send bcn info to upper layer */
+ nla_put(skb, BCNRECV_ATTR_BCNINFO, data_len, data);
+ } else if (attr_type == BCNRECV_ATTR_STATUS) {
+ nla_put_u32(skb, BCNRECV_ATTR_STATUS, status);
+ if (reason) {
+ nla_put_u32(skb, BCNRECV_ATTR_REASON, reason);
+ }
+ } else {
+ WL_ERR(("UNKNOWN ATTR_TYPE. attr_type:%d\n", attr_type));
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ cfg80211_vendor_event(skb, kflags);
+ return err;
+}
+
+static int
+_wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool user_trigger)
+{
+ s32 err = BCME_OK;
+
+ /* Check whether any scan is in progress before triggering the beacon recv scan IOVAR */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("P2P Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ if (wl_get_drv_status(cfg, REMAINING_ON_CHANNEL, ndev)) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("P2P remain on channel, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ /* Check that the STA is associated; beacon recv requires a connected
+ * state, else exit from the beacon recv scan
+ */
+ if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("STA is in not associated state error:%d\n", err));
+ goto exit;
+ }
+
+#ifdef WL_NAN
+ /* Check whether NAN is enabled; if it is, exit, else continue */
+ if (wl_cfgnan_check_state(cfg)) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("Nan is enabled, NAN+STA+FAKEAP concurrency is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* Trigger the sendup_bcn iovar */
+ err = wldev_iovar_setint(ndev, "sendup_bcn", 1);
+ if (unlikely(err)) {
+ WL_ERR(("sendup_bcn failed to set, error:%d\n", err));
+ } else {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STARTED;
+ WL_INFORM_MEM(("bcnrecv started. user_trigger:%d\n", user_trigger));
+ if (user_trigger) {
+ if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS,
+ WL_BCNRECV_STARTED, 0, NULL, 0)) != BCME_OK) {
+ WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+ }
+ }
+exit:
+ /*
+ * A BCNRECV start request can be rejected by the dongle
+ * under various conditions.
+ * The error code needs to be overridden to BCME_UNSUPPORTED
+ * to avoid a hang event caused by continuous private
+ * command errors.
+ */
+ if (err) {
+ err = BCME_UNSUPPORTED;
+ }
+ return err;
+}
+
+int
+_wl_android_bcnrecv_stop(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint reason)
+{
+ s32 err = BCME_OK;
+ u32 status;
+
+ /* Send the sendup_bcn iovar for all cases except the WL_BCNRECV_ROAMABORT reason -
+ * the fw generates a roam abort event after aborting the bcnrecv.
+ */
+ if (reason != WL_BCNRECV_ROAMABORT) {
+ /* Trigger the sendup_bcn iovar */
+ err = wldev_iovar_setint(ndev, "sendup_bcn", 0);
+ if (unlikely(err)) {
+ WL_ERR(("sendup_bcn failed to set error:%d\n", err));
+ goto exit;
+ }
+ }
+
+ /* Send notification for all cases */
+ if (reason == WL_BCNRECV_SUSPEND) {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_SUSPENDED;
+ status = WL_BCNRECV_SUSPENDED;
+ } else {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STOPPED;
+ WL_INFORM_MEM(("bcnrecv stopped\n"));
+ if (reason == WL_BCNRECV_USER_TRIGGER) {
+ status = WL_BCNRECV_STOPPED;
+ } else {
+ status = WL_BCNRECV_ABORTED;
+ }
+ }
+ if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS, status,
+ reason, NULL, 0)) != BCME_OK) {
+ WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+exit:
+ return err;
+}
+
+static int
+wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ s32 err = BCME_OK;
+
+ /* Take the scan_sync mutex to avoid a race between scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
+ mutex_lock(&cfg->bcn_sync);
+ err = _wl_android_bcnrecv_start(cfg, ndev, true);
+ mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
+
+int
+wl_android_bcnrecv_stop(struct net_device *ndev, uint reason)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ mutex_lock(&cfg->bcn_sync);
+ if ((cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) ||
+ (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED)) {
+ err = _wl_android_bcnrecv_stop(cfg, ndev, reason);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ return err;
+}
+
+int
+wl_android_bcnrecv_suspend(struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ mutex_lock(&cfg->bcn_sync);
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
+ WL_INFORM_MEM(("bcnrecv suspend\n"));
+ ret = _wl_android_bcnrecv_stop(cfg, ndev, WL_BCNRECV_SUSPEND);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ return ret;
+}
+
+int
+wl_android_bcnrecv_resume(struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ /* Take the scan_sync mutex to avoid a race between scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
+ mutex_lock(&cfg->bcn_sync);
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED) {
+ WL_INFORM_MEM(("bcnrecv resume\n"));
+ ret = _wl_android_bcnrecv_start(cfg, ndev, false);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
+ return ret;
+}
+
+/* Beacon recv functionality code implementation */
+int
+wl_android_bcnrecv_config(struct net_device *ndev, char *cmd_argv, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ uint err = BCME_OK;
+
+ if (!ndev) {
+ WL_ERR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* sync commands from user space */
+ mutex_lock(&cfg->usr_sync);
+ if (strncmp(cmd_argv, "start", strlen("start")) == 0) {
+ WL_INFORM(("BCNRECV start\n"));
+ err = wl_android_bcnrecv_start(cfg, ndev);
+ if (err != BCME_OK) {
+ WL_ERR(("Failed to process the start command, error:%d\n", err));
+ goto exit;
+ }
+ } else if (strncmp(cmd_argv, "stop", strlen("stop")) == 0) {
+ WL_INFORM(("BCNRECV stop\n"));
+ err = wl_android_bcnrecv_stop(ndev, WL_BCNRECV_USER_TRIGGER);
+ if (err != BCME_OK) {
+ WL_ERR(("Failed to stop the bcn recv, error:%d\n", err));
+ goto exit;
+ }
+ } else {
+ err = BCME_ERROR;
+ }
+exit:
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+#endif /* WL_BCNRECV */
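The four entry points above form a small state machine. The summary below is an editor's reading of the code, not text from the patch.

/* Editor's sketch of bcnrecv_state transitions:
 *
 *   STOPPED    --"start" (associated STA, no scan/ROC/NAN)--> STARTED
 *   STARTED    --"stop" or any non-suspend abort reason------> STOPPED
 *   STARTED    --suspend-------------------------------------> SUSPENDED
 *   SUSPENDED  --resume--------------------------------------> STARTED
 *   SUSPENDED  --"stop"--------------------------------------> STOPPED
 *
 * Stop and suspend always notify user space through
 * wl_android_bcnrecv_event(); start notifies only when user triggered.
 */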
+
+#ifdef WL_CAC_TS
+/* CAC TSPEC functionality code implementation */
+static void
+wl_android_update_tsinfo(uint8 access_category, tspec_arg_t *tspec_arg)
+{
+ uint8 tspec_id;
+ /* Using direction as bidirectional by default */
+ uint8 direction = TSPEC_BI_DIRECTION;
+ /* Using U-APSD as the default power save mode */
+ uint8 user_psb = TSPEC_UAPSD_PSB;
+ uint8 ADDTS_AC2PRIO[4] = {PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_VI, PRIO_8021D_VO};
+
+ /* Map tspec_id from access category */
+ tspec_id = ADDTS_AC2PRIO[access_category];
+
+ /* Update the tsinfo */
+ tspec_arg->tsinfo.octets[0] = (uint8)(TSPEC_EDCA_ACCESS | direction |
+ (tspec_id << TSPEC_TSINFO_TID_SHIFT));
+ tspec_arg->tsinfo.octets[1] = (uint8)((tspec_id << TSPEC_TSINFO_PRIO_SHIFT) |
+ user_psb);
+ tspec_arg->tsinfo.octets[2] = 0x00;
+}
+
+static s32
+wl_android_handle_cac_action(struct bcm_cfg80211 * cfg, struct net_device * ndev, char * argv)
+{
+ tspec_arg_t tspec_arg;
+ s32 err = BCME_ERROR;
+ u8 ts_cmd[12] = "cac_addts";
+ uint8 access_category;
+ s32 bssidx;
+
+ /* The following handling is done only for the primary interface */
+ memset_s(&tspec_arg, sizeof(tspec_arg), 0, sizeof(tspec_arg));
+ if (strncmp(argv, "addts", strlen("addts")) == 0) {
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu %hu %hu", &access_category,
+ &tspec_arg.nom_msdu_size, &tspec_arg.surplus_bw);
+ if ((access_category > TSPEC_MAX_ACCESS_CATEGORY) ||
+ ((tspec_arg.surplus_bw < TSPEC_MIN_SURPLUS_BW) ||
+ (tspec_arg.surplus_bw > TSPEC_MAX_SURPLUS_BW)) ||
+ (tspec_arg.nom_msdu_size > TSPEC_MAX_MSDU_SIZE)) {
+ WL_ERR(("Invalid params access_category %hhu nom_msdu_size %hu"
+ " surplus BW %hu\n", access_category, tspec_arg.nom_msdu_size,
+ tspec_arg.surplus_bw));
+ return BCME_USAGE_ERROR;
+ }
+
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ /* Update other tspec parameters */
+ tspec_arg.dialog_token = TSPEC_DEF_DIALOG_TOKEN;
+ tspec_arg.mean_data_rate = TSPEC_DEF_MEAN_DATA_RATE;
+ tspec_arg.min_phy_rate = TSPEC_DEF_MIN_PHY_RATE;
+ } else if (strncmp(argv, "delts", strlen("delts")) == 0) {
+ snprintf(ts_cmd, sizeof(ts_cmd), "cac_delts");
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu", &access_category);
+
+ if (access_category > TSPEC_MAX_ACCESS_CATEGORY) {
+ WL_INFORM_MEM(("Invalide param, access_category %hhu\n", access_category));
+ return BCME_USAGE_ERROR;
+ }
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ err = BCME_ERROR;
+ return err;
+ }
+ err = wldev_iovar_setbuf_bsscfg(ndev, ts_cmd, &tspec_arg, sizeof(tspec_arg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("%s error (%d)\n", ts_cmd, err));
+ }
+
+ return err;
+}
+
+static s32
+wl_android_cac_ts_config(struct net_device *ndev, char *cmd_argv, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ s32 err = BCME_OK;
+
+ if (!ndev) {
+ WL_ERR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* Request supported only for primary interface */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+ WL_ERR(("Request on non-primary interface\n"));
+ return -1;
+ }
+
+ /* sync commands from user space */
+ mutex_lock(&cfg->usr_sync);
+ err = wl_android_handle_cac_action(cfg, ndev, cmd_argv);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+#endif /* WL_CAC_TS */
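The argument layout expected by wl_android_handle_cac_action() is easiest to see with a concrete, hypothetical command; the CAC_TSPEC keyword is assumed to be the literal behind CMD_CAC_TSPEC in the dispatcher below, and the numbers are examples only.

/* Editor's usage sketch:
 *   CAC_TSPEC addts <ac> <nom_msdu_size> <surplus_bw>
 *       e.g. "CAC_TSPEC addts 3 1500 125" - access category 3,
 *       nominal MSDU size 1500, surplus bandwidth 125 (surplus_bw must
 *       lie in [TSPEC_MIN_SURPLUS_BW, TSPEC_MAX_SURPLUS_BW])
 *   CAC_TSPEC delts <ac>
 *       e.g. "CAC_TSPEC delts 3"
 * Both paths are sent as the "cac_addts"/"cac_delts" iovars and are
 * accepted on the primary interface only.
 */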
+
+#ifdef WL_GET_CU
+/* Implementation to get channel usage from framework */
+static s32
+wl_android_get_channel_util(struct net_device *ndev, char *command, int total_len)
+{
+ s32 bytes_written, err = 0;
+ wl_bssload_t bssload;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u8 chan_use_percentage = 0;
+
+ if ((err = wldev_iovar_getbuf(ndev, "bssload_report", NULL,
+ 0, smbuf, WLC_IOCTL_SMLEN, NULL))) {
+ WL_ERR(("Getting bssload report failed with err=%d \n", err));
+ return err;
+ }
+
+ (void)memcpy_s(&bssload, sizeof(wl_bssload_t), smbuf, sizeof(wl_bssload_t));
+ /* Convert channel usage to percentage value */
+ chan_use_percentage = (bssload.chan_util * 100) / 255;
+
+ bytes_written = snprintf(command, total_len, "CU %hhu",
+ chan_use_percentage);
+ WL_DBG(("Channel Utilization %u %u\n", bssload.chan_util, chan_use_percentage));
+
+ return bytes_written;
+}
+#endif /* WL_GET_CU */
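A few worked values for the conversion above (editor's note; bssload.chan_util is the 8-bit channel-utilization field, so 255 means fully loaded):

/* Editor's worked example:
 *   bssload.chan_util =  51  ->  51 * 100 / 255 = 20   -> "CU 20"
 *   bssload.chan_util = 128  -> 128 * 100 / 255 = 50   -> "CU 50"
 *   bssload.chan_util = 255  -> 255 * 100 / 255 = 100  -> "CU 100"
 * Integer division truncates, so the reported percentage can be up to
 * one point below the exact ratio.
 */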
+
int
wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
{
@@ -6713,7 +9079,7 @@
priv_cmd.total_len = cmd_len;
if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
- DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+ DHD_INFO(("wl_handle_private_cmd, Received regular START command\n"));
#ifdef SUPPORT_DEEP_SLEEP
trigger_deep_sleep = 1;
#else
@@ -6729,8 +9095,8 @@
}
if (!g_wifi_on) {
- DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface is down\n",
- __FUNCTION__, command));
+ DHD_ERROR(("wl_handle_private_cmd: Ignore private cmd \"%s\" - iface is down\n",
+ command));
return 0;
}
@@ -6745,6 +9111,7 @@
#endif /* BT_OVER_SDIO */
#endif /* SUPPORT_DEEP_SLEEP */
}
+#ifdef AUTOMOTIVE_FEATURE
else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
wl_cfg80211_set_passive_scan(net, command);
}
@@ -6757,6 +9124,7 @@
else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
}
+#endif /* AUTOMOTIVE_FEATURE */
#ifdef PKT_FILTER_SUPPORT
else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
bytes_written = net_os_enable_packet_filter(net, 1);
@@ -6800,36 +9168,29 @@
else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
bytes_written = wl_android_set_suspendmode(net, command);
}
+ else if (strnicmp(command, CMD_SETDTIM_IN_SUSPEND, strlen(CMD_SETDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_bcn_li_dtim(net, command);
+ }
else if (strnicmp(command, CMD_MAXDTIM_IN_SUSPEND, strlen(CMD_MAXDTIM_IN_SUSPEND)) == 0) {
bytes_written = wl_android_set_max_dtim(net, command);
}
+#ifdef DISABLE_DTIM_IN_SUSPEND
+ else if (strnicmp(command, CMD_DISDTIM_IN_SUSPEND, strlen(CMD_DISDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_disable_dtim_in_suspend(net, command);
+ }
+#endif /* DISABLE_DTIM_IN_SUSPEND */
else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
- uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
- if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If roam_var is unsupported, fallback to the original method */
- WL_ERR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar unsupported in the firmware\n"));
- } else {
- bytes_written = -1;
- }
- }
- if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
- /* Apply if roam_band iovar is not supported or band setting is AUTO */
- bytes_written = wldev_set_band(net, band);
- }
-#else
- bytes_written = wl_cfg80211_set_if_band(net, band);
-#endif /* WL_HOST_BAND_MGMT */
-#ifdef ROAM_CHANNEL_CACHE
- wl_update_roamscan_cache_by_band(net, band);
-#endif /* ROAM_CHANNEL_CACHE */
+ bytes_written = wl_android_set_band(net, command);
}
else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
}
+ else if (strnicmp(command, CMD_ADDIE, strlen(CMD_ADDIE)) == 0) {
+ bytes_written = wl_android_add_vendor_ie(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_DELIE, strlen(CMD_DELIE)) == 0) {
+ bytes_written = wl_android_del_vendor_ie(net, command, priv_cmd.total_len);
+ }
#ifdef WL_CFG80211
#ifndef CUSTOMER_SET_COUNTRY
/* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */
@@ -6855,7 +9216,16 @@
(rev_info_delim + 1)) {
revinfo = bcm_atoi(rev_info_delim + 1);
}
- bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
+#ifdef SAVE_CONNECTION_WHEN_CC_UPDATE
+ wl_check_valid_channel_in_country(net, country_code, true);
+ bytes_written = wl_cfg80211_set_country_code(net, country_code,
+ true, false, revinfo);
+
+ wl_update_ap_chandef(net);
+#else
+ bytes_written = wl_cfg80211_set_country_code(net, country_code,
+ true, true, revinfo);
+#endif // endif
#ifdef CUSTOMER_HW4_PRIVATE_CMD
#ifdef FCC_PWR_LIMIT_2G
if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
@@ -6868,19 +9238,27 @@
}
#endif /* CUSTOMER_SET_COUNTRY */
#endif /* WL_CFG80211 */
- else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
+ else if (strnicmp(command, CMD_CHANNELS_IN_CC, strlen(CMD_CHANNELS_IN_CC)) == 0) {
+ bytes_written = wl_android_get_channel_list(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
bytes_written = wl_android_set_csa(net, command);
- } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
- bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
} else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
- } else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
+ }
+#ifdef AUTOMOTIVE_FEATURE
+ else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
- } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
+ } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
+ bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
}
+#endif /* AUTOMOTIVE_FEATURE */
+ else if (strnicmp(command, CMD_RSDB_MODE, strlen(CMD_RSDB_MODE)) == 0) {
+ bytes_written = wl_android_get_rsdb_mode(net, command, priv_cmd.total_len);
+ }
-#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#if defined(CUSTOMER_HW4_PRIVATE_CMD) || defined(IGUANA_LEGACY_CHIPS)
#ifdef ROAM_API
else if (strnicmp(command, CMD_ROAMTRIGGER_SET,
strlen(CMD_ROAMTRIGGER_SET)) == 0) {
@@ -6911,14 +9289,17 @@
strlen(CMD_FULLROAMSCANPERIOD_GET)) == 0) {
bytes_written = wl_android_get_full_roam_scan_period(net, command,
priv_cmd.total_len);
- } else if (strnicmp(command, CMD_COUNTRYREV_SET,
+ }
+#ifdef AUTOMOTIVE_FEATURE
+ else if (strnicmp(command, CMD_COUNTRYREV_SET,
strlen(CMD_COUNTRYREV_SET)) == 0) {
bytes_written = wl_android_set_country_rev(net, command);
#ifdef FCC_PWR_LIMIT_2G
if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
- DHD_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__));
+ DHD_ERROR(("wl_handle_private_cmd: fccpwrlimit2g"
+ " deactivation is failed\n"));
} else {
- DHD_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+ DHD_ERROR(("wl_handle_private_cmd: fccpwrlimit2g is deactivated\n"));
}
#endif /* FCC_PWR_LIMIT_2G */
} else if (strnicmp(command, CMD_COUNTRYREV_GET,
@@ -6926,6 +9307,7 @@
bytes_written = wl_android_get_country_rev(net, command,
priv_cmd.total_len);
}
+#endif /* AUTOMOTIVE_FEATURE */
#endif /* ROAM_API */
#ifdef WES_SUPPORT
else if (strnicmp(command, CMD_GETROAMSCANCONTROL, strlen(CMD_GETROAMSCANCONTROL)) == 0) {
@@ -7012,30 +9394,40 @@
bytes_written = wl_android_okc_enable(net, command);
}
#endif /* WES_SUPPORT */
+#ifdef SUPPORT_RESTORE_SCAN_PARAMS
+ else if (strnicmp(command, CMD_RESTORE_SCAN_PARAMS, strlen(CMD_RESTORE_SCAN_PARAMS)) == 0) {
+ bytes_written = wl_android_restore_scan_params(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS */
#ifdef WLTDLS
else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
bytes_written = wl_android_tdls_reset(net);
}
#endif /* WLTDLS */
+#ifdef CONFIG_SILENT_ROAM
+ else if (strnicmp(command, CMD_SROAM_TURN_ON, strlen(CMD_SROAM_TURN_ON)) == 0) {
+ int skip = strlen(CMD_SROAM_TURN_ON) + 1;
+ bytes_written = wl_android_sroam_turn_on(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SROAM_SET_INFO, strlen(CMD_SROAM_SET_INFO)) == 0) {
+ char *data = (command + strlen(CMD_SROAM_SET_INFO) + 1);
+ bytes_written = wl_android_sroam_set_info(net, data, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SROAM_GET_INFO, strlen(CMD_SROAM_GET_INFO)) == 0) {
+ bytes_written = wl_android_sroam_get_info(net, command, priv_cmd.total_len);
+ }
+#endif /* CONFIG_SILENT_ROAM */
+ else if (strnicmp(command, CMD_SET_DISCONNECT_IES, strlen(CMD_SET_DISCONNECT_IES)) == 0) {
+ bytes_written = wl_android_set_disconnect_ies(net, command);
+ }
#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#ifdef PNO_SUPPORT
else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
-#if defined(SUPPORT_RANDOM_MAC_SCAN)
- wl_cfg80211_set_random_mac(net, FALSE);
-#endif /* SUPPORT_RANDOM_MAC_SCAN */
bytes_written = dhd_dev_pno_stop_for_ssid(net);
}
#ifndef WL_SCHED_SCAN
else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
-#if defined(SUPPORT_RANDOM_MAC_SCAN)
- int res = BCME_ERROR;
- res = wl_cfg80211_set_random_mac(net, TRUE);
- if (res < 0 && res != BCME_UNSUPPORTED) {
- DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n",
- __FUNCTION__, res));
- }
-#endif /* SUPPORT_RANDOM_MAC_SCAN */
bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
}
#endif /* !WL_SCHED_SCAN */
@@ -7095,17 +9487,6 @@
}
#endif /* WLFBT */
#endif /* WL_CFG80211 */
-#ifdef BCMCCX
- else if (strnicmp(command, CMD_GETCCKM_RN, strlen(CMD_GETCCKM_RN)) == 0) {
- bytes_written = wl_android_get_cckm_rn(net, command);
- }
- else if (strnicmp(command, CMD_SETCCKM_KRK, strlen(CMD_SETCCKM_KRK)) == 0) {
- bytes_written = wl_android_set_cckm_krk(net, command, priv_cmd.total_len);
- }
- else if (strnicmp(command, CMD_GET_ASSOC_RES_IES, strlen(CMD_GET_ASSOC_RES_IES)) == 0) {
- bytes_written = wl_android_get_assoc_res_ies(net, command, priv_cmd.total_len);
- }
-#endif /* BCMCCX */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
strlen(CMD_GET_BEST_CHANNELS)) == 0) {
@@ -7129,23 +9510,6 @@
bytes_written = wl_android_set_ampdu_mpdu(net, (const char*)command+skip);
}
#endif /* SUPPORT_AMPDU_MPDU_CMD */
-#if defined(SUPPORT_HIDDEN_AP)
- else if (strnicmp(command, CMD_SET_HAPD_MAX_NUM_STA,
- strlen(CMD_SET_HAPD_MAX_NUM_STA)) == 0) {
- int skip = strlen(CMD_SET_HAPD_MAX_NUM_STA) + 3;
- wl_android_set_max_num_sta(net, (const char*)command+skip);
- }
- else if (strnicmp(command, CMD_SET_HAPD_SSID,
- strlen(CMD_SET_HAPD_SSID)) == 0) {
- int skip = strlen(CMD_SET_HAPD_SSID) + 3;
- wl_android_set_ssid(net, (const char*)command+skip);
- }
- else if (strnicmp(command, CMD_SET_HAPD_HIDE_SSID,
- strlen(CMD_SET_HAPD_HIDE_SSID)) == 0) {
- int skip = strlen(CMD_SET_HAPD_HIDE_SSID) + 3;
- wl_android_set_hide_ssid(net, (const char*)command+skip);
- }
-#endif /* SUPPORT_HIDDEN_AP */
#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
else if (strnicmp(command, CMD_HAPD_STA_DISASSOC,
strlen(CMD_HAPD_STA_DISASSOC)) == 0) {
@@ -7227,13 +9591,40 @@
else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL,
strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) {
int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1;
- wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
+ bytes_written = wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
}
+#ifdef SUPPORT_SET_TID
+ else if (strnicmp(command, CMD_SET_TID, strlen(CMD_SET_TID)) == 0) {
+ bytes_written = wl_android_set_tid(net, command);
+ }
+ else if (strnicmp(command, CMD_GET_TID, strlen(CMD_GET_TID)) == 0) {
+ bytes_written = wl_android_get_tid(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_SET_TID */
#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+#if defined(SUPPORT_HIDDEN_AP)
+ else if (strnicmp(command, CMD_SET_HAPD_MAX_NUM_STA,
+ strlen(CMD_SET_HAPD_MAX_NUM_STA)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_MAX_NUM_STA) + 3;
+ wl_android_set_max_num_sta(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_HAPD_SSID,
+ strlen(CMD_SET_HAPD_SSID)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_SSID) + 3;
+ wl_android_set_ssid(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_HAPD_HIDE_SSID,
+ strlen(CMD_SET_HAPD_HIDE_SSID)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_HIDE_SSID) + 1;
+ wl_android_set_hide_ssid(net, (const char*)(command+skip));
+ }
+#endif /* SUPPORT_HIDDEN_AP */
+#ifdef AUTOMOTIVE_FEATURE
else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
wl_android_set_mac_address_filter(net, command+skip);
}
+#endif /* AUTOMOTIVE_FEATURE */
else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
bytes_written = wl_android_set_roam_mode(net, command);
#if defined(BCMFW_ROAM_ENABLE)
@@ -7344,6 +9735,11 @@
bytes_written = wl_cfg80211_wbtext_btm_delta(net, command,
priv_cmd.total_len);
}
+ else if (strnicmp(command, CMD_WBTEXT_ESTM_ENABLE,
+ strlen(CMD_WBTEXT_ESTM_ENABLE)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_estm_enable(net, command,
+ priv_cmd.total_len);
+ }
#endif /* WBTEXT */
#ifdef SET_RPS_CPUS
else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
@@ -7434,6 +9830,32 @@
bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
}
#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef DHD_BANDSTEER
+ else if (strnicmp(command, CMD_BANDSTEER, strlen(CMD_BANDSTEER)) == 0) {
+ bytes_written = wl_android_set_bandsteer(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_BANDSTEER_TRIGGER, strlen(CMD_BANDSTEER_TRIGGER)) == 0) {
+ uint8 *p = command + strlen(CMD_BANDSTEER_TRIGGER)+1;
+ struct ether_addr ea;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ bytes_written = 0;
+
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+ strncpy(eabuf, p, ETHER_ADDR_STR_LEN - 1);
+
+ if (!bcm_ether_atoe(eabuf, &ea)) {
+ DHD_ERROR(("BANDSTEER: ERROR while parsing macaddr cmd %s - ignored\n",
+ command));
+ return BCME_BADARG;
+ }
+ bytes_written = dhd_bandsteer_trigger_bandsteer(net, ea.octet);
+ }
+#endif /* DHD_BANDSTEER */
+#ifdef ENABLE_HOGSQS
+ else if (strnicmp(command, CMD_AP_HOGSQS, strlen(CMD_AP_HOGSQS)) == 0) {
+ bytes_written = wl_android_hogsqs(net, command, priv_cmd.total_len);
+ }
+#endif /* ENABLE_HOGSQS */
#ifdef WL_NATOE
else if (strnicmp(command, CMD_NATOE, strlen(CMD_NATOE)) == 0) {
bytes_written = wl_android_process_natoe_cmd(net, command,
@@ -7455,10 +9877,10 @@
if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP), " ", 1) == 0) {
/* compare unwanted/disconnected command */
if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
+ SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
dhd_log_dump_trigger(dhdp, CMD_UNWANTED);
} else if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
+ SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
dhd_log_dump_trigger(dhdp, CMD_DISCONNECTED);
} else {
dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
@@ -7468,10 +9890,19 @@
}
}
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_STATUS_LOGGING
+ else if (strnicmp(command, CMD_DUMP_STATUS_LOG, strlen(CMD_DUMP_STATUS_LOG)) == 0) {
+ dhd_statlog_dump_scr(wl_cfg80211_get_dhdp(net));
+ }
+ else if (strnicmp(command, CMD_QUERY_STATUS_LOG, strlen(CMD_QUERY_STATUS_LOG)) == 0) {
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+ bytes_written = dhd_statlog_query(dhdp, command, priv_cmd.total_len);
+ }
+#endif /* DHD_STATUS_LOGGING */
#ifdef SET_PCIE_IRQ_CPU_CORE
else if (strnicmp(command, CMD_PCIE_IRQ_CORE, strlen(CMD_PCIE_IRQ_CORE)) == 0) {
- int set = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
- wl_android_set_irq_cpucore(net, set);
+ int affinity_cmd = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
+ wl_android_set_irq_cpucore(net, affinity_cmd);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
#if defined(DHD_HANG_SEND_UP_TEST)
@@ -7525,6 +9956,9 @@
else if (strnicmp(command, CMD_PKTLOG_FILTER_ADD, strlen(CMD_PKTLOG_FILTER_ADD)) == 0) {
bytes_written = wl_android_pktlog_filter_add(net, command, priv_cmd.total_len);
}
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_DEL, strlen(CMD_PKTLOG_FILTER_DEL)) == 0) {
+ bytes_written = wl_android_pktlog_filter_del(net, command, priv_cmd.total_len);
+ }
else if (strnicmp(command, CMD_PKTLOG_FILTER_INFO, strlen(CMD_PKTLOG_FILTER_INFO)) == 0) {
bytes_written = wl_android_pktlog_filter_info(net, command, priv_cmd.total_len);
}
@@ -7560,6 +9994,42 @@
bytes_written = wl_android_ewp_filter(net, command, priv_cmd.total_len);
}
#endif /* DHD_EVENT_LOG_FILTER */
+#ifdef WL_BCNRECV
+ else if (strnicmp(command, CMD_BEACON_RECV,
+ strlen(CMD_BEACON_RECV)) == 0) {
+ char *data = (command + strlen(CMD_BEACON_RECV) + 1);
+ bytes_written = wl_android_bcnrecv_config(net,
+ data, priv_cmd.total_len);
+ }
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ else if (strnicmp(command, CMD_MBO, strlen(CMD_MBO)) == 0) {
+ bytes_written = wl_android_process_mbo_cmd(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ else if (strnicmp(command, CMD_CAC_TSPEC,
+ strlen(CMD_CAC_TSPEC)) == 0) {
+ char *data = (command + strlen(CMD_CAC_TSPEC) + 1);
+ bytes_written = wl_android_cac_ts_config(net,
+ data, priv_cmd.total_len);
+ }
+#endif /* WL_CAC_TS */
+#ifdef WL_GET_CU
+ else if (strnicmp(command, CMD_GET_CHAN_UTIL,
+ strlen(CMD_GET_CHAN_UTIL)) == 0) {
+ bytes_written = wl_android_get_channel_util(net,
+ command, priv_cmd.total_len);
+ }
+#endif /* WL_GET_CU */
+ else if (strnicmp(command, CMD_CHANNEL_WIDTH, strlen(CMD_CHANNEL_WIDTH)) == 0) {
+ bytes_written = wl_android_set_channel_width(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_TRANSITION_DISABLE, strlen(CMD_TRANSITION_DISABLE)) == 0) {
+ int transition_disabled = *(command + strlen(CMD_TRANSITION_DISABLE) + 1) - '0';
+ bytes_written = wl_cfg80211_set_transition_mode(net, transition_disabled);
+ }
else {
DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
@@ -7576,7 +10046,7 @@
dhd_download_fw_on_driverload = FALSE;
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
if (!iface_name[0]) {
- memset(iface_name, 0, IFNAMSIZ);
+ bzero(iface_name, IFNAMSIZ);
bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
}
@@ -7602,17 +10072,12 @@
#endif /* WL_GENL */
wl_netlink_deinit();
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&cur->list);
kfree(cur);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ret;
}
@@ -7622,7 +10087,7 @@
#ifdef ENABLE_4335BT_WAR
bcm_bt_unlock(lock_cookie_wifi);
- printk("%s: btlock released\n", __FUNCTION__);
+ printk("wl_android_post_init: btlock released\n");
#endif /* ENABLE_4335BT_WAR */
if (!dhd_download_fw_on_driverload)
@@ -7751,7 +10216,7 @@
int pid = 0;
u8 *ptr = NULL, *p = NULL;
u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
WL_DBG(("Enter \n"));
@@ -7915,16 +10380,26 @@
#ifdef WL_STATIC_IF
struct net_device *
-wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg, u16 iftype, char *ifname)
+wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg, u16 iftype, char *ifname, int ifidx)
{
struct net_device *ndev;
struct wireless_dev *wdev = NULL;
- int ifidx = WL_STATIC_IFIDX; /* Register ndev with a reserved ifidx */
- s32 mode;
+ u8 mac_addr[ETH_ALEN];
+ struct net_device *primary_ndev;
WL_INFORM_MEM(("[STATIC_IF] Enter (%s) iftype:%d\n", ifname, iftype));
- ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, NULL,
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return NULL;
+ }
+
+ /* Use the primary MAC with the locally administered bit set */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ (void)memcpy_s(mac_addr, ETH_ALEN, primary_ndev->dev_addr, ETH_ALEN);
+ mac_addr[0] |= 0x02;
+
+ ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, mac_addr,
WL_BSSIDX_MAX, NULL);
if (unlikely(!ndev)) {
WL_ERR(("Failed to allocate static_if\n"));
@@ -7937,9 +10412,7 @@
}
wdev->wiphy = cfg->wdev->wiphy;
-
- mode = wl_iftype_to_mode(iftype);
- wdev->iftype = wl_mode_to_nl80211_iftype(mode);
+ wdev->iftype = iftype;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
@@ -7951,8 +10424,8 @@
goto fail;
}
- cfg->static_ndev = ndev;
- cfg->static_ndev_state = NDEV_STATE_OS_IF_CREATED;
+ cfg->static_ndev[ifidx - DHD_MAX_IFS] = ndev;
+ cfg->static_ndev_state[ifidx - DHD_MAX_IFS] = NDEV_STATE_OS_IF_CREATED;
wl_cfg80211_update_iflist_info(cfg, ndev, ifidx, NULL, WL_BSSIDX_MAX,
ifname, NDEV_STATE_OS_IF_CREATED);
WL_INFORM_MEM(("Static I/F (%s) Registered\n", ndev->name));
@@ -7966,16 +10439,19 @@
void
wl_cfg80211_unregister_static_if(struct bcm_cfg80211 *cfg)
{
+ int i = 0;
WL_INFORM_MEM(("[STATIC_IF] Enter\n"));
- if (!cfg || !cfg->static_ndev) {
- WL_ERR(("invalid input\n"));
- return;
- }
+ for (i = 0; i < DHD_NUM_STATIC_IFACES; i++) {
+ if (!cfg || !cfg->static_ndev[i]) {
+ WL_ERR(("invalid input\n"));
+ continue;
+ }
- /* wdev free will happen from notifier context */
- /* free_netdev(cfg->static_ndev);
- */
- unregister_netdev(cfg->static_ndev);
+ /* wdev free will happen from notifier context */
+ /* free_netdev(cfg->static_ndev);
+ */
+ unregister_netdev(cfg->static_ndev[i]);
+ }
}
s32
@@ -7984,13 +10460,21 @@
struct wireless_dev *wdev = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(net);
struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 iftype = net->ieee80211_ptr ? net->ieee80211_ptr->iftype : 0;
+ u16 wl_iftype, wl_mode;
WL_INFORM_MEM(("[STATIC_IF] dev_open ndev %p and wdev %p\n", net, net->ieee80211_ptr));
- ASSERT(cfg->static_ndev == net);
+ ASSERT(is_static_iface(cfg, net));
- if (cfg->static_ndev_state != NDEV_STATE_FW_IF_CREATED) {
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, WL_IF_TYPE_AP, net->name, NULL);
- ASSERT(wdev == net->ieee80211_ptr);
+ if (cfg80211_to_wl_iftype(iftype, &wl_iftype, &wl_mode) < 0) {
+ return BCME_ERROR;
+ }
+ if (static_if_ndev_get_state(cfg, net) != NDEV_STATE_FW_IF_CREATED) {
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, NULL);
+ if (!wdev) {
+ WL_ERR(("[STATIC_IF] wdev is NULL, can't proceed"));
+ return BCME_ERROR;
+ }
} else {
WL_INFORM_MEM(("Fw IF for static netdev already created\n"));
}
@@ -8005,9 +10489,13 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(net);
struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_INFORM_MEM(("[STATIC_IF] dev_close\n"));
- if (cfg->static_ndev_state == NDEV_STATE_FW_IF_CREATED) {
- ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ if (static_if_ndev_get_state(cfg, net) == NDEV_STATE_FW_IF_CREATED) {
+ if (mutex_is_locked(&cfg->if_sync) == TRUE) {
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ } else {
+ ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ }
+
if (unlikely(ret)) {
WL_ERR(("Del iface failed for static_if %d\n", ret));
}
@@ -8017,22 +10505,28 @@
}
struct net_device *
wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
- wl_if_event_info *event, u8 *addr, s32 iface_type)
+ wl_if_event_info *event, u8 *addr, s32 iface_type, const char *iface_name)
{
struct net_device *new_ndev = NULL;
struct wireless_dev *wdev = NULL;
- WL_INFORM_MEM(("Updating static iface after Fw IF create \n"));
- new_ndev = cfg->static_ndev;
+ int iface_num = 0;
+ /* Checks if iface number returned is valid or not */
+ if ((iface_num = get_iface_num(iface_name, cfg)) < 0) {
+ return NULL;
+ }
+ WL_INFORM_MEM(("Updating static iface after Fw IF create \n"));
+
+ new_ndev = cfg->static_ndev[iface_num];
if (new_ndev) {
wdev = new_ndev->ieee80211_ptr;
ASSERT(wdev);
wdev->iftype = iface_type;
- memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+ (void)memcpy_s(new_ndev->dev_addr, ETH_ALEN, addr, ETH_ALEN);
}
- cfg->static_ndev_state = NDEV_STATE_FW_IF_CREATED;
+ cfg->static_ndev_state[iface_num] = NDEV_STATE_FW_IF_CREATED;
wl_cfg80211_update_iflist_info(cfg, new_ndev, event->ifidx, addr, event->bssidx,
event->name, NDEV_STATE_FW_IF_CREATED);
return new_ndev;
@@ -8040,11 +10534,781 @@
s32
wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
- cfg->static_ndev_state = NDEV_STATE_FW_IF_DELETED;
- wl_cfg80211_update_iflist_info(cfg, ndev, WL_STATIC_IFIDX, NULL,
- WL_BSSIDX_MAX, NULL, NDEV_STATE_FW_IF_DELETED);
+ int iface_num = 0;
+ if ((iface_num = get_iface_num(ndev->name, cfg)) < 0) {
+ return BCME_ERROR;
+ }
+
+ cfg->static_ndev_state[iface_num] = NDEV_STATE_FW_IF_DELETED;
+ wl_cfg80211_update_iflist_info(cfg, ndev, (DHD_MAX_IFS + iface_num), NULL,
+ WL_BSSIDX_MAX, NULL, NDEV_STATE_FW_IF_DELETED);
wl_cfg80211_clear_per_bss_ies(cfg, ndev->ieee80211_ptr);
wl_dealloc_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
return BCME_OK;
}
#endif /* WL_STATIC_IF */
+
+#ifdef WBTEXT
+static int
+wlc_wbtext_get_roam_prof(struct net_device *ndev, wl_roamprof_band_t *rp,
+ uint8 band, uint8 *roam_prof_ver, uint8 *roam_prof_size)
+{
+ int err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ u8 *ioctl_buf = NULL;
+
+ ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (unlikely(!ioctl_buf)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ rp->v1.band = band;
+ rp->v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ ioctl_buf, WLC_IOCTL_MEDLEN, NULL))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy_s(rp, sizeof(*rp), ioctl_buf, sizeof(*rp));
+ /* roam_prof version get */
+ if (rp->v1.ver > WL_ROAM_PROF_VER_2) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->v1.ver));
+ err = BCME_VERSION;
+ goto exit;
+ }
+ switch (rp->v1.ver) {
+ case WL_ROAM_PROF_VER_0:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v1_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_0;
+ }
+ break;
+ case WL_ROAM_PROF_VER_1:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v2_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_1;
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v3_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_2;
+ }
+ break;
+ default:
+ WL_ERR(("bad version = %d \n", rp->v1.ver));
+ err = BCME_VERSION;
+ goto exit;
+ }
+ WL_DBG(("roam prof ver %u size %u\n", *roam_prof_ver, *roam_prof_size));
+ if ((rp->v1.len % *roam_prof_size) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->v1.len));
+ err = BCME_BADLEN;
+ }
+exit:
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ return err;
+}
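The version handling above boils down to a fixed mapping from roam_prof version to record size, plus a check that the returned length is a whole number of records; a condensed sketch (the helper name is made up, the struct names and error codes are the driver's):

static int wbtext_roam_prof_rec_size(uint8 ver, uint8 *rec_size)
{
	switch (ver) {
	case WL_ROAM_PROF_VER_0: *rec_size = sizeof(wl_roam_prof_v1_t); break;
	case WL_ROAM_PROF_VER_1: *rec_size = sizeof(wl_roam_prof_v2_t); break;
	case WL_ROAM_PROF_VER_2: *rec_size = sizeof(wl_roam_prof_v3_t); break;
	default: return BCME_VERSION;	/* unknown firmware layout */
	}
	/* callers also require (rp->v1.len % *rec_size) == 0 before trusting
	 * the returned profile table
	 */
	return BCME_OK;
}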
+
+s32
+wl_cfg80211_wbtext_set_default(struct net_device *ndev)
+{
+ char *commandp = NULL;
+ s32 ret = BCME_OK;
+ char *data;
+ u8 *ioctl_buf = NULL;
+ wl_roamprof_band_t rp;
+ uint8 bandidx = 0;
+ int wnmmask = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ WL_DBG(("set wbtext to default\n"));
+
+ commandp = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (unlikely(!commandp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (unlikely(!ioctl_buf)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ rp.v1.band = WLC_BAND_2G;
+ rp.v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((ret = wldev_iovar_getbuf(ndev, "roam_prof", &rp, sizeof(rp),
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", ret));
+ goto exit;
+ }
+ memcpy_s(&rp, sizeof(rp), ioctl_buf, sizeof(rp));
+ for (bandidx = 0; bandidx < MAXBANDS; bandidx++) {
+ switch (rp.v1.ver) {
+ case WL_ROAM_PROF_VER_1:
+ {
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ if (bandidx == BAND_5G_INDEX) {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_A_V2);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ } else {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_B_V2);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ }
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ if (bandidx == BAND_5G_INDEX) {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_A_V3);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ } else {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_B_V3);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ }
+ }
+ break;
+ default:
+ WL_ERR(("No Support for roam prof ver = %d \n", rp.v1.ver));
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* set roam profile */
+ ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+ }
+
+ /* set RSSI weight */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ /* set CU weight */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ ret = wldev_iovar_getint(ndev, "wnm", &wnmmask);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to get wnmmask error = %d\n", __func__, ret));
+ goto exit;
+ }
+ /* set ESTM DL weight. */
+ if (wnmmask & WL_WNM_ESTM) {
+ WL_ERR(("Setting ESTM wt\n"));
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_ESTM_DL_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_ESTM_DL_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+ }
+
+ /* set RSSI table */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ /* set CU table */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+exit:
+ if (commandp) {
+ MFREE(cfg->osh, commandp, WLC_IOCTL_SMLEN);
+ }
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_SMLEN);
+ }
+ return ret;
+}
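The repeated snprintf()/strlen() pattern above writes a full private command into the scratch buffer and then points the data argument just past the command keyword and its trailing space; a minimal sketch of that idiom (the helper name is made up):

static char *wbtext_build_cmd(char *buf, size_t buf_len,
	const char *keyword, const char *args)
{
	/* e.g. "WBTEXT_WEIGHT_CONFIG RSSI a 65" */
	snprintf(buf, buf_len, "%s %s", keyword, args);
	/* skip "<keyword> " so the *_config() helpers see only the arguments */
	return buf + strlen(keyword) + 1;
}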
+
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+ uint i = 0;
+ long int rssi_lower, roam_trigger;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ wl_roamprof_band_t *rp = NULL;
+ int err = -EINVAL, bytes_written = 0;
+ size_t len = strlen(data);
+ int rp_len = 0;
+ u8 *ioctl_buf = NULL;
+ uint8 roam_prof_size = 0, roam_prof_ver = 0, fs_per = 0, prof_cnt = 0;
+
+ data[len] = '\0';
+ ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (unlikely(!ioctl_buf)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ rp = (wl_roamprof_band_t *)MALLOCZ(cfg->osh, sizeof(*rp));
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (*data && (!strncmp(data, "b", 1))) {
+ rp->v1.band = WLC_BAND_2G;
+ } else if (*data && (!strncmp(data, "a", 1))) {
+ rp->v1.band = WLC_BAND_5G;
+ } else {
+ err = snprintf(command, total_len, "Missing band\n");
+ goto exit;
+ }
+ data++;
+ rp->v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ ioctl_buf, WLC_IOCTL_MEDLEN, NULL))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy_s(rp, sizeof(*rp), ioctl_buf, sizeof(*rp));
+ /* roam_prof version get */
+ if (rp->v1.ver > WL_ROAM_PROF_VER_2) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->v1.ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ switch (rp->v1.ver) {
+ case WL_ROAM_PROF_VER_0:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v1_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_0;
+ }
+ break;
+ case WL_ROAM_PROF_VER_1:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v2_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_1;
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v3_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_2;
+ }
+ break;
+ default:
+ WL_ERR(("bad version = %d \n", rp->v1.ver));
+ goto exit;
+ }
+ WL_DBG(("roam prof ver %u size %u\n", roam_prof_ver, roam_prof_size));
+ if ((rp->v1.len % roam_prof_size) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->v1.len));
+ err = -EINVAL;
+ goto exit;
+ }
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /* Print the roam profile data from the fw and exit the loop
+ * if either of the conditions below is hit: the remaining
+ * buffer length is less than the roam profile size, or
+ * there is no valid entry.
+ */
+ if (((i * roam_prof_size) > rp->v1.len)) {
+ break;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_0) {
+ fs_per = rp->v1.roam_prof[i].fullscan_period;
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ fs_per = rp->v2.roam_prof[i].fullscan_period;
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ fs_per = rp->v3.roam_prof[i].fullscan_period;
+ }
+ if (fs_per == 0) {
+ break;
+ }
+ prof_cnt++;
+ }
+
+ if (!*data) {
+ for (i = 0; (i < prof_cnt) && (i < WL_MAX_ROAM_PROF_BRACKETS); i++) {
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ bytes_written += snprintf(command+bytes_written,
+ total_len - bytes_written,
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+ rp->v2.roam_prof[i].roam_trigger,
+ rp->v2.roam_prof[i].rssi_lower,
+ rp->v2.roam_prof[i].channel_usage,
+ rp->v2.roam_prof[i].cu_avg_calc_dur);
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ bytes_written += snprintf(command+bytes_written,
+ total_len - bytes_written,
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+ rp->v3.roam_prof[i].roam_trigger,
+ rp->v3.roam_prof[i].rssi_lower,
+ rp->v3.roam_prof[i].channel_usage,
+ rp->v3.roam_prof[i].cu_avg_calc_dur);
+ }
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+ if (prof_cnt != 2) {
+ WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ /* setting roam profile to fw */
+ data++;
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ roam_trigger = simple_strtol(data, &data, 10);
+ if (roam_trigger >= 0) {
+ WL_ERR(("roam trigger[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ data++;
+ rssi_lower = simple_strtol(data, &data, 10);
+ if (rssi_lower >= 0) {
+ WL_ERR(("rssi lower[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ rp->v2.roam_prof[i].roam_trigger = roam_trigger;
+ rp->v2.roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->v2.roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->v2.roam_prof[i].cu_avg_calc_dur =
+ simple_strtol(data, &data, 10);
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ rp->v3.roam_prof[i].roam_trigger = roam_trigger;
+ rp->v3.roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->v3.roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->v3.roam_prof[i].cu_avg_calc_dur =
+ simple_strtol(data, &data, 10);
+ }
+
+ rp_len += roam_prof_size;
+
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ }
+ if (i != 1) {
+ WL_ERR(("Only two roam_prof rows supported.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->v1.len = rp_len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+ &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting roam_profile failed with err %d\n", err));
+ }
+ }
+exit:
+ if (rp) {
+ MFREE(cfg->osh, rp, sizeof(*rp));
+ }
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ return err;
+}
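Each roam-profile row above is consumed as four whitespace-separated integers via simple_strtol(), with the two threshold values required to be negative; a per-row sketch, assuming a legacy-style input such as "b -60 -75 70 10 -75 -128 0 10" (the struct field names follow the driver, the helper itself is illustrative):

struct wbtext_row {
	long roam_trigger;	/* dBm, must be negative */
	long rssi_lower;	/* dBm, must be negative */
	long channel_usage;
	long cu_avg_calc_dur;
};

static int wbtext_parse_row(char **p, struct wbtext_row *row)
{
	row->roam_trigger = simple_strtol(*p, p, 10); (*p)++;
	row->rssi_lower = simple_strtol(*p, p, 10); (*p)++;
	row->channel_usage = simple_strtol(*p, p, 10); (*p)++;
	row->cu_avg_calc_dur = simple_strtol(*p, p, 10);
	return (row->roam_trigger < 0 && row->rssi_lower < 0) ? 0 : -EINVAL;
}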
+
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ int bytes_written = 0, err = -EINVAL, argc = 0;
+ char rssi[BUFSZN], band[BUFSZN], weight[BUFSZN];
+ char *endptr = NULL;
+ wnm_bss_select_weight_cfg_t *bwcfg;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ bwcfg = (wnm_bss_select_weight_cfg_t *)MALLOCZ(cfg->osh, sizeof(*bwcfg));
+ if (unlikely(!bwcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
+ bwcfg->type = 0;
+ bwcfg->weight = 0;
+
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band, weight);
+
+ if (!strcasecmp(rssi, "rssi"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ else if (!strcasecmp(rssi, "cu"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ else if (!strcasecmp(rssi, "estm_dl"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_ESTM_DL;
+ else {
+ /* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu/estm_dl> <band> <weight> */
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (!strcasecmp(band, "a"))
+ bwcfg->band = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ bwcfg->band = WLC_BAND_2G;
+ else if (!strcasecmp(band, "all"))
+ bwcfg->band = WLC_BAND_ALL;
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (argc == 2) {
+ /* If there is no data after band, getting wnm_bss_select_weight from fw */
+ if (bwcfg->band == WLC_BAND_ALL) {
+ WL_ERR(("band option \"all\" is for set only, not get\n"));
+ goto exit;
+ }
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(bwcfg, ioctl_buf, sizeof(*bwcfg));
+ bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" :
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_CU) ? "CU": "ESTM_DL",
+ (bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
+ err = bytes_written;
+ goto exit;
+ } else {
+ /* If weight is not an integer, return a command usage error */
+ bwcfg->weight = simple_strtol(weight, &endptr, 0);
+ if (*endptr != '\0') {
+ WL_ERR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ /* setting weight for iovar wnm_bss_select_weight to fw */
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ WL_ERR(("setting wnm_bss_select_weight failed with err=%d\n", err));
+ }
+ }
+exit:
+ if (bwcfg) {
+ MFREE(cfg->osh, bwcfg, sizeof(*bwcfg));
+ }
+ return err;
+}
+
+/* WBTEXT_TUPLE_MIN_LEN_CHECK: strlen(low) + " " + strlen(high) + " " + strlen(factor) */
+#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
+
+int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int bytes_written = 0, err = -EINVAL;
+ char rssi[BUFSZN], band[BUFSZN];
+ int btcfg_len = 0, i = 0, parsed_len = 0;
+ wnm_bss_select_factor_cfg_t *btcfg;
+ size_t slen = strlen(data);
+ char *start_addr = NULL;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ data[slen] = '\0';
+ btcfg = (wnm_bss_select_factor_cfg_t *)MALLOCZ(cfg->osh,
+ (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
+ if (unlikely(!btcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
+ btcfg->band = WLC_BAND_AUTO;
+ btcfg->type = 0;
+ btcfg->count = 0;
+
+ sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band);
+
+ if (!strcasecmp(rssi, "rssi")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ }
+ else if (!strcasecmp(rssi, "cu")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ }
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (!strcasecmp(band, "a")) {
+ btcfg->band = WLC_BAND_5G;
+ }
+ else if (!strcasecmp(band, "b")) {
+ btcfg->band = WLC_BAND_2G;
+ }
+ else if (!strcasecmp(band, "all")) {
+ btcfg->band = WLC_BAND_ALL;
+ }
+ else {
+ WL_ERR(("%s: Command usage, Wrong band\n", __func__));
+ goto exit;
+ }
+
+ if ((slen - 1) == (strlen(rssi) + strlen(band))) {
+ /* Getting factor table using iovar 'wnm_bss_select_table' from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
+ sizeof(*btcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(btcfg, ioctl_buf, sizeof(*btcfg)); /* header first, to read the entry count */
+ memcpy(btcfg, ioctl_buf, (btcfg->count+1) * sizeof(*btcfg)); /* then the full table */
+
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "No of entries in table: %d\n", btcfg->count);
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "%s factor table\n",
+ (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "low\thigh\tfactor\n");
+ for (i = 0; i <= btcfg->count-1; i++) {
+ bytes_written += snprintf(command + bytes_written,
+ total_len - bytes_written, "%d\t%d\t%d\n", btcfg->params[i].low,
+ btcfg->params[i].high, btcfg->params[i].factor);
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ uint16 len = (sizeof(wnm_bss_select_factor_params_t) * WL_FACTOR_TABLE_MAX_LIMIT);
+ memset_s(btcfg->params, len, 0, len);
+ data += (strlen(rssi) + strlen(band) + 2);
+ start_addr = data;
+ slen = slen - (strlen(rssi) + strlen(band) + 2);
+ for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
+ if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
+ btcfg->params[i].low = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].high = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].factor = simple_strtol(data, &data, 10);
+ btcfg->count++;
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ parsed_len = data - start_addr;
+ } else {
+ WL_ERR(("%s:Command usage:less no of args\n", __func__));
+ goto exit;
+ }
+ }
+ btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
+ cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err));
+ goto exit;
+ }
+ }
+exit:
+ if (btcfg) {
+ MFREE(cfg->osh, btcfg,
+ (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
+ }
+ return err;
+}
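Each factor-table entry above is a "<low> <high> <factor>" tuple, which is why WBTEXT_TUPLE_MIN_LEN_CHECK is 5 (the shortest possible "l h f" string); the legacy default "RSSI a 0 55 100 55 60 90 60 65 70 65 70 50 70 128 20", removed from wl_cfg80211.c later in this patch, carries five such tuples after the type and band tokens. A sketch of one tuple parse:

static void wbtext_parse_tuple(char **p, wnm_bss_select_factor_params_t *e)
{
	e->low = simple_strtol(*p, p, 10); (*p)++;
	e->high = simple_strtol(*p, p, 10); (*p)++;
	e->factor = simple_strtol(*p, p, 10);
}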
+
+s32
+wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+ uint i = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
+ char delta[BUFSZN], band[BUFSZN], *endptr = NULL;
+ wl_roamprof_band_t *rp = NULL;
+ uint8 band_val = 0, roam_prof_size = 0, roam_prof_ver = 0;
+
+ rp = (wl_roamprof_band_t *)MALLOCZ(cfg->osh, sizeof(*rp));
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", band, delta);
+ if (!strcasecmp(band, "a"))
+ band_val = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ band_val = WLC_BAND_2G;
+ else {
+ WL_ERR(("%s: Missing band\n", __func__));
+ goto exit;
+ }
+ if ((err = wlc_wbtext_get_roam_prof(ndev, rp, band_val, &roam_prof_ver,
+ &roam_prof_size))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (argc == 2) {
+ /* If delta is not an integer, return a command usage error */
+ val = simple_strtol(delta, &endptr, 0);
+ if (*endptr != '\0') {
+ WL_ERR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /*
+ * Check the roam profile data from the fw and exit the loop
+ * if either condition below is hit: the remaining buffer length
+ * is less than the roam profile size, or there is no valid entry.
+ */
+ if (((i * roam_prof_size) > rp->v1.len) ||
+ (rp->v1.roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ if (rp->v2.roam_prof[i].channel_usage != 0) {
+ rp->v2.roam_prof[i].roam_delta = val;
+ }
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ if (rp->v3.roam_prof[i].channel_usage != 0) {
+ rp->v3.roam_prof[i].roam_delta = val;
+ }
+ }
+ len += roam_prof_size;
+ }
+ }
+ else {
+ if (rp->v2.roam_prof[0].channel_usage != 0) {
+ bytes_written = snprintf(command, total_len,
+ "%s Delta %d\n", (rp->v1.band == WLC_BAND_2G) ? "2G" : "5G",
+ rp->v2.roam_prof[0].roam_delta);
+ }
+ err = bytes_written;
+ goto exit;
+ }
+ rp->v1.len = len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+ &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting roam_profile failed with err %d\n", err));
+ }
+exit:
+ if (rp) {
+ MFREE(cfg->osh, rp, sizeof(*rp));
+ }
+ return err;
+}
+#endif /* WBTEXT */
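Taken together, the handlers above are driven by private commands of roughly the following shape (the exact strings the Android HAL issues are outside this patch):

/*
 *   DRIVER WBTEXT_PROFILE_CONFIG b -60 -75 70 10 -75 -128 0 10
 *       program two 2.4 GHz roam-profile rows
 *   DRIVER WBTEXT_WEIGHT_CONFIG RSSI a 65
 *       set the 5 GHz RSSI weight to 65
 *   DRIVER WBTEXT_DELTA_CONFIG a 10
 *       set roam_delta to 10 on the 5 GHz rows with non-zero channel_usage
 *   DRIVER WBTEXT_DELTA_CONFIG a
 *       query the current 5 GHz delta
 */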
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.h
index 8473615..4b43ad5 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_android.h
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.h 695651 2017-04-21 10:48:20Z $
+ * $Id: wl_android.h 794110 2018-12-12 05:03:21Z $
*/
#include <linux/module.h>
@@ -129,3 +131,30 @@
#define MAX_NUM_MAC_FILT 10
int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
+#ifdef WL_BCNRECV
+extern int wl_android_bcnrecv_config(struct net_device *ndev, char *data,
+ int total_len);
+extern int wl_android_bcnrecv_stop(struct net_device *ndev, uint reason);
+extern int wl_android_bcnrecv_resume(struct net_device *ndev);
+extern int wl_android_bcnrecv_suspend(struct net_device *ndev);
+extern int wl_android_bcnrecv_event(struct net_device *ndev,
+ uint attr_type, uint status, uint reason, uint8 *data, uint data_len);
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define TSPEC_UPLINK_DIRECTION (0 << 5) /* uplink direction traffic stream */
+#define TSPEC_DOWNLINK_DIRECTION (1 << 5) /* downlink direction traffic stream */
+#define TSPEC_BI_DIRECTION (3 << 5) /* bi direction traffic stream */
+#define TSPEC_EDCA_ACCESS (1 << 7) /* EDCA access policy */
+#define TSPEC_UAPSD_PSB (1 << 2) /* U-APSD power saving behavior */
+#define TSPEC_TSINFO_TID_SHIFT 1 /* TID Shift */
+#define TSPEC_TSINFO_PRIO_SHIFT 3 /* PRIO Shift */
+#define TSPEC_MAX_ACCESS_CATEGORY 3
+#define TSPEC_MAX_USER_PRIO 7
+#define TSPEC_MAX_DIALOG_TOKEN 255
+#define TSPEC_MAX_SURPLUS_BW 12410
+#define TSPEC_MIN_SURPLUS_BW 11210
+#define TSPEC_MAX_MSDU_SIZE 1520
+#define TSPEC_DEF_MEAN_DATA_RATE 120000
+#define TSPEC_DEF_MIN_PHY_RATE 6000000
+#define TSPEC_DEF_DIALOG_TOKEN 7
+#endif /* WL_CAC_TS */
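The TS-Info macros above follow the 802.11 TSPEC TS Info bit layout (TID and direction in octet 0, APSD/PSB and user priority in octet 1); a sketch of how they could be packed, with direction passed as one of the TSPEC_*_DIRECTION values. Whether the CAC code builds the element exactly this way is an assumption:

static inline void cac_fill_tsinfo(u8 tsinfo[2], u8 tid, u8 user_prio,
	u8 direction, bool apsd)
{
	/* octet 0: TID in bits 1-4, direction in bits 5-6, EDCA policy bit */
	tsinfo[0] = (u8)((tid << TSPEC_TSINFO_TID_SHIFT) |
		direction | TSPEC_EDCA_ACCESS);
	/* octet 1: APSD/PSB flag plus user priority in bits 3-5 */
	tsinfo[1] = (u8)((user_prio << TSPEC_TSINFO_PRIO_SHIFT) |
		(apsd ? TSPEC_UAPSD_PSB : 0));
}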
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.c
index 03cd5a1..004318d 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.c 702770 2017-06-03 23:43:21Z $
+ * $Id: wl_cfg80211.c 814814 2019-04-15 03:31:10Z $
*/
/* */
#include <typedefs.h>
@@ -32,6 +34,7 @@
#include <linux/kernel.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmwifi_channels.h>
#include <bcmendian.h>
#include <ethernet.h>
@@ -39,10 +42,11 @@
#include <eapol.h>
#endif /* WL_WPS_SYNC */
#include <802.11.h>
-#ifdef FILS_SUPPORT
+#ifdef WL_FILS
#include <fils.h>
#include <frag.h>
-#endif // endif
+#endif /* WL_FILS */
+#include <bcmiov.h>
#include <linux/if_arp.h>
#include <asm/uaccess.h>
@@ -59,9 +63,11 @@
#include <net/rtnetlink.h>
#include <wlioctl.h>
+#include <bcmevent.h>
#include <wldev_common.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
#include <bcmdevs.h>
#ifdef OEM_ANDROID
#include <wl_android.h>
@@ -69,6 +75,7 @@
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
#include <dhd_debug.h>
#include <dhdioctl.h>
#include <wlioctl.h>
@@ -94,13 +101,27 @@
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
-#ifdef BIGDATA_SOFTAP
+#if defined(BIGDATA_SOFTAP) || defined(DHD_ENABLE_BIGDATA_LOGGING)
#include <wl_bigdata.h>
-#endif /* BIGDATA_SOFTAP */
+#endif /* BIGDATA_SOFTAP || DHD_ENABLE_BIGDATA_LOGGING */
#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */
+#define BRCM_SAE_VENDOR_EVENT_BUF_LEN 500
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <bcmtlv.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#include <linux/dev_ril_bridge.h>
+#include <linux/notifier.h>
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+#ifdef DHD_BANDSTEER
+#include <dhd_bandsteer.h>
+#endif /* DHD_BANDSTEER */
#ifdef BCMWAPI_WPI
/* these items should evetually go into wireless.h of the linux system headfile dir */
@@ -134,10 +155,25 @@
#else /* BCMWAPI_WPI */
#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
#endif /* BCMWAPI_WPI */
+
+#ifdef WL_SAE
+#define MGMT_AUTH_FRAME_DWELL_TIME 4000
+#define MGMT_AUTH_FRAME_WAIT_TIME (MGMT_AUTH_FRAME_DWELL_TIME + 100)
+#endif /* WL_SAE */
+
+#if (defined(WL_FW_OCE_AP_SELECT) || defined(BCMFW_ROAM_ENABLE) && ((LINUX_VERSION_CODE \
+ >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)))
+uint fw_ap_select = true;
+#else
+uint fw_ap_select = false;
+#endif /* WL_FW_OCE_AP_SELECT && (ROAM_ENABLE || BCMFW_ROAM_ENABLE) */
+module_param(fw_ap_select, uint, 0660);
+
static struct device *cfg80211_parent_dev = NULL;
static struct bcm_cfg80211 *g_bcmcfg = NULL;
-u32 wl_dbg_level = WL_DBG_ERR;
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_INFO;
+#define MAX_VIF_OFFSET 15
#define MAX_WAIT_TIME 1500
#ifdef WLAIBSS_MCHAN
#define IBSS_IF_NAME "ibss%d"
@@ -167,28 +203,40 @@
#define WLAN_EID_SSID 0
#define CH_MIN_5G_CHANNEL 34
-#define CH_MIN_2G_CHANNEL 1
-#define ACTIVE_SCAN 1
-#define PASSIVE_SCAN 0
#ifdef WLAIBSS
enum abiss_event_type {
AIBSS_EVENT_TXFAIL
};
#endif // endif
+#ifdef WL_SAE
+/**
+ * enum mgmt_tx_status - mgmt frame tx status
+ *
+ * @MGMT_TX_ACK: mgmt frame acked
+ * @MGMT_TX_NOACK: mgmt frame not acked
+ * @MGMT_TX_OFF_CHAN_COMPLETED: off-channel complete
+ * @MGMT_TX_SEND_FRAME: mgmt frame tx is in progress
+ */
+enum mgmt_tx_status {
+ MGMT_TX_ACK,
+ MGMT_TX_NOACK,
+ MGMT_TX_OFF_CHAN_COMPLETED,
+ MGMT_TX_SEND_FRAME
+};
+#endif /* WL_SAE */
+
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
(entry) = list_first_entry((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
+GCC_DIAGNOSTIC_POP(); \
#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
+GCC_DIAGNOSTIC_POP(); \
#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
@@ -206,13 +254,10 @@
};
#endif /* WL_RELMCAST */
-#ifdef WL_LASTEVT
-typedef struct wl_last_event {
- uint32 current_time; /* current tyime */
- uint32 timestamp; /* event timestamp */
- wl_event_msg_t event; /* Encapsulated event */
-} wl_last_event_t;
-#endif /* WL_LASTEVT */
+#ifdef DHD_SSW_SPECIFIC_REQ
+#define WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE 2
+#define WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE 3
+#endif /* DHD_SSW_SPECIFIC_REQ */
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
@@ -240,9 +285,9 @@
*/
REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
/* IEEE 802.11a, channel 36..64 */
- REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
/* IEEE 802.11a, channel 100..165 */
- REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+ REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
};
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
4 && __GNUC_MINOR__ >= 6))
@@ -293,20 +338,16 @@
},
};
-#define NUM_DIFF_CHANNELS 2
+#define NUM_DIFF_CHANNELS 3
static const struct ieee80211_iface_combination
common_iface_combinations[] = {
{
.num_different_channels = NUM_DIFF_CHANNELS,
/*
- * max_interfaces = 4
- * The max no of interfaces will be used in dual p2p case.
- * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we
- * will not be using the STA functionality in this case, it
- * will remain registered as it is the primary interface.
+ * At most, 5 network interfaces can be registered concurrently
*/
- .max_interfaces = 4,
+ .max_interfaces = IFACE_MAX_CNT,
.limits = common_if_limits,
.n_limits = ARRAY_SIZE(common_if_limits),
},
@@ -334,7 +375,7 @@
#define WLAN_AKM_SUITE_WAPI_CERT 0x000FAC12
#undef NL80211_WAPI_VERSION_1
-#define NL80211_WAPI_VERSION_1 1 << 2
+#define NL80211_WAPI_VERSION_1 1 << 3
#endif /* ANDROID_PLATFORM_VERSION && ANDROID_PLATFORM_VERSION >= 8 */
#endif /* BCMWAPI_WPI */
@@ -374,15 +415,10 @@
#define PM_BLOCK 1
#define PM_ENABLE 0
-#ifdef BCMCCX
-#ifndef WLAN_AKM_SUITE_CCKM
-#define WLAN_AKM_SUITE_CCKM 0x00409600
-#endif // endif
-#define DOT11_LEAP_AUTH 0x80 /* LEAP auth frame paylod constants */
-#endif /* BCMCCX */
-
-#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
-#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+/* GCMP crypto supported above kernel v4.0 */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
+#define WL_GCMP
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
#ifndef IBSS_COALESCE_ALLOWED
#define IBSS_COALESCE_ALLOWED IBSS_COALESCE_DEFAULT
@@ -396,25 +432,6 @@
#define LONG_LISTEN_TIME 2000
#ifdef WBTEXT
-#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
-#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
-#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
-#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
-#define DEFAULT_WBTEXT_PROFILE_A "a -70 -75 70 10 -75 -128 0 10"
-#define DEFAULT_WBTEXT_PROFILE_B "b -60 -75 70 10 -75 -128 0 10"
-#define DEFAULT_WBTEXT_WEIGHT_RSSI_A "RSSI a 65"
-#define DEFAULT_WBTEXT_WEIGHT_RSSI_B "RSSI b 65"
-#define DEFAULT_WBTEXT_WEIGHT_CU_A "CU a 35"
-#define DEFAULT_WBTEXT_WEIGHT_CU_B "CU b 35"
-#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
-60 65 70 65 70 50 70 128 20"
-#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
-60 65 70 65 70 50 70 128 20"
-#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 50 90 \
-50 60 70 60 80 50 80 100 20"
-#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 25 90 \
-25 40 70 40 70 50 70 100 20"
-
typedef struct wl_wbtext_bssid {
struct ether_addr ea;
struct list_head list;
@@ -429,9 +446,19 @@
static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
struct wl_profile *profile);
static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev);
-static int wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len);
+static int wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, uint body_len);
#endif /* WBTEXT */
+#ifdef RTT_SUPPORT
+static s32 wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CHAN_UTIL
+static s32 wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_cfg80211_start_bssload_report(struct net_device *ndev);
+#endif /* WL_CHAN_UTIL */
+
#ifdef SUPPORT_AP_RADIO_PWRSAVE
#define RADIO_PWRSAVE_PPS 10
#define RADIO_PWRSAVE_QUIET_TIME 10
@@ -439,7 +466,7 @@
#define RADIO_PWRSAVE_STAS_ASSOC_CHECK 0
#define RADIO_PWRSAVE_LEVEL_MIN 1
-#define RADIO_PWRSAVE_LEVEL_MAX 5
+#define RADIO_PWRSAVE_LEVEL_MAX 9
#define RADIO_PWRSAVE_PPS_MIN 1
#define RADIO_PWRSAVE_QUIETTIME_MIN 1
#define RADIO_PWRSAVE_ASSOCCHECK_MIN 0
@@ -452,13 +479,14 @@
((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT)| RADIO_PWRSAVE_MINOR_VER)
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#define MIN_P2P_IE_LEN 8 /* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
- * Attribute ID(1) + Length(2) + 1(Mininum length:1)
- */
-#define MAX_P2P_IE_LEN 251 /* Up To 251 */
+/* SoftAP related parameters */
+#define DEFAULT_2G_SOFTAP_CHANNEL 1
+#define DEFAULT_5G_SOFTAP_CHANNEL 149
+#define WL_MAX_NUM_CSA_COUNTERS 255
-#define MAX_VNDR_OUI_STR_LEN 256
-#define VNDR_OUI_STR_LEN 10
+#define MAX_VNDR_OUI_STR_LEN 256u
+#define VNDR_OUI_STR_LEN 10u
+#define DOT11_DISCONNECT_RC 2u
static const uchar *exclude_vndr_oui_list[] = {
"\x00\x50\xf2", /* Microsoft */
"\x00\x00\xf0", /* Samsung Elec */
@@ -474,6 +502,33 @@
static int wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg,
struct net_device *ndev, char *vndr_oui, u32 vndr_oui_len);
static void wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg);
+static s32 wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies);
+
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+/* Check whether the given IE looks like WFA OCE IE. */
+#define wl_cfgoce_is_oce_ie(ie, tlvs, len) wl_cfgoce_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_MBO_OCE)
+
+/* Is any of the TLVs the expected entry? If not,
+ * update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
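A minimal usage sketch for the helper above: given a pointer to a single vendor-specific IE, report whether it is the WFA MBO/OCE IE. The tlvs/tlvs_len arguments are not consulted by the check itself, so placeholders are passed; that is an assumption about acceptable usage rather than something the patch states.

static bool wl_ie_is_wfa_oce(const u8 *ie)
{
	const u8 *tlvs = NULL;
	u32 tlvs_len = 0;

	return wl_cfgoce_is_oce_ie(ie, &tlvs, &tlvs_len) ? true : false;
}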
/*
* cfg80211_ops api/callback list
@@ -481,20 +536,6 @@
static s32 wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
const struct ether_addr *da, const struct ether_addr *sa,
const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody);
-static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid);
-#if defined(WL_CFG80211_P2P_DEV_IF)
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
-#else
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-static void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
#ifdef WLAIBSS_MCHAN
static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
@@ -513,11 +554,12 @@
struct net_device *dev, u8 *mac,
struct station_info *sinfo);
#endif // endif
-static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev, bool enabled,
- s32 timeout);
static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
+#if defined(WL_FILS)
+static int wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed);
+#endif /* WL_FILS */
static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
#if defined(WL_CFG80211_P2P_DEV_IF)
@@ -584,10 +626,6 @@
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
-static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, bool aborted, bool fw_abort);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
@@ -616,13 +654,6 @@
u8 *peer, enum nl80211_tdls_operation oper);
#endif // endif
#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
-#ifdef WL_SCHED_SCAN
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
-static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid);
-#else
-static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
-#endif /* WL_SCHED_SCAN */
static s32 wl_cfg80211_set_ap_role(struct bcm_cfg80211 *cfg, struct net_device *dev);
struct wireless_dev *
@@ -649,6 +680,23 @@
chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+int wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf);
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+
+#ifdef WL_SAE
+static int
+wl_cfg80211_external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params);
+#endif /* WL_SAE */
/*
* event & event Q handlers for cfg80211 interfaces
@@ -672,8 +720,6 @@
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed);
static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
@@ -684,18 +730,7 @@
static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
#endif /* BT_WIFI_HANDOVER */
-#ifdef WL_SCHED_SCAN
-static s32
-wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_SCHED_SCAN */
-#ifdef PNO_SUPPORT
-static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* PNO_SUPPORT */
#ifdef GSCAN_SUPPORT
-static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
static s32 wl_handle_roam_exp_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* GSCAN_SUPPORT */
@@ -716,13 +751,33 @@
#ifdef DHD_LOSSLESS_ROAMING
static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
#endif /* DHD_LOSSLESS_ROAMING */
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+static void wl_del_csa_timeout(struct bcm_cfg80211 *cfg);
+#endif // endif
-#ifdef WLTDLS
-static s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
- enum wl_tdls_config state, bool tdls_mode);
-static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+#ifdef WL_MBO
+static s32
+wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-#endif /* WLTDLS */
+#endif /* WL_MBO */
+
+static s32
+wl_notify_dos_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#ifdef WL_SAE
+static s32
+wl_notify_extauth_req_event(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32
+wl_notify_mgmt_frame_tx_complete(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* WL_SAE */
+
+#ifdef ENABLE_HOGSQS
+static s32 wl_cfg80211_hogsqs_notify(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* ENABLE_HOGSQS */
+
/*
* register/deregister parent device
*/
@@ -743,7 +798,6 @@
*/
static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, const void *data, s32 item);
-static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
/*
@@ -759,10 +813,18 @@
struct cfg80211_connect_params *sme);
static s32 wl_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme);
+#ifdef WL_FILS
+static s32 wl_set_fils_params(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#endif // endif
#ifdef BCMWAPI_WPI
static s32 wl_set_set_wapi_ie(struct net_device *dev,
struct cfg80211_connect_params *sme);
#endif // endif
+#ifdef WL_GCMP
+static s32 wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask);
+#endif /* WL_GCMP */
+
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
struct wl_join_params *join_params, size_t *join_params_size);
@@ -774,7 +836,7 @@
static void wl_rst_ie(struct bcm_cfg80211 *cfg);
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool roam);
+ bool update_ssid);
static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
@@ -782,34 +844,22 @@
static int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie, const u8** rsn_cap);
#endif // endif
-#ifdef WL11U
-static bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len);
-static s32
-wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx);
-static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len);
-#endif /* WL11U */
-
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
static void wl_free_wdev(struct bcm_cfg80211 *cfg);
-static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool roam);
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool update_ssid);
static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
s32 wl_cfg80211_channel_to_freq(u32 channel);
-
-#ifdef WL_IRQSET
-static void wl_irq_set_work_handler(struct work_struct *work);
-#define IRQ_SET_DURATION 23000
-#endif /* WL_IRQSET */
-
static void wl_cfg80211_work_handler(struct work_struct *work);
static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
struct key_params *params);
+
+#ifdef ENABLE_HOGSQS
+static void wl_cfg80211_hogsqs_event_handler(struct work_struct *work);
+#endif // endif
+
/*
* key indianess swap utilities
*/
@@ -835,14 +885,7 @@
*/
static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
-
-#ifdef WL_LASTEVT
-static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data);
-#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e, data)
-#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
-#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e)
-#endif /* WL_LASTEVT */
static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
struct net_device *ndev);
@@ -867,9 +910,7 @@
static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
#endif // endif
-static wl_scan_params_t *wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg,
- int channel, int nprobes, int *out_params_size);
-static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, s32 mode, u32 dev_role);
#ifdef WL_CFG80211_ACL
/* ACL */
@@ -884,7 +925,12 @@
int dhd_del_monitor(struct net_device *ndev);
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);
-int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+#ifdef CFI_CHECK
+netdev_tx_t
+#else
+int
+#endif /* CFI_CHECK */
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
#ifdef ESCAN_CHANNEL_CACHE
void reset_roam_cache(struct bcm_cfg80211 *cfg);
@@ -904,6 +950,9 @@
s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
#endif /* P2P_LISTEN_OFFLOADING */
+#ifdef CUSTOMER_HW4_DEBUG
+extern bool wl_scan_timeout_dbg_enabled;
+#endif /* CUSTOMER_HW4_DEBUG */
#ifdef PKT_FILTER_SUPPORT
extern uint dhd_pkt_filter_enable;
extern uint dhd_master_mode;
@@ -918,16 +967,6 @@
const struct ether_addr *bssid);
static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
-static s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
-static s32 wl_check_vif_support(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype);
-bool wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len);
-
-#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
-static void wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason,
- char *string, int hang_info_cnt);
-static void wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 err);
-#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
-
#ifdef WL_WPS_SYNC
static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg);
static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg);
@@ -939,26 +978,34 @@
static s32 wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac);
static void wl_wps_handle_ifdel(struct net_device *ndev);
#endif /* WL_WPS_SYNC */
-const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
+
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint);
+#endif /* WL_FW_OCE_AP_SELECT */
+
+#ifdef WL_BCNRECV
+static s32 wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CAC_TS
+static s32 wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_CAC_TS */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+static s32 wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_MBO || WL_OCE */
static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
- defined(CFG80211_DISCONNECTED_V2))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
-#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- BCM_REFERENCE(loc_gen); \
- cfg80211_disconnected(dev, reason, ie, len, gfp);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
-
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) || (defined(CONFIG_ARCH_MSM) && \
defined(CFG80211_DISCONNECTED_V2))
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
#else
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
@@ -966,22 +1013,60 @@
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) || (defined(CONFIG_ARCH_MSM) && \
- defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE))
-#define CFG80211_CONNECT_BSS(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) || \
+ defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE) || defined(WL_FILS) || \
+ defined(CONFIG_CFG80211_FILS_BKPORT)
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
resp_ie_len, status, gfp) \
cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
#else
-#define CFG80211_CONNECT_BSS(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
resp_ie_len, status, gfp) \
cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
resp_ie_len, status, gfp);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || \
- * (CONFIG_ARCH_MSM && CFG80211_CONNECT_TIMEOUT_REASON_CODE)
+ * (CFG80211_CONNECT_TIMEOUT_REASON_CODE) ||
+ * WL_FILS || CONFIG_CFG80211_FILS_BKPORT
*/
+#elif defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE)
+/* Some customer kernels backport the connect timeout changes, so the
+ * CFG80211_CONNECT_TIMEOUT_REASON_CODE define can be present even on
+ * kernels < 4.7.
+ */
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
+#else
+/* Kernels < 4.7 don't support cfg80211_connect_bss */
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_result(dev, bssid, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) */
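+/* cfg80211_rx_mgmt() takes different arguments depending on the kernel version; pick the matching call form. */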
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#define CFG80211_RX_MGMT(ndev, freq, sig_dbm, buf, len, flags, gfp) \
+ cfg80211_rx_mgmt(ndev_to_wdev(ndev), freq, sig_dbm, buf, len, flags)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+#define CFG80211_RX_MGMT(ndev, freq, sig_dbm, buf, len, flags, gfp) \
+ cfg80211_rx_mgmt(ndev_to_wdev(ndev), freq, sig_dbm, buf, len, flags, gfp)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)))
+#define CFG80211_RX_MGMT(ndev, freq, sig_dbm, buf, len, flags, gfp) \
+ cfg80211_rx_mgmt(ndev_to_wdev(ndev), freq, sig_dbm, buf, len, gfp)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))) || \
+ defined(WL_COMPAT_WIRELESS)
+#define CFG80211_RX_MGMT(ndev, freq, sig_dbm, buf, len, flags, gfp) \
+ cfg80211_rx_mgmt(ndev, freq, sig_dbm, buf, len, gfp)
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#define CFG80211_RX_MGMT(ndev, freq, sig_dbm, buf, len, flags, gfp) \
+ cfg80211_rx_mgmt(ndev, freq, buf, len, gfp)
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) */
+
#ifdef RSSI_OFFSET
static s32 wl_rssi_offset(s32 rssi)
{
@@ -1003,8 +1088,6 @@
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
-extern int passive_channel_skip;
-
static s32
wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
@@ -1133,13 +1216,25 @@
* are supporting MFP. So advertise only when MFP support is enabled.
*/
WLAN_CIPHER_SUITE_AES_CMAC,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* MFP */
+
#ifdef BCMWAPI_WPI
WLAN_CIPHER_SUITE_SMS4,
#endif // endif
#if defined(WLAN_CIPHER_SUITE_PMK)
WLAN_CIPHER_SUITE_PMK,
#endif /* WLAN_CIPHER_SUITE_PMK */
+#ifdef WL_GCMP
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+#endif /* WL_GCMP */
};
#ifdef WL_SUPPORT_ACS
@@ -1169,7 +1264,8 @@
/* IOCtl version read from targeted driver */
int ioctl_version;
#ifdef DEBUGFS_CFG80211
-#define S_SUBLOGLEVEL 20
+#define SUBLOGLEVEL 20
+#define SUBLOGLEVELZ ((SUBLOGLEVEL) + (1))
static const struct {
u32 log_level;
char *sublogname;
@@ -1183,36 +1279,70 @@
};
#endif // endif
+typedef struct rsn_cipher_algo_entry {
+ u32 cipher_suite;
+ u32 wsec_algo;
+ u32 wsec_key_algo;
+} rsn_cipher_algo_entry_t;
+
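+/* Map each cfg80211 cipher suite to the wsec bitmask and firmware crypto algorithm it selects. */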
+static const rsn_cipher_algo_entry_t rsn_cipher_algo_lookup_tbl[] = {
+ {WLAN_CIPHER_SUITE_WEP40, WEP_ENABLED, CRYPTO_ALGO_WEP1},
+ {WLAN_CIPHER_SUITE_WEP104, WEP_ENABLED, CRYPTO_ALGO_WEP128},
+ {WLAN_CIPHER_SUITE_TKIP, TKIP_ENABLED, CRYPTO_ALGO_TKIP},
+ {WLAN_CIPHER_SUITE_CCMP, AES_ENABLED, CRYPTO_ALGO_AES_CCM},
+ {WLAN_CIPHER_SUITE_AES_CMAC, AES_ENABLED, CRYPTO_ALGO_BIP},
+#ifdef BCMWAPI_WPI
+ {WLAN_CIPHER_SUITE_SMS4, SMS4_ENABLED, CRYPTO_ALGO_SMS4},
+#endif /* BCMWAPI_WPI */
+#ifdef WL_GCMP
+ {WLAN_CIPHER_SUITE_GCMP, AES_ENABLED, CRYPTO_ALGO_AES_GCM},
+ {WLAN_CIPHER_SUITE_GCMP_256, AES_ENABLED, CRYPTO_ALGO_AES_GCM256},
+ {WLAN_CIPHER_SUITE_BIP_GMAC_128, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC},
+ {WLAN_CIPHER_SUITE_BIP_GMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC256},
+#endif /* WL_GCMP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ {WLAN_CIPHER_SUITE_BIP_CMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_CMAC256},
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
+};
+
+typedef struct rsn_akm_wpa_auth_entry {
+ u32 akm_suite;
+ u32 wpa_auth;
+} rsn_akm_wpa_auth_entry_t;
+
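+/* Map each cfg80211 AKM suite to the corresponding firmware wpa_auth value. */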
+static const rsn_akm_wpa_auth_entry_t rsn_akm_wpa_auth_lookup_tbl[] = {
+#ifdef WL_OWE
+ {WLAN_AKM_SUITE_OWE, WPA3_AUTH_OWE},
+#endif /* WL_OWE */
+ {WLAN_AKM_SUITE_8021X, WPA2_AUTH_UNSPECIFIED},
+ {WL_AKM_SUITE_SHA256_1X, WPA2_AUTH_1X_SHA256},
+ {WL_AKM_SUITE_SHA256_PSK, WPA2_AUTH_PSK_SHA256},
+ {WLAN_AKM_SUITE_PSK, WPA2_AUTH_PSK},
+ {WLAN_AKM_SUITE_FT_8021X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT},
+ {WLAN_AKM_SUITE_FT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT},
+ {WLAN_AKM_SUITE_FILS_SHA256, WPA2_AUTH_FILS_SHA256},
+ {WLAN_AKM_SUITE_FILS_SHA384, WPA2_AUTH_FILS_SHA384},
+ {WLAN_AKM_SUITE_8021X_SUITE_B, WPA3_AUTH_1X_SUITE_B_SHA256},
+ {WLAN_AKM_SUITE_8021X_SUITE_B_192, WPA3_AUTH_1X_SUITE_B_SHA384},
+#ifdef BCMWAPI_WPI
+ {WLAN_AKM_SUITE_WAPI_CERT, WAPI_AUTH_UNSPECIFIED},
+ {WLAN_AKM_SUITE_WAPI_PSK, WAPI_AUTH_PSK},
+#endif /* BCMWAPI_WPI */
+#ifdef WL_SAE
+ {WLAN_AKM_SUITE_SAE, WPA3_AUTH_SAE_PSK},
+#endif /* WL_SAE */
+ {WLAN_AKM_SUITE_FT_8021X_SHA384, WPA3_AUTH_1X_SHA384 | WPA2_AUTH_FT},
+ {WLAN_AKM_SUITE_DPP, WFA_AUTH_DPP}
+};
+
+#define BUFSZ 8
+#define BUFSZN BUFSZ + 1
+
+#define _S(x) #x
+#define S(x) _S(x)
+
#define SOFT_AP_IF_NAME "swlan0"
-#ifdef CUSTOMER_HW4_DEBUG
-uint prev_dhd_console_ms = 0;
-u32 prev_wl_dbg_level = 0;
-bool wl_scan_timeout_dbg_enabled = 0;
-static void wl_scan_timeout_dbg_set(void);
-static void wl_scan_timeout_dbg_clear(void);
-
-static void wl_scan_timeout_dbg_set(void)
-{
- WL_ERR(("Enter \n"));
- prev_dhd_console_ms = dhd_console_ms;
- prev_wl_dbg_level = wl_dbg_level;
-
- dhd_console_ms = 1;
- wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
-
- wl_scan_timeout_dbg_enabled = 1;
-}
-static void wl_scan_timeout_dbg_clear(void)
-{
- WL_ERR(("Enter \n"));
- dhd_console_ms = prev_dhd_console_ms;
- wl_dbg_level = prev_wl_dbg_level;
-
- wl_scan_timeout_dbg_enabled = 0;
-}
-#endif /* CUSTOMER_HW4_DEBUG */
-
/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
uint32 fw_assoc_watchdog_ms = 0;
bool fw_assoc_watchdog_started = 0;
@@ -1357,6 +1487,11 @@
return lchspec;
}
+bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg)
+{
+ return cfg->hal_started;
+}
+
/* given a chanspec value, do the endian and chanspec version conversion to
* a chanspec_t value
* Returns INVCHANSPEC on error
@@ -1383,17 +1518,14 @@
wl_ch_host_to_driver(u16 channel)
{
chanspec_t chanspec;
+ chanspec_band_t band;
- chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+ band = WL_CHANNEL_BAND(channel);
- if (channel <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
-
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
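+ /* Build a 20MHz chanspec in the channel's band; propagate INVCHANSPEC on failure. */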
+ chanspec = wf_create_20MHz_chspec(channel, band);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
return wl_chspec_host_to_driver(chanspec);
}
@@ -1423,7 +1555,7 @@
char *c = NULL;
int count = 0;
- memset(n, 0, ETHER_ADDR_LEN);
+ bzero(n, ETHER_ADDR_LEN);
for (;;) {
n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
if (!*c++ || count == ETHER_ADDR_LEN)
@@ -1443,6 +1575,9 @@
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+#ifdef WL_SAE
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+#endif /* WL_SAE */
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_AP] = {
@@ -1512,6 +1647,47 @@
key->iv_initialized = dtoh32(key->iv_initialized);
}
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint)
+{
+ const u8 *parse = NULL;
+ bcm_tlv_t *ie;
+ const struct cfg80211_bss_ies *ies;
+ u32 len;
+ struct cfg80211_bss *bss;
+
+ bss = CFG80211_GET_BSS(wiphy, NULL, bssid_hint, 0, 0);
+ if (!bss) {
+ WL_ERR(("Unable to find AP in the cache"));
+ return false;
+ }
+
+ if (rcu_access_pointer(bss->ies)) {
+ ies = rcu_access_pointer(bss->ies);
+ parse = ies->data;
+ len = ies->len;
+ } else {
+ WL_ERR(("ies is NULL"));
+ return false;
+ }
+
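+ /* Walk the vendor-specific IEs of the cached BSS entry until an OCE IE is found. */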
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgoce_is_oce_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
+ return true;
+ } else {
+ ie = bcm_next_tlv((const bcm_tlv_t*) ie, &len);
+ if (!ie) {
+ return false;
+ }
+ parse = (uint8 *)ie;
+ WL_DBG(("NON OCE IE. next ie ptr:%p", parse));
+ }
+ }
+ WL_DBG(("OCE IE NOT found"));
+ return false;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
+
/* Dump the contents of the encoded wps ie buffer and get pbc value */
static void
wl_validate_wps_ie(const char *wps_ie, s32 wps_ie_len, bool *pbc)
@@ -1674,7 +1850,7 @@
u16 channel = WL_P2P_TEMP_CHAN;
char *buf;
- memset(&bssid, 0, sizeof(bssid));
+ bzero(&bssid, sizeof(bssid));
if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, sizeof(bssid)))) {
/* STA interface is not associated. So start the new interface on a temp
* channel . Later proper channel will be applied by the above framework
@@ -1757,19 +1933,23 @@
{
u16 chspec;
s16 cfg_type;
- u32 timeout;
+ long timeout;
s32 err;
u16 p2p_iftype;
int dhd_mode;
struct net_device *new_ndev = NULL;
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ether_addr *p2p_addr;
+#if defined(WL_SUPPORT_MULTIP2P)
+ s32 is_mp2p_supported = BCME_ERROR;
+#endif // endif
*ret_err = BCME_OK;
if (!cfg->p2p) {
WL_ERR(("p2p not initialized\n"));
return NULL;
}
+
#if defined(WL_CFG80211_P2P_DEV_IF)
if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
/* Handle Dedicated P2P discovery Interface */
@@ -1778,6 +1958,9 @@
}
#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_SUPPORT_MULTIP2P)
+ is_mp2p_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_MP2P_MODE);
+#endif /* WL_SUPPORT_MULTIP2P */
if (wl_iftype == WL_IF_TYPE_P2P_GO) {
p2p_iftype = WL_P2P_IF_GO;
} else {
@@ -1788,7 +1971,11 @@
* p2p_go_count is the counter for GO creation
* requests.
*/
- if ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO)) {
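+ /* The leading TRUE keeps the condition well-formed when WL_SUPPORT_MULTIP2P is not compiled in. */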
+ if (TRUE &&
+#if defined(WL_SUPPORT_MULTIP2P)
+ (is_mp2p_supported <= 0) &&
+#endif // endif
+ ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO))) {
WL_ERR(("FW does not support multiple GO\n"));
*ret_err = -ENOTSUPP;
return NULL;
@@ -1799,8 +1986,7 @@
wl_cfgp2p_init_discovery(cfg);
}
- strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
- cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0';
+ strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
/* In concurrency case, STA may be already associated in a particular channel.
* so retrieve the current channel of primary interface and then start the virtual
* interface on that.
@@ -1811,7 +1997,7 @@
* bss: "cfg p2p_ifadd"
*/
wl_set_p2p_status(cfg, IF_ADDING);
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
cfg_type = wl_cfgp2p_get_conn_idx(cfg);
if (cfg_type == BCME_ERROR) {
wl_clr_p2p_status(cfg, IF_ADDING);
@@ -1869,30 +2055,26 @@
return NULL;
}
-static s32
-wl_check_vif_support(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype)
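+/* Report whether NAN, a P2P group, or softAP already holds a virtual interface. */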
+bool
+wl_cfg80211_check_vif_in_use(struct net_device *ndev)
{
-#ifdef WL_NAN
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool nan_enabled = FALSE;
- /* If NAN is enabled, another VIF other than
- * NAN iface create request can't be supported
- */
- if (cfg->nan_enable && (wl_iftype != WL_IF_TYPE_NAN)) {
- WL_ERR(("NAN enabled, can't support other interfaces\n"));
- return -ENOTSUPP;
- }
- /* If P2PGroup/Softap is enabled, another VIF
- * iface create request can't be supported
- */
- if ((wl_cfgp2p_vif_created(cfg)) ||
- (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Additional vif can't be supported [%d:%d]\n",
- cfg->nan_enable, dhd->op_mode));
- return -ENOTSUPP;
- }
+#ifdef WL_NAN
+ nan_enabled = cfg->nan_enable;
#endif /* WL_NAN */
- return BCME_OK;
+
+ if (nan_enabled || (wl_cfgp2p_vif_created(cfg)) ||
+ (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_MEM(("%s: Virtual interfaces in use. NAN %d P2P %d softAP %d\n",
+ __FUNCTION__, nan_enabled, wl_cfgp2p_vif_created(cfg),
+ (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)));
+ return TRUE;
+ }
+
+ return FALSE;
}
void
@@ -1938,18 +2120,15 @@
switch (state) {
case WL_IF_CREATE_REQ:
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
+#endif /* WL_BCNRECV */
wl_cfg80211_scan_abort(cfg);
wl_wlfc_enable(cfg, true);
-
#ifdef WLTDLS
- if (wl_iftype == WL_IF_TYPE_NAN) {
- /* disable TDLS on NAN IF create */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_NDI_CREATE, false);
- }
- else {
- /* disable TDLS if number of connected interfaces is >= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
- }
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
#endif /* WLTDLS */
break;
case WL_IF_DELETE_REQ:
@@ -1958,7 +2137,7 @@
#endif /* WPS_SYNC */
if (wl_get_drv_status(cfg, SCANNING, ndev)) {
/* Send completion for any pending scans */
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#ifdef CUSTOM_SET_CPUCORE
@@ -1980,7 +2159,6 @@
}
if (wl_mode == WL_MODE_AP) {
/* Common code for AP/GO */
- wl_set_drv_status(cfg, CONNECTED, ndev);
}
break;
case WL_IF_DELETE_DONE:
@@ -2015,13 +2193,12 @@
s16 err;
s32 cfg_type;
struct net_device *ndev;
- s32 timeout;
+ long timeout;
if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
WL_INFORM_MEM(("device is not ready\n"));
return BCME_NOTFOUND;
}
-
#ifdef WL_CFG80211_P2P_DEV_IF
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
/* Handle dedicated P2P discovery interface. */
@@ -2037,7 +2214,7 @@
}
if (wl_cfgp2p_find_type(cfg, bssidx, &cfg_type) != BCME_OK) {
/* Couldn't find matching iftype */
- WL_ERR(("non P2P interface\n"));
+ WL_MEM(("non P2P interface\n"));
return BCME_NOTFOUND;
}
@@ -2070,7 +2247,7 @@
}
}
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
wl_set_p2p_status(cfg, IF_DELETING);
DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
@@ -2099,7 +2276,7 @@
* Firmware would be cleaned up via WiFi reset done by the
* user space from hang event context (for android only).
*/
- memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
@@ -2112,6 +2289,507 @@
return err;
}
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_IFACE_MGMT
+static s32
+wl_cfg80211_is_policy_config_allowed(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ wl_iftype_t active_sec_iface = WL_IFACE_NOT_PRESENT;
+ bool p2p_disc_on = false;
+ bool sta_assoc_state = false;
+
+ mutex_lock(&cfg->if_sync);
+
+ sta_assoc_state = (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||
+ wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg)));
+ active_sec_iface = wl_cfg80211_get_sec_iface(cfg);
+ p2p_disc_on = wl_get_p2p_status(cfg, SCANNING);
+
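+ /* Policy changes are rejected while STA assoc/connect, P2P discovery, NAN init or any secondary iface is active. */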
+ if ((sta_assoc_state == TRUE) || (p2p_disc_on == TRUE) ||
+ (cfg->nan_init_state == TRUE) ||
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ WL_INFORM_MEM(("Active iface matrix: sta_assoc_state = %d,"
+ " p2p_disc = %d, nan_disc = %d, active iface = %s\n",
+ sta_assoc_state, p2p_disc_on, cfg->nan_init_state,
+ wl_iftype_to_str(active_sec_iface)));
+ ret = BCME_BUSY;
+ }
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
+#ifdef WL_NANP2P
+int
+wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
+ }
+
+ if (arg_val) {
+ cfg->conc_disc |= arg_val;
+ } else {
+ cfg->conc_disc &= ~arg_val;
+ }
+ return BCME_OK;
+}
+
+uint8
+wl_cfg80211_get_iface_conc_disc(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return cfg->conc_disc;
+}
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+int
+wl_cfg80211_set_iface_policy(struct net_device *ndev,
+ char *arg, int len)
+{
+ int ret = BCME_OK;
+ uint8 i = 0;
+ iface_mgmt_data_t *iface_data = NULL;
+
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
+ }
+
+ if (!arg || len <= 0 || len > sizeof(iface_mgmt_data_t)) {
+ return BCME_BADARG;
+ }
+
+ iface_data = (iface_mgmt_data_t *)arg;
+ if (iface_data->policy >= WL_IF_POLICY_INVALID) {
+ WL_ERR(("Unexpected value of policy = %d\n",
+ iface_data->policy));
+ return BCME_BADARG;
+ }
+
+ bzero(&cfg->iface_data, sizeof(iface_mgmt_data_t));
+ ret = memcpy_s(&cfg->iface_data, sizeof(iface_mgmt_data_t), arg, len);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy iface data, src len = %d\n", len));
+ return ret;
+ }
+
+ if (cfg->iface_data.policy == WL_IF_POLICY_ROLE_PRIORITY) {
+ for (i = 0; i < WL_IF_TYPE_MAX; i++) {
+ WL_DBG(("iface = %s, priority[i] = %d\n",
+ wl_iftype_to_str(i), cfg->iface_data.priority[i]));
+ }
+ }
+
+ return ret;
+}
+
+uint8
+wl_cfg80211_get_iface_policy(struct net_device *ndev)
+
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return cfg->iface_data.policy;
+}
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_IFACE_MGMT_CONF */
+
+#ifdef WL_IFACE_MGMT
+/* Get active secondary data iface type */
+wl_iftype_t
+wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg)
+{
+#ifndef WL_STATIC_IF
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* !WL_STATIC_IF */
+ struct net_device *p2p_ndev = NULL;
+
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg,
+ P2PAPI_BSSCFG_CONNECTION1);
+
+#ifdef WL_STATIC_IF
+ if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
+ if (IS_AP_IFACE(cfg->static_ndev->ieee80211_ptr)) {
+ return WL_IF_TYPE_AP;
+ }
+ }
+#else
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ return WL_IF_TYPE_AP;
+ }
+#endif /* WL_STATIC_IF */
+
+ if (p2p_ndev && p2p_ndev->ieee80211_ptr) {
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ return WL_IF_TYPE_P2P_GO;
+ }
+
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ return WL_IF_TYPE_P2P_GC;
+ }
+ }
+
+#ifdef WL_NAN
+ if (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg))) {
+ return WL_IF_TYPE_NAN;
+ }
+#endif /* WL_NAN */
+ return WL_IFACE_NOT_PRESENT;
+}
+
+/*
+* Handle incoming data interface request based on policy.
+* If there is any conflicting interface, that will be
+* deleted.
+*/
+s32
+wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+ bool del_iface = false;
+ wl_iftype_t sec_wl_if_type = wl_cfg80211_get_sec_iface(cfg);
+
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
+ }
+
+ if (sec_wl_if_type == WL_IFACE_NOT_PRESENT) {
+ /*
+ * If there is no active secondary I/F, there
+ * is no interface conflict. Do nothing.
+ */
+ return BCME_OK;
+ }
+
+ /* Handle secondary data link case */
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM:
+ case WL_IF_POLICY_DEFAULT: {
+ if (sec_wl_if_type == WL_IF_TYPE_NAN) {
+ /* NAN has the lowest priority */
+ del_iface = true;
+ } else {
+ /* Active iface is present, returning error */
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Found active iface = %s, can't support new iface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type), wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ WL_INFORM_MEM(("Remove active sec data interface, allow incoming iface\n"));
+ /* Delete existing data iface and allow incoming sec iface */
+ del_iface = true;
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ del_iface = true;
+ } else {
+ WL_ERR(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface policy = %d\n",
+ cfg->iface_data.policy));
+ return BCME_ERROR;
+ }
+ }
+ if (del_iface) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
+ }
+ return ret;
+}
+
+/* Handle discovery ifaces based on policy */
+s32
+wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype, bool *disable_nan, bool *disable_p2p)
+{
+ s32 ret = BCME_OK;
+ wl_iftype_t sec_wl_if_type =
+ wl_cfg80211_get_sec_iface(cfg);
+ *disable_p2p = false;
+ *disable_nan = false;
+
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
+ }
+
+ /*
+ * Check for any policy conflicts with active secondary
+ * interface for incoming discovery iface
+ */
+ if ((sec_wl_if_type != WL_IFACE_NOT_PRESENT) &&
+ (is_discovery_iface(new_wl_iftype))) {
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM: {
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ WL_INFORM_MEM(("Allow P2P Discovery with active NDP\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
+ /*
+ * Intentional fall through to default policy
+ * since the handling for AP and associated ifaces is the same
+ */
+ }
+ /* fall through */
+ case WL_IF_POLICY_DEFAULT: {
+ if (sec_wl_if_type == WL_IF_TYPE_AP) {
+ WL_INFORM_MEM(("AP is active, cant support new iface\n"));
+ ret = BCME_ERROR;
+ } else if (sec_wl_if_type == WL_IF_TYPE_P2P_GC ||
+ sec_wl_if_type == WL_IF_TYPE_P2P_GO) {
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ /*
+ * Associated discovery case,
+ * Fall through
+ */
+ } else {
+ /* Active iface is present, returning error */
+ WL_INFORM_MEM(("P2P group is active,"
+ " cant support new iface\n"));
+ ret = BCME_ERROR;
+ }
+ } else if (sec_wl_if_type == WL_IF_TYPE_NAN) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
+ }
+ break;
+ }
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ /* Delete the existing data iface and allow the incoming sec iface */
+ WL_INFORM_MEM(("Remove active sec data interface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type)));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ WL_INFORM_MEM(("Remove active sec data iface\n"));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ } else {
+ WL_ERR(("Can't support new iface = %s"
+ " due to low priority\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported policy\n"));
+ return BCME_ERROR;
+ }
+ }
+ } else {
+ /*
+ * Handle incoming new secondary iface request,
+ * irrespective of existing discovery ifaces
+ */
+ if ((cfg->iface_data.policy == WL_IF_POLICY_CUSTOM) &&
+ (new_wl_iftype == WL_IF_TYPE_NAN)) {
+ WL_INFORM_MEM(("Allow NAN Data Path\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
+ }
+
+ /* Check for any conflicting discovery iface */
+ switch (new_wl_iftype) {
+ case WL_IF_TYPE_P2P_DISC:
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ *disable_nan = true;
+ break;
+ }
+ case WL_IF_TYPE_NAN_NMI:
+ case WL_IF_TYPE_NAN: {
+ *disable_p2p = true;
+ break;
+ }
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_AP: {
+ *disable_nan = true;
+ *disable_p2p = true;
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported\n"));
+ return BCME_ERROR;
+ }
+ }
+ return ret;
+}
+
+bool
+wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ struct net_device *p2p_ndev = NULL;
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
+
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC && p2p_ndev &&
+ p2p_ndev->ieee80211_ptr &&
+ is_p2p_group_iface(p2p_ndev->ieee80211_ptr)) {
+ return true;
+ }
+#ifdef WL_NAN
+ else if ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) &&
+ (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg)))) {
+ return true;
+ }
+#endif /* WL_NAN */
+ return false;
+}
+
+/* Handle incoming discovery iface request */
+s32
+wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+ bool disable_p2p = false;
+ bool disable_nan = false;
+
+ wl_iftype_t active_sec_iface =
+ wl_cfg80211_get_sec_iface(cfg);
+
+ if (is_discovery_iface(new_wl_iftype) &&
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ if (wl_cfg80211_is_associated_discovery(cfg,
+ new_wl_iftype) == TRUE) {
+ WL_DBG(("Associate iface request is allowed= %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ return ret;
+ }
+ }
+
+ ret = wl_cfg80211_disc_if_mgmt(cfg, new_wl_iftype,
+ &disable_nan, &disable_p2p);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed at disc iface mgmt, ret = %d\n", ret));
+ return ret;
+ }
+#ifdef WL_NANP2P
+ if (((new_wl_iftype == WL_IF_TYPE_P2P_DISC) && disable_nan) ||
+ ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) && disable_p2p)) {
+ if ((cfg->nan_p2p_supported == TRUE) &&
+ (cfg->conc_disc == WL_NANP2P_CONC_SUPPORT)) {
+ WL_INFORM_MEM(("P2P + NAN conc is supported\n"));
+ disable_p2p = false;
+ disable_nan = false;
+ }
+ }
+#endif /* WL_NANP2P */
+
+ if (disable_nan) {
+#ifdef WL_NAN
+ /* Disable nan */
+ ret = wl_cfgnan_disable(cfg, NAN_CONCURRENCY_CONFLICT);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ return ret;
+ }
+#endif /* WL_NAN */
+ }
+
+ if (disable_p2p) {
+ /* Disable p2p discovery */
+ ret = wl_cfg80211_deinit_p2p_discovery(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to disable p2p_disc for allowing nan\n"));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/*
+* Check for any conflicting iface before adding iface.
+* Based on policy, either conflicting iface is removed
+* or new iface add request is blocked.
+*/
+s32
+wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+
+ WL_INFORM_MEM(("Incoming iface = %s\n", wl_iftype_to_str(new_wl_iftype)));
+
+ if (!is_discovery_iface(new_wl_iftype)) {
+ /* Incoming data interface request */
+ if (wl_cfg80211_get_sec_iface(cfg) != WL_IFACE_NOT_PRESENT) {
+ /* active interface present - Apply interface data policy */
+ ret = wl_cfg80211_data_if_mgmt(cfg, new_wl_iftype);
+ if (ret != BCME_OK) {
+ WL_ERR(("if_mgmt fail:%d\n", ret));
+ return ret;
+ }
+ }
+ }
+ /* Apply discovery config */
+ ret = wl_cfg80211_handle_discovery_config(cfg, new_wl_iftype);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
+
static struct wireless_dev *
wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
{
@@ -2161,10 +2839,13 @@
u16 cur_toggle_bytes;
u16 toggled_bit;
- if (!ndev || !mac_addr) {
+ if (!ndev || !mac_addr || ETHER_ISNULLADDR(mac_addr)) {
return -EINVAL;
}
+ WL_DBG(("%s:Mac addr" MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(mac_addr)));
+#if defined(SPECIFIC_MAC_GEN_SCHEME)
if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_AP) ||
(wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
/* Avoid invoking release mac addr code for interfaces using
@@ -2172,6 +2853,11 @@
*/
return BCME_OK;
}
+#else /* SPECIFIC_MAC_GEN_SCHEME */
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ return BCME_OK;
+ }
+#endif /* SPECIFIC_MAC_GEN_SCHEME */
/* Fetch last two bytes of mac address */
org_toggle_bytes = ntoh16(*((u16 *)&ndev->dev_addr[4]));
@@ -2184,7 +2870,7 @@
/* This toggled_bit is marked in the used mac addr
* mask. Clear it.
*/
- cfg->vif_macaddr_mask &= ~toggled_bit;
+ cfg->vif_macaddr_mask &= ~toggled_bit;
WL_INFORM(("MAC address - " MACDBG " released. toggled_bit:%04X vif_mask:%04X\n",
MAC2STRDBG(mac_addr), toggled_bit, cfg->vif_macaddr_mask));
} else {
@@ -2223,12 +2909,19 @@
* released. Ensure to call wl_release_vif_macaddress to free up
* the mac address.
*/
- if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
+#if defined(SPECIFIC_MAC_GEN_SCHEME)
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
mac_addr[0] |= 0x02;
} else if ((wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
mac_addr[0] |= 0x02;
mac_addr[4] ^= 0x80;
- } else {
+ }
+#else
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ mac_addr[0] |= 0x02;
+ }
+#endif /* SPECIFIC_MAC_GEN_SCHEME */
+ else {
/* For locally administered mac addresses, we keep the
* OUI part constant and just work on the last two bytes.
*/
@@ -2253,7 +2946,7 @@
/* Shift by one */
toggle_mask = toggle_mask << 0x1;
offset++;
- if (offset > 15) {
+ if (offset > MAX_VIF_OFFSET) {
/* We have used up all macaddresses. Something wrong! */
WL_ERR(("Entire range of macaddress used up.\n"));
ASSERT(0);
@@ -2264,6 +2957,79 @@
WL_INFORM_MEM(("Get virtual I/F mac addr: "MACDBG"\n", MAC2STRDBG(mac_addr)));
return 0;
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+static s32
+_wl_cfg80211_check_axi_error(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ hnd_ext_trap_hdr_t *hdr;
+ int axi_host_error_size;
+ uint8 *new_dst;
+ uint32 *ext_data = dhd->extended_trap_data;
+ struct file *fp = NULL;
+ char *filename = DHD_COMMON_DUMP_PATH
+ DHD_DUMP_AXI_ERROR_FILENAME
+ DHD_DUMP_HAL_FILENAME_SUFFIX;
+
+ WL_ERR(("%s: starts to read %s. Axi error \n", __FUNCTION__, filename));
+
+ fp = filp_open(filename, O_RDONLY, 0);
+
+ if (IS_ERR(fp) || (fp == NULL)) {
+ WL_ERR(("%s: Couldn't read the file, err %ld,File [%s] No previous axi error \n",
+ __FUNCTION__, PTR_ERR(fp), filename));
+ return ret;
+ }
+
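+ /* Read the AXI error record saved from a previous error into dhd->axi_err_dump, then delete the file. */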
+ kernel_read_compat(fp, fp->f_pos, (char *)dhd->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ filp_close(fp, NULL);
+
+ /* Delete axi error info file */
+ if (dhd_file_delete(filename) < 0) {
+ WL_ERR(("%s(): Failed to delete file: %s\n", __FUNCTION__, filename));
+ return ret;
+ }
+ WL_ERR(("%s(): Success to delete file: %s\n", __FUNCTION__, filename));
+
+ if (dhd->axi_err_dump->etd_axi_error_v1.signature != HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+ WL_ERR(("%s: Invalid AXI signature: 0x%x\n",
+ __FUNCTION__, dhd->axi_err_dump->etd_axi_error_v1.signature));
+ }
+
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+ new_dst = hdr->data;
+
+ axi_host_error_size = sizeof(dhd->axi_err_dump->axid)
+ + sizeof(dhd->axi_err_dump->fault_address);
+
+ /* TAG_TRAP_AXI_HOST_INFO tlv : host's axid, fault address */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_HOST_INFO,
+ (const void *)dhd->axi_err_dump,
+ axi_host_error_size, new_dst);
+
+ /* TAG_TRAP_AXI_ERROR tlv */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_ERROR,
+ (const void *)&dhd->axi_err_dump->etd_axi_error_v1,
+ sizeof(dhd->axi_err_dump->etd_axi_error_v1), new_dst);
+ hdr->len = new_dst - hdr->data;
+
+ dhd->dongle_trap_occured = TRUE;
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ copy_hang_info_trap(dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ memset(dhd->axi_err_dump, 0, sizeof(dhd_axi_error_dump_t));
+
+ dhd->hang_reason = HANG_REASON_DONGLE_TRAP;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ ret = BCME_ERROR;
+ return ret;
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* All Android/Linux private/Vendor Interface calls should make
* use of below API for interface creation.
@@ -2280,6 +3046,9 @@
s32 wl_mode;
dhd_pub_t *dhd;
wl_iftype_t macaddr_iftype = wl_iftype;
+ char *tmp = NULL;
+ char vif_name[IFNAMSIZ] = {0};
+ u8 vif_mac_addr[ETH_ALEN] = {0};
WL_INFORM_MEM(("if name: %s, wl_iftype:%d \n",
name ? name : "NULL", wl_iftype));
@@ -2291,6 +3060,7 @@
WL_ERR(("Interface name %s exists!\n", name));
return NULL;
}
+
wiphy = bcmcfg_to_wiphy(cfg);
dhd = (dhd_pub_t *)(cfg->pub);
if (!dhd) {
@@ -2300,13 +3070,30 @@
if ((wl_mode = wl_iftype_to_mode(wl_iftype)) < 0) {
return NULL;
}
- /* Protect the interace op context */
mutex_lock(&cfg->if_sync);
-
- if ((err = wl_check_vif_support(cfg, wl_iftype)) < 0) {
- goto fail;
+#ifdef WL_NAN
+ if (wl_iftype == WL_IF_TYPE_NAN) {
+ /*
+ * Bypass the role conflict check for NDI and handle it
+ * from dp req and dp resp context
+ * because in aware comms, ndi gets created soon after nan enable.
+ */
+ } else
+#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+ if ((err = wl_cfg80211_handle_if_role_conflict(cfg, wl_iftype)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
}
-
+#endif /* WL_IFACE_MGMT */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ /* Check the previous smmu fault error */
+ if ((err = _wl_cfg80211_check_axi_error(cfg)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ /* Protect the interface op context */
/* Do pre-create ops */
wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr, WL_IF_CREATE_REQ,
wl_iftype, wl_mode);
@@ -2315,9 +3102,40 @@
macaddr_iftype = WL_IF_TYPE_AP;
}
+ tmp = strchr(name, ' ');
+
+ /* For MBSS AP create_interface, the command is
+ * "create_interface <interface_name> <ap_mac_addr>";
+ * parse vif_name and vif_mac_addr out of the name string.
+ */
+ if (tmp) {
+ int i;
+
+ /* skip past the space delimiter */
+ tmp++;
+ mac = tmp;
+
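+ /* Convert the "xx:xx:xx:xx:xx:xx" string into raw MAC bytes. */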
+ for (i = 0; i < ETH_ALEN; i++) {
+ vif_mac_addr[i] = (hex_to_bin(mac[i * 3]) << 4)
+ | hex_to_bin(mac[i * 3 + 1]);
+ }
+
+ for (i = 0; i <= strlen(name); i++) {
+ if (*(name + i) != ' ') {
+ *(vif_name + i) = *(name + i);
+ } else {
+ *(vif_name + i) = '\0';
+ break;
+ }
+ }
+ }
+
if (mac) {
- /* If mac address is provided, use that */
- memcpy(mac_addr, mac, ETH_ALEN);
+ /* If mac address is provided, use that */
+ if (tmp != NULL)
+ memcpy(mac_addr, vif_mac_addr, ETH_ALEN);
+ else
+ memcpy(mac_addr, mac, ETH_ALEN);
} else if ((wl_get_vif_macaddr(cfg, macaddr_iftype, mac_addr) != BCME_OK)) {
/* Fetch the mac address to be used for virtual interface */
err = -EINVAL;
@@ -2334,8 +3152,18 @@
case WL_IF_TYPE_STA:
case WL_IF_TYPE_AP:
case WL_IF_TYPE_NAN:
- wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
- wl_iftype, mac_addr, name);
+ if (cfg->iface_cnt >= (IFACE_MAX_CNT - 1)) {
+ WL_ERR(("iface_cnt exceeds max cnt. created iface_cnt: %d\n",
+ cfg->iface_cnt));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+ if (*vif_name)
+ wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ wl_iftype, mac_addr, vif_name);
+ else
+ wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ wl_iftype, mac_addr, name);
break;
case WL_IF_TYPE_P2P_DISC:
case WL_IF_TYPE_P2P_GO:
@@ -2357,10 +3185,10 @@
}
if (!wdev) {
+ WL_ERR(("vif create failed. err:%d\n", err));
if (err != -ENOTSUPP) {
err = -ENODEV;
}
- WL_ERR(("vif create failed. err:%d\n", err));
goto fail;
}
@@ -2370,9 +3198,9 @@
wl_cfg80211_iface_state_ops(wdev,
WL_IF_CREATE_DONE, wl_iftype, wl_mode);
- WL_INFORM_MEM(("Vif created. wdev:%p netdev:%p"
- " dev->ifindex:%d cfg_iftype:%d, vif_count:%d\n",
- wdev, wdev->netdev, (wdev->netdev ? wdev->netdev->ifindex : 0xff),
+ WL_INFORM_MEM(("Vif created. dev->ifindex:%d"
+ " cfg_iftype:%d, vif_count:%d\n",
+ (wdev->netdev ? wdev->netdev->ifindex : 0xff),
wdev->iftype, cfg->vif_count));
mutex_unlock(&cfg->if_sync);
return wdev;
@@ -2390,6 +3218,10 @@
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
SUPP_LOG(("IF_ADD fail. err:%d\n", err));
wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+ dhd->iface_op_failed = TRUE;
#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
if (dhd->memdump_enabled) {
dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
@@ -2401,6 +3233,7 @@
net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
#endif /* BCMDONGLEHOST && OEM_ANDROID */
}
+exit:
mutex_unlock(&cfg->if_sync);
return NULL;
}
@@ -2474,24 +3307,30 @@
struct wireless_dev *wdev, char *ifname)
{
int ret = BCME_OK;
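+ /* Serialize interface operations; the actual teardown happens in _wl_cfg80211_del_if(). */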
+ mutex_lock(&cfg->if_sync);
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, wdev, ifname);
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+
+s32
+_wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname)
+{
+ int ret = BCME_OK;
s32 bssidx;
struct wiphy *wiphy;
u16 wl_mode;
u16 wl_iftype;
struct net_info *netinfo;
-#if (defined(OEM_ANDROID) || defined(DHD_FW_COREDUMP))
dhd_pub_t *dhd;
BCM_REFERENCE(dhd);
-#endif /* BCMDONGLEHOST && (OEM_ANDROID || DHD_FW_COREDUMP) */
if (!cfg) {
return -EINVAL;
}
- mutex_lock(&cfg->if_sync);
-#if (defined(OEM_ANDROID) || defined(DHD_FW_COREDUMP))
dhd = (dhd_pub_t *)(cfg->pub);
-#endif /* BCMDONGLEHOST && (OEM_ANDROID || DHD_FW_COREDUMP) */
if (!wdev && ifname) {
/* If only ifname is provided, fetch corresponding wdev ptr from our
@@ -2503,11 +3342,10 @@
/* Check whether we have a valid wdev ptr */
if (unlikely(!wdev)) {
WL_ERR(("wdev not found. '%s' does not exists\n", ifname));
- mutex_unlock(&cfg->if_sync);
return -ENODEV;
}
- WL_INFORM_MEM(("del vif. wdev_ptr:%p cfg_iftype:%d\n", wdev, wdev->iftype));
+ WL_INFORM_MEM(("del vif. wdev cfg_iftype:%d\n", wdev->iftype));
wiphy = wdev->wiphy;
#ifdef WL_CFG80211_P2P_DEV_IF
@@ -2522,8 +3360,9 @@
goto exit;
} else {
/* success case. return from here */
- cfg->vif_count--;
- mutex_unlock(&cfg->if_sync);
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
return BCME_OK;
}
}
@@ -2571,10 +3410,17 @@
exit:
if (ret == BCME_OK) {
/* Successful case */
- cfg->vif_count--;
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_DELETE_DONE, wl_iftype, wl_mode);
- wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
+ WL_IF_DELETE_DONE, wl_iftype, wl_mode);
+#ifdef WL_NAN
+ if (!((cfg->nancfg.mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
+#endif /* WL_NAN */
+ {
+ wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
+ }
WL_INFORM_MEM(("vif deleted. vif_count:%d\n", cfg->vif_count));
} else {
if (!wdev->netdev) {
@@ -2594,12 +3440,19 @@
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
SUPP_LOG(("IF_DEL fail. err:%d\n", ret));
wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+ /* If the dongle is already down due to a previous hang or other
+ * conditions, sending one more hang notification is not needed.
+ */
+ if (dhd_query_bus_erros(dhd) || (ret == BCME_DONGLE_DOWN)) {
+ goto end;
+ }
+ dhd->iface_op_failed = TRUE;
#if defined(DHD_FW_COREDUMP)
if (dhd->memdump_enabled && (ret != -EBADTYPE)) {
dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
dhd_bus_mem_dump(dhd);
}
-#endif /* BCMDONGLEHOST && DHD_FW_COREDUMP */
+#endif /* DHD_FW_COREDUMP */
#if defined(OEM_ANDROID)
WL_ERR(("Notify hang event to upper layer \n"));
dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
@@ -2607,8 +3460,7 @@
#endif /* BCMDONGLEHOST && OEM_ANDROID */
}
}
-
- mutex_unlock(&cfg->if_sync);
+end:
return ret;
}
@@ -2658,6 +3510,9 @@
chanspec_t chspec;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(WL_SUPPORT_MULTIP2P)
+ s32 is_mp2p_supported = BCME_ERROR;
+#endif /* WL_SUPPORT_MULTIP2P */
WL_INFORM_MEM(("Enter. current_role:%d new_role:%d \n", ndev->ieee80211_ptr->iftype, type));
@@ -2672,7 +3527,7 @@
}
/* Abort any on-going scans to avoid race condition issues */
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
if (index < 0) {
@@ -2683,6 +3538,9 @@
return BCME_ERROR;
}
+#if defined(WL_SUPPORT_MULTIP2P)
+ is_mp2p_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_MP2P_MODE);
+#endif /* WL_SUPPORT_MULTIP2P */
/* In concurrency case, STA may be already associated in a particular
* channel. so retrieve the current channel of primary interface and
* then start the virtual interface on that.
@@ -2693,7 +3551,11 @@
* p2p_go_count is the counter for GO creation
* requests.
*/
- if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+ if (TRUE &&
+#if defined(WL_SUPPORT_MULTIP2P)
+ (is_mp2p_supported <= 0) &&
+#endif // endif
+ (cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
WL_ERR(("FW does not support multiple GO\n"));
return BCME_ERROR;
}
@@ -2773,8 +3635,21 @@
mutex_lock(&cfg->if_sync);
netinfo = wl_get_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
if (unlikely(!netinfo)) {
- WL_ERR(("netinfo not found \n"));
- err = -ENODEV;
+#ifdef WL_STATIC_IF
+ if (is_static_iface(cfg, ndev)) {
+ /* In case of static interfaces, the netinfo is
+ * allocated only when the FW interface is initialized, so
+ * store the value and use it during initialization.
+ */
+ WL_INFORM_MEM(("skip change vif for static if\n"));
+ ndev->ieee80211_ptr->iftype = type;
+ err = BCME_OK;
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ WL_ERR(("netinfo not found \n"));
+ err = -ENODEV;
+ }
goto fail;
}
@@ -2865,6 +3740,7 @@
{
bool ifadd_expected = FALSE;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bool bss_pending_op = TRUE;
/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd")
* redirect the IF_ADD event to ifchange as it is not a real "new" interface
@@ -2878,7 +3754,7 @@
wl_clr_p2p_status(cfg, IF_ADDING);
} else if (cfg->bss_pending_op) {
ifadd_expected = TRUE;
- cfg->bss_pending_op = FALSE;
+ bss_pending_op = FALSE;
}
if (ifadd_expected) {
@@ -2888,12 +3764,18 @@
if_event_info->ifidx = ifidx;
if_event_info->bssidx = bssidx;
if_event_info->role = role;
- strncpy(if_event_info->name, name, IFNAMSIZ);
- if_event_info->name[IFNAMSIZ] = '\0';
+ strlcpy(if_event_info->name, name, sizeof(if_event_info->name));
+ if_event_info->name[IFNAMSIZ - 1] = '\0';
if (mac)
memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+
+ /* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
WL_INFORM_MEM(("IF_ADD ifidx:%d bssidx:%d role:%d\n",
ifidx, bssidx, role));
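+ /* Publish the updated event info before waking up the waiter. */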
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
@@ -2907,20 +3789,26 @@
bool ifdel_expected = FALSE;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
wl_if_event_info *if_event_info = &cfg->if_event_info;
+ bool bss_pending_op = TRUE;
if (wl_get_p2p_status(cfg, IF_DELETING)) {
ifdel_expected = TRUE;
wl_clr_p2p_status(cfg, IF_DELETING);
} else if (cfg->bss_pending_op) {
ifdel_expected = TRUE;
- cfg->bss_pending_op = FALSE;
+ bss_pending_op = FALSE;
}
if (ifdel_expected) {
if_event_info->valid = TRUE;
if_event_info->ifidx = ifidx;
if_event_info->bssidx = bssidx;
+ /* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
WL_INFORM_MEM(("IF_DEL ifidx:%d bssidx:%d\n", ifidx, bssidx));
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
@@ -2936,6 +3824,7 @@
if (wl_get_p2p_status(cfg, IF_CHANGING)) {
wl_set_p2p_status(cfg, IF_CHANGED);
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
@@ -2943,981 +3832,6 @@
return BCME_ERROR;
}
-/* Find listen channel */
-static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
- const u8 *ie, u32 ie_len)
-{
- const wifi_p2p_ie_t *p2p_ie;
- const u8 *end, *pos;
- s32 listen_channel;
-
- pos = (const u8 *)ie;
-
- p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
-
- if (p2p_ie == NULL) {
- return 0;
- }
-
- if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
- CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
- return 0;
- }
-
- pos = p2p_ie->subelts;
- end = p2p_ie->subelts + (p2p_ie->len - 4);
-
- CFGP2P_DBG((" found p2p ie ! lenth %d \n",
- p2p_ie->len));
-
- while (pos < end) {
- uint16 attr_len;
- if (pos + 2 >= end) {
- CFGP2P_DBG((" -- Invalid P2P attribute"));
- return 0;
- }
- attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
-
- if (pos + 3 + attr_len > end) {
- CFGP2P_DBG(("P2P: Attribute underflow "
- "(len=%u left=%d)",
- attr_len, (int) (end - pos - 3)));
- return 0;
- }
-
- /* if Listen Channel att id is 6 and the vailue is valid,
- * return the listen channel
- */
- if (pos[0] == 6) {
- /* listen channel subel length format
- * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
- */
- listen_channel = pos[1 + 2 + 3 + 1];
-
- if (listen_channel == SOCIAL_CHAN_1 ||
- listen_channel == SOCIAL_CHAN_2 ||
- listen_channel == SOCIAL_CHAN_3) {
- CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
- return listen_channel;
- }
- }
- pos += 3 + attr_len;
- }
- return 0;
-}
-
-static void wl_scan_prep(struct bcm_cfg80211 *cfg, struct wl_scan_params *params,
- struct cfg80211_scan_request *request)
-{
- u32 n_ssids;
- u32 n_channels;
- u16 channel;
- chanspec_t chanspec;
- s32 i = 0, j = 0, offset;
- char *ptr;
- wlc_ssid_t ssid;
-
- memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = -1;
- params->active_time = -1;
- params->passive_time = -1;
- params->home_time = -1;
- params->channel_num = 0;
- memset(&params->ssid, 0, sizeof(wlc_ssid_t));
-
- WL_SCAN(("Preparing Scan request\n"));
- WL_SCAN(("nprobes=%d\n", params->nprobes));
- WL_SCAN(("active_time=%d\n", params->active_time));
- WL_SCAN(("passive_time=%d\n", params->passive_time));
- WL_SCAN(("home_time=%d\n", params->home_time));
- WL_SCAN(("scan_type=%d\n", params->scan_type));
-
- params->nprobes = htod32(params->nprobes);
- params->active_time = htod32(params->active_time);
- params->passive_time = htod32(params->passive_time);
- params->home_time = htod32(params->home_time);
-
- /* if request is null just exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
- n_ssids = request->n_ssids;
- n_channels = request->n_channels;
-
- /* Copy channel array if applicable */
- WL_SCAN(("### List of channelspecs to scan ###\n"));
- if (n_channels > 0) {
- for (i = 0; i < n_channels; i++) {
- channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
- /* SKIP DFS channels for Secondary interface */
- if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
- (request->channels[i]->flags &
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
-#else
- (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
- continue;
-
- chanspec = WL_CHANSPEC_BW_20;
- if (chanspec == INVCHANSPEC) {
- WL_ERR(("Invalid chanspec! Skipping channel\n"));
- continue;
- }
-
- if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_5G) {
- WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_2G;
- } else {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_2G) {
- WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_5G;
- }
- params->channel_list[j] = channel;
- params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
- params->channel_list[j] |= chanspec;
- WL_SCAN(("Chan : %d, Channel spec: %x \n",
- channel, params->channel_list[j]));
- params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
- j++;
- }
- } else {
- WL_SCAN(("Scanning all channels\n"));
- }
- n_channels = j;
- /* Copy ssid array if applicable */
- WL_SCAN(("### List of SSIDs to scan ###\n"));
- if (n_ssids > 0) {
- offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
- offset = roundup(offset, sizeof(u32));
- ptr = (char*)params + offset;
- for (i = 0; i < n_ssids; i++) {
- memset(&ssid, 0, sizeof(wlc_ssid_t));
- ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
- memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
- if (!ssid.SSID_len)
- WL_SCAN(("%d: Broadcast scan\n", i));
- else
- WL_SCAN(("%d: scan for %s size =%d\n", i,
- ssid.SSID, ssid.SSID_len));
- memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
- ptr += sizeof(wlc_ssid_t);
- }
- } else {
- WL_SCAN(("Broadcast scan\n"));
- }
- /* Adding mask to channel numbers */
- params->channel_num =
- htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
-
- if (n_channels == 1) {
- params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
- params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
- }
-}
-
-static s32
-wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
-{
- wl_uint32_list_t *list;
- s32 err = BCME_OK;
- if (valid_chan_list == NULL || size <= 0)
- return -ENOMEM;
-
- memset(valid_chan_list, 0, size);
- list = (wl_uint32_list_t *)(void *) valid_chan_list;
- list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
- if (err != 0) {
- WL_ERR(("get channels failed with %d\n", err));
- }
-
- return err;
-}
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
-#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
-bool g_first_broadcast_scan = TRUE;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
-static s32
-wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct cfg80211_scan_request *request, uint16 action)
-{
- s32 err = BCME_OK;
- u32 n_channels;
- u32 n_ssids;
- s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
- wl_escan_params_t *params = NULL;
- u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
- u32 num_chans = 0;
- s32 channel;
- u32 n_valid_chan;
- s32 search_state = WL_P2P_DISC_ST_SCAN;
- u32 i, j, n_nodfs = 0;
- u16 *default_chan_list = NULL;
- wl_uint32_list_t *list;
- s32 bssidx = -1;
- struct net_device *dev = NULL;
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- bool is_first_init_2g_scan = false;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
- p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
- u32 chan_mem = 0;
-
- WL_DBG(("Enter \n"));
-
- /* scan request can come with empty request : perform all default scan */
- if (!cfg) {
- err = -EINVAL;
- goto exit;
- }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN)
- if ((request != NULL) && !ETHER_ISNULLADDR(request->mac_addr) &&
- !ETHER_ISNULLADDR(request->mac_addr_mask) &&
- !wl_is_wps_enrollee_active(ndev, request->ie, request->ie_len)) {
- /* Call scanmac only for valid configuration */
- err = wl_cfg80211_scan_mac_enable(ndev, request->mac_addr,
- request->mac_addr_mask);
- if (err < 0) {
- if (err == BCME_UNSUPPORTED) {
- /* Ignore if chip doesn't support the feature */
- err = BCME_OK;
- } else {
- /* For errors other than unsupported fail the scan */
- WL_ERR(("%s : failed to set random mac for host scan, %d\n",
- __FUNCTION__, err));
- err = -EAGAIN;
- goto exit;
- }
- }
- } else {
- /* No randmac config provided. Ensure scanmac is disabled */
- wl_cfg80211_scan_mac_disable(ndev);
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN) */
-
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- /* LEGACY SCAN TRIGGER */
- WL_SCAN((" LEGACY E-SCAN START\n"));
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- if (!request) {
- err = -EINVAL;
- goto exit;
- }
- if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
-#ifdef USE_INITIAL_2G_SCAN
- struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
- /* allow one 5G channel to add previous connected channel in 5G */
- bool allow_one_5g_channel = TRUE;
- j = 0;
- for (i = 0; i < request->n_channels; i++) {
- int tmp_chan = ieee80211_frequency_to_channel
- (request->channels[i]->center_freq);
- if (tmp_chan > CH_MAX_2G_CHANNEL) {
- if (allow_one_5g_channel)
- allow_one_5g_channel = FALSE;
- else
- continue;
- }
- if (j > CH_MAX_2G_CHANNEL) {
- WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
- " and previous 5G connected channel\n",
- j, CH_MAX_2G_CHANNEL));
- break;
- }
- bcopy(request->channels[i], &tmp_channel_list[j],
- sizeof(struct ieee80211_channel));
- WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
- j++;
- }
- if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
- for (i = 0; i < j; i++)
- bcopy(&tmp_channel_list[i], request->channels[i],
- sizeof(struct ieee80211_channel));
-
- request->n_channels = j;
- is_first_init_2g_scan = true;
- }
- else
- WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
-
- WL_SCAN(("request->n_channels=%d\n", request->n_channels));
-#else /* USE_INITIAL_SHORT_DWELL_TIME */
- is_first_init_2g_scan = true;
-#endif /* USE_INITIAL_2G_SCAN */
- g_first_broadcast_scan = false;
- }
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- /* if scan request is not empty, parse scan request parameters */
- if (request != NULL) {
- n_channels = request->n_channels;
- n_ssids = request->n_ssids;
- if (n_channels % 2)
- /* If n_channels is odd, add a pad of u16 */
- params_size += sizeof(u16) * (n_channels + 1);
- else
- params_size += sizeof(u16) * n_channels;
-
- /* Allocate space for populating ssids in wl_escan_params_t struct */
- params_size += sizeof(struct wlc_ssid) * n_ssids;
- }
- params = (wl_escan_params_t *)MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- err = -ENOMEM;
- goto exit;
- }
- wl_scan_prep(cfg, ¶ms->params, request);
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- /* Override active_time to reduce scan time if it's the first broadcast scan. */
- if (is_first_init_2g_scan)
- params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- params->version = htod32(ESCAN_REQ_VERSION);
- params->action = htod16(action);
- wl_escan_set_sync_id(params->sync_id, cfg);
- wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
- if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
- WL_ERR(("ioctl buffer length not sufficient\n"));
- MFREE(cfg->osh, params, params_size);
- err = -ENOMEM;
- goto exit;
- }
- if (cfg->active_scan == PASSIVE_SCAN) {
- params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
- WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
- }
-
- bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
-
- err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
- cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- WL_INFORM_MEM(("LEGACY_SCAN sync ID: %d, bssidx: %d\n", params->sync_id, bssidx));
- if (unlikely(err)) {
- if (err == BCME_EPERM)
- /* Scan Not permitted at this point of time */
- WL_DBG((" Escan not permitted at this time (%d)\n", err));
- else
- WL_ERR((" Escan set error (%d)\n", err));
- } else {
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
- }
- MFREE(cfg->osh, params, params_size);
- }
- else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
- /* P2P SCAN TRIGGER */
- s32 _freq = 0;
- n_nodfs = 0;
-
-#ifdef WL_NAN
- if (wl_cfgnan_check_state(cfg)) {
- WL_ERR(("nan is enabled, nan + p2p concurrency not supported\n"));
- return BCME_UNSUPPORTED;
- }
-#endif /* WL_NAN */
- if (request && request->n_channels) {
- num_chans = request->n_channels;
- WL_SCAN((" chann number : %d\n", num_chans));
- chan_mem = num_chans * sizeof(*default_chan_list);
- default_chan_list = MALLOCZ(cfg->osh, chan_mem);
- if (default_chan_list == NULL) {
- WL_ERR(("channel list allocation failed \n"));
- err = -ENOMEM;
- goto exit;
- }
- if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
-#ifdef P2P_SKIP_DFS
- int is_printed = false;
-#endif /* P2P_SKIP_DFS */
- list = (wl_uint32_list_t *) chan_buf;
- n_valid_chan = dtoh32(list->count);
- if (n_valid_chan > WL_NUMCHANNELS) {
- WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
- MFREE(cfg->osh, default_chan_list, chan_mem);
- err = -EINVAL;
- goto exit;
- }
-
- for (i = 0; i < num_chans; i++)
- {
-#ifdef WL_HOST_BAND_MGMT
- int channel_band = 0;
-#endif /* WL_HOST_BAND_MGMT */
- _freq = request->channels[i]->center_freq;
- channel = ieee80211_frequency_to_channel(_freq);
-#ifdef WL_HOST_BAND_MGMT
- channel_band = (channel > CH_MAX_2G_CHANNEL) ?
- WLC_BAND_5G : WLC_BAND_2G;
- if ((cfg->curr_band != WLC_BAND_AUTO) &&
- (cfg->curr_band != channel_band) &&
- !IS_P2P_SOCIAL_CHANNEL(channel))
- continue;
-#endif /* WL_HOST_BAND_MGMT */
-
- /* ignore DFS channels */
- if (request->channels[i]->flags &
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- (IEEE80211_CHAN_NO_IR
- | IEEE80211_CHAN_RADAR))
-#else
- (IEEE80211_CHAN_RADAR
- | IEEE80211_CHAN_PASSIVE_SCAN))
-#endif // endif
- continue;
-#ifdef P2P_SKIP_DFS
- if (channel >= 52 && channel <= 144) {
- if (is_printed == false) {
- WL_ERR(("SKIP DFS CHANs(52~144)\n"));
- is_printed = true;
- }
- continue;
- }
-#endif /* P2P_SKIP_DFS */
-
- for (j = 0; j < n_valid_chan; j++) {
- /* allow only channels supported by
- * the current regulatory domain
- */
- if (n_nodfs >= num_chans) {
- break;
- }
- if (channel == (dtoh32(list->element[j]))) {
- default_chan_list[n_nodfs++] =
- channel;
- }
- }
-
- }
- }
- if (num_chans == SOCIAL_CHAN_CNT && (
- (default_chan_list[0] == SOCIAL_CHAN_1) &&
- (default_chan_list[1] == SOCIAL_CHAN_2) &&
- (default_chan_list[2] == SOCIAL_CHAN_3))) {
- /* SOCIAL CHANNELS 1, 6, 11 */
- search_state = WL_P2P_DISC_ST_SEARCH;
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- WL_DBG(("P2P SEARCH PHASE START \n"));
- } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
- ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
- /* If you are already a GO, then do SEARCH only */
- WL_DBG(("Already a GO. Do SEARCH Only"));
- search_state = WL_P2P_DISC_ST_SEARCH;
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
-
- } else if (num_chans == 1) {
- p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
- WL_INFORM_MEM(("Trigger p2p join scan\n"));
- } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
- /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
- * the supplicant
- */
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- } else {
- WL_DBG(("P2P SCAN STATE START \n"));
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
- }
- } else {
- err = -EINVAL;
- goto exit;
- }
- err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
- search_state, action,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
- p2p_scan_purpose);
-
- if (!err)
- cfg->p2p->search_state = search_state;
-
- MFREE(cfg->osh, default_chan_list, chan_mem);
- }
-exit:
- if (unlikely(err)) {
- /* Don't print Error incase of Scan suppress */
- if ((err == BCME_EPERM) && cfg->scan_suppressed)
- WL_DBG(("Escan failed: Scan Suppressed \n"));
- else
- WL_ERR(("scan error (%d)\n", err));
- }
- return err;
-}
-
-static s32
-wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- s32 err = BCME_OK;
- s32 passive_scan;
- s32 passive_scan_time;
- s32 passive_scan_time_org;
- wl_scan_results_t *results;
- WL_SCAN(("Enter \n"));
-
- results = wl_escan_get_buf(cfg, FALSE);
- results->version = 0;
- results->count = 0;
- results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
-
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
- passive_scan = cfg->active_scan ? 0 : 1;
- err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- goto exit;
- }
-
- if (passive_channel_skip) {
-
- err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
-
- passive_scan_time = 0;
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time, sizeof(passive_scan_time));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n",
- passive_channel_skip));
- }
-
- err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
-
- if (passive_channel_skip) {
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
- passive_scan_time_org));
- }
-
-exit:
- return err;
-}
-
-static s32
-__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct cfg80211_ssid *ssids;
- struct ether_addr primary_mac;
- bool p2p_ssid;
-#ifdef WL11U
- bcm_tlv_t *interworking_ie;
-#endif // endif
- s32 err = 0;
- s32 bssidx = -1;
- s32 i;
-
- unsigned long flags;
- static s32 busy_count = 0;
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- struct net_device *remain_on_channel_ndev = NULL;
-#endif // endif
- /*
- * Hostapd triggers scan before starting automatic channel selection
- * to collect channel characteristics. However firmware scan engine
- * doesn't support any channel characteristics collection along with
- * scan. Hence return scan success.
- */
- if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
- WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
- return 0;
- }
-
- ndev = ndev_to_wlc_ndev(ndev, cfg);
-
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
- WL_ERR(("Sending Action Frames. Try it again.\n"));
- return -EAGAIN;
- }
-
- WL_DBG(("Enter wiphy (%p)\n", wiphy));
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- if (cfg->scan_request == NULL) {
- wl_clr_drv_status_all(cfg, SCANNING);
- WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
- } else {
- WL_ERR(("Scanning already\n"));
- return -EAGAIN;
- }
- }
- if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
- WL_ERR(("Scanning being aborted\n"));
- return -EAGAIN;
- }
- if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
- WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
- return -EOPNOTSUPP;
- }
-
-#ifdef P2P_LISTEN_OFFLOADING
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
- return -EAGAIN;
- }
-#endif /* P2P_LISTEN_OFFLOADING */
-
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
- if (remain_on_channel_ndev) {
- WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
- wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
- }
-#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-
- if (request) { /* scan bss */
- ssids = request->ssids;
- p2p_ssid = false;
- for (i = 0; i < request->n_ssids; i++) {
- if (ssids[i].ssid_len &&
- IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
- p2p_ssid = true;
- break;
- }
- }
- if (p2p_ssid) {
- if (cfg->p2p_supported) {
- /* p2p scan trigger */
- if (p2p_on(cfg) == false) {
- /* p2p on at the first time */
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#if defined(P2P_IE_MISSING_FIX)
- cfg->p2p_prb_noti = false;
-#endif // endif
- }
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- p2p_scan(cfg) = true;
- }
- } else {
- /* legacy scan trigger
- * So, we have to disable p2p discovery if p2p discovery is on
- */
- if (cfg->p2p_supported) {
- p2p_scan(cfg) = false;
- /* If Netdevice is not equals to primary and p2p is on
- * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
- */
-
- if (p2p_scan(cfg) == false) {
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- err = wl_cfgp2p_discover_enable_search(cfg,
- false);
- if (unlikely(err)) {
- goto scan_out;
- }
-
- }
- }
- }
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- if ((bssidx = wl_get_bssidx_by_wdev(cfg,
- ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from ndev(%p) failed\n",
- ndev));
- err = BCME_ERROR;
- goto scan_out;
- }
-#ifdef WL11U
- if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
- request->ie, request->ie_len)) != NULL) {
- if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
- VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
- interworking_ie->data,
- interworking_ie->len)) != BCME_OK) {
- WL_ERR(("Failed to add interworking IE"));
- }
- } else if (cfg->wl11u) {
- /* we have to clear IW IE and disable gratuitous ARP */
- wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
- 0, bssidx);
- /* we don't care about error here
- * because the only failure case is unsupported,
- * which is fine
- */
- if (unlikely(err)) {
- WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
- }
- cfg->wl11u = FALSE;
- }
-#endif /* WL11U */
- if (request) {
- err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
- request->ie, request->ie_len);
- }
-
- if (unlikely(err)) {
- goto scan_out;
- }
-
- }
- }
- } else { /* scan in ibss */
- ssids = this_ssid;
- }
-
- if (request && cfg->p2p_supported) {
- WL_TRACE_HW4(("START SCAN\n"));
-#if defined(OEM_ANDROID)
- DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
- SCAN_WAKE_LOCK_TIMEOUT);
- DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
-#endif // endif
- }
-
- if (cfg->p2p_supported) {
- if (request && p2p_on(cfg) && p2p_scan(cfg)) {
-
- /* find my listen channel */
- cfg->afx_hdl->my_listen_chan =
- wl_find_listen_channel(cfg, request->ie,
- request->ie_len);
- err = wl_cfgp2p_enable_discovery(cfg, ndev,
- request->ie, request->ie_len);
-
- if (unlikely(err)) {
- goto scan_out;
- }
- }
- }
- err = wl_do_escan(cfg, wiphy, ndev, request);
- if (likely(!err))
- goto scan_success;
- else
- goto scan_out;
-
-scan_success:
- busy_count = 0;
- cfg->scan_request = request;
- wl_set_drv_status(cfg, SCANNING, ndev);
-
- return 0;
-
-scan_out:
- if (err == BCME_BUSY || err == BCME_NOTREADY) {
- WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
- err = -EBUSY;
- } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
- WL_ERR(("Scan not permitted due to scan suppress\n"));
- err = -EPERM;
- } else {
- /* For all other fw errors, use a generic error code as return
- * value to cfg80211 stack
- */
- err = -EAGAIN;
- }
-
-#define SCAN_EBUSY_RETRY_LIMIT 20
- if (err == -EBUSY) {
- /* Flush FW preserve buffer logs for checking failure */
- if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
- struct ether_addr bssid;
- s32 ret = 0;
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- busy_count = 0;
- WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
- wl_get_drv_status(cfg, SCANNING, ndev),
- wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
- wl_get_drv_status(cfg, CONNECTING, ndev),
- wl_get_drv_status(cfg, CONNECTED, ndev),
- wl_get_drv_status(cfg, DISCONNECTING, ndev),
- wl_get_drv_status(cfg, AP_CREATING, ndev),
- wl_get_drv_status(cfg, AP_CREATED, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
-
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
- dhd_bus_mem_dump(dhdp);
- }
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
-
- bzero(&bssid, sizeof(bssid));
- if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
- &bssid, ETHER_ADDR_LEN)) == 0) {
- WL_ERR(("FW is connected with " MACDBG "/n",
- MAC2STRDBG(bssid.octet)));
- } else {
- WL_ERR(("GET BSSID failed with %d\n", ret));
- }
-
- wl_cfg80211_scan_abort(cfg);
-
- } else {
- /* Hold the context for 400msec, so that 10 subsequent scans
- * can give a buffer of 4sec which is enough to
- * cover any on-going scan in the firmware
- */
- WL_DBG(("Enforcing delay for EBUSY case \n"));
- msleep(400);
- }
- } else {
- busy_count = 0;
- }
-
- wl_clr_drv_status(cfg, SCANNING, ndev);
-#if defined(OEM_ANDROID)
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
-#endif // endif
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->scan_request = NULL;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
- return err;
-}
-
-static s32
-wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
-{
- u32 scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
-
-#ifdef WES_SUPPORT
-#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
- if ((cfg->custom_scan_channel_time > DHD_SCAN_ASSOC_ACTIVE_TIME) |
- (cfg->custom_scan_unassoc_time > DHD_SCAN_UNASSOC_ACTIVE_TIME) |
- (cfg->custom_scan_passive_time > DHD_SCAN_PASSIVE_TIME) |
- (cfg->custom_scan_home_time > DHD_SCAN_HOME_TIME) |
- (cfg->custom_scan_home_away_time > DHD_SCAN_HOME_AWAY_TIME)) {
- scan_timer_interval_ms = CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS;
- }
-#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
-#endif /* WES_SUPPORT */
-
- /* If NAN is enabled adding +10 sec to the existing timeout value */
-#ifdef WL_NAN
- if (cfg->nan_enable) {
- scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
- }
-#endif /* WL_NAN */
- WL_INFORM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
- return scan_timer_interval_ms;
-}
-
-#if defined(WL_CFG80211_P2P_DEV_IF)
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
-#else
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-#endif /* WL_CFG80211_P2P_DEV_IF */
-{
- s32 err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(WL_CFG80211_P2P_DEV_IF)
- struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- WL_DBG(("Enter\n"));
- RETURN_EIO_IF_NOT_UP(cfg);
-
-#ifdef DHD_IFDEBUG
-#ifdef WL_CFG80211_P2P_DEV_IF
- PRINT_WDEV_INFO(request->wdev);
-#else
- PRINT_WDEV_INFO(ndev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#endif /* DHD_IFDEBUG */
-
- if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- if (wl_cfg_multip2p_operational(cfg)) {
- WL_ERR(("wlan0 scan failed, p2p devices are operational"));
- return -ENODEV;
- }
- }
-
- mutex_lock(&cfg->usr_sync);
- err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
- if (unlikely(err)) {
- WL_ERR(("scan error (%d)\n", err));
- } else {
- /* Arm the timer */
- mod_timer(&cfg->scan_timeout,
- jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
- }
- mutex_unlock(&cfg->usr_sync);
-#ifdef WL_DRV_AVOID_SCANCACHE
- /* Reset roam cache after successful scan request */
-#ifdef ROAM_CHANNEL_CACHE
- if (!err) {
- reset_roam_cache(cfg);
- }
-#endif /* ROAM_CHANNEL_CACHE */
-#endif /* WL_DRV_AVOID_SCANCACHE */
- return err;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-static void
-wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
-{
- struct bcm_cfg80211 *cfg;
-
- WL_DBG(("Enter %s\n", __FUNCTION__));
- cfg = wiphy_priv(wdev->wiphy);
-
- /* Check if any scan in progress only then abort */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_cfg80211_scan_abort(cfg);
- /* Only scan abort is issued here. As per the expectation of abort_scan
- * the status of abort is needed to be communicated using cfg80211_scan_done call.
- * Here we just issue abort request and let the scan complete path to indicate
- * abort to cfg80211 layer.
- */
- WL_DBG(("%s: Scan abort issued to FW\n", __FUNCTION__));
- }
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
-
static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
{
s32 err = 0;
@@ -3947,6 +3861,14 @@
s32 err = 0;
u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if ((cmd == WLC_SET_LRL) &&
+ (retry != CUSTOM_LONG_RETRY_LIMIT)) {
+ WL_DBG(("CUSTOM_LONG_RETRY_LIMIT is used.Ignore configuration"));
+ return err;
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+
retry = htod32(retry);
err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
if (unlikely(err)) {
@@ -4076,7 +3998,6 @@
/* free & initiralize VSIE (Vendor Specific IE) */
if (cfg->ibss_vsie != NULL) {
MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie = NULL;
cfg->ibss_vsie_len = 0;
}
}
@@ -4100,8 +4021,7 @@
goto end;
}
/* change the command from "add" to "del" */
- strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
- cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strlcpy(cfg->ibss_vsie->cmd, "del", sizeof(cfg->ibss_vsie->cmd));
ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
cfg->ibss_vsie, cfg->ibss_vsie_len,
@@ -4109,9 +4029,8 @@
WL_ERR(("ret=%d\n", ret));
if (ret == BCME_OK) {
- /* free & initialize VSIE */
+ /* Free & initialize VSIE */
MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie = NULL;
cfg->ibss_vsie_len = 0;
}
end:
@@ -4132,7 +4051,7 @@
struct wireless_dev* wdev = NULL;
struct net_device *new_ndev = NULL;
struct net_device *primary_ndev = NULL;
- s32 timeout;
+ long timeout;
wl_aibss_if_t aibss_if;
wl_if_event_info *event = NULL;
@@ -4146,13 +4065,13 @@
/* generate a new MAC address for the IBSS interface */
get_primary_mac(cfg, &cfg->ibss_if_addr);
cfg->ibss_if_addr.octet[4] ^= 0x40;
- memset(&aibss_if, sizeof(aibss_if), 0);
+ bzero(&aibss_if, sizeof(aibss_if));
memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
aibss_if.chspec = 0;
aibss_if.len = sizeof(aibss_if);
cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (err) {
@@ -4215,7 +4134,7 @@
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *ndev = NULL;
struct net_device *primary_ndev = NULL;
- s32 timeout;
+ long timeout;
if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
return -EINVAL;
@@ -4223,7 +4142,7 @@
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (err) {
@@ -4376,6 +4295,74 @@
return ret;
}
+#if defined(IGUANA_LEGACY_CHIPS)
+#define BCM4355_REV_C1 0x0c
+#define BCM4355_REV_D0 0x0d
+bool
+wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev)
+{
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
+
+ /* Get the device rev info */
+ bzero(&revinfo, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("wl_customer6_legacy_chip_check: GET revinfo FAILED. ret:%d\n", ret));
+ ASSERT(0);
+ return false;
+ }
+
+ WL_DBG(("wl_customer6_legacy_chip_check: GET_REVINFO device 0x%x, vendor 0x%x,"
+ " chipnum 0x%x\n",
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+ chipnum = revinfo.chipnum;
+ if (
+#ifdef BCM4350_CHIP_ID
+ (chipnum == BCM4350_CHIP_ID) ||
+#endif /* BCM4350_CHIP_ID */
+#ifdef BCM4355_CHIP_ID
+ ((chipnum == BCM4355_CHIP_ID) && (revinfo.chiprev < BCM4355_REV_C1 ||
+ revinfo.chiprev == BCM4355_REV_D0)) ||
+#endif /* BCM4355_CHIP_ID */
+#ifdef BCM4345_CHIP_ID
+ (chipnum == BCM4345_CHIP_ID) ||
+#endif /* BCM4345_CHIP_ID */
+#ifdef BCM4373_CHIP_ID
+ (chipnum == BCM4373_CHIP_ID) ||
+#endif /* BCM4373_CHIP_ID */
+ false) {
+ /* WAR required */
+ WL_DBG(("%s: Customer6 legacy chip identified\n", __FUNCTION__));
+ return true;
+ }
+
+ return false;
+}
+
+void
+wl_bss_iovar_war(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 *val)
+{
+ if (wl_customer6_legacy_chip_check(cfg, ndev)) {
+ /* A few firmware branches have issues in bss iovar handling
+ * that can't be changed since they are already in production.
+ */
+ if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else {
+ /* Ignore for other bss enums */
+ return;
+ }
+ WL_ERR(("wl bss %d\n", *val));
+ }
+}
+#endif // endif
+
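/*
 * Editor's sketch (illustration only, not part of this patch): how a caller
 * might apply wl_bss_iovar_war() before issuing the "bss" iovar. The call
 * site below is assumed for illustration; only wl_bss_iovar_war() and the
 * WLC_AP_IOV_OP_* enums are taken from the code above.
 */
#if 0 /* example only */
	s32 op = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;

	/* On affected legacy chips this swaps the AP/STA manual-create enums */
	wl_bss_iovar_war(cfg, ndev, &op);
	/* 'op' now carries the value the legacy firmware expects and can be
	 * packed into the "bss" iovar payload as usual.
	 */
#endif /* example only */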
s32
wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
struct net_device *ndev, s32 bsscfg_idx,
@@ -4508,7 +4495,7 @@
return mode;
}
-static s32
+s32
cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
{
switch (type) {
@@ -4597,7 +4584,6 @@
}
}
-#define MAX_ACTIVE_IF_LINKS 2
struct net_device *
wl_cfg80211_post_ifcreate(struct net_device *ndev,
wl_if_event_info *event, u8 *addr,
@@ -4607,14 +4593,14 @@
struct net_device *primary_ndev;
struct net_device *new_ndev = NULL;
struct wireless_dev *wdev = NULL;
+#ifdef WL_STATIC_IF
+ int iface_num = 0;
+#endif /* WL_STATIC_IF */
s32 iface_type;
s32 ret = BCME_OK;
u16 mode;
u8 mac_addr[ETH_ALEN];
u16 wl_iftype;
-#ifdef WL_STATIC_IF
- bool static_if = false;
-#endif /* WL_STATIC_IF */
if (!ndev || !event) {
WL_ERR(("Wrong arg\n"));
@@ -4636,11 +4622,6 @@
return NULL;
}
- if (wl_get_drv_status_all(cfg, CONNECTED) > MAX_ACTIVE_IF_LINKS) {
- WL_ERR(("Can't support more than %d active links\n", MAX_ACTIVE_IF_LINKS));
- return NULL;
- }
-
iface_type = wl_role_to_cfg80211_type(event->role, &wl_iftype, &mode);
if (iface_type < 0) {
/* Unknown iface type */
@@ -4667,9 +4648,12 @@
}
#ifdef WL_STATIC_IF
- static_if = IS_CFG80211_STATIC_IF_NAME(cfg, name);
- if (static_if) {
- new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type);
+ if (is_static_iface_name(name, cfg)) {
+ new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type, name);
+ if (!new_ndev) {
+ WL_ERR(("failed to get I/F pointer\n"));
+ return NULL;
+ }
wdev = new_ndev->ieee80211_ptr;
} else
#endif /* WL_STATIC_IF */
@@ -4741,9 +4725,9 @@
fail:
#ifdef WL_STATIC_IF
/* remove static if from iflist */
- if (static_if) {
- cfg->static_ndev_state = NDEV_STATE_FW_IF_FAILED;
- wl_cfg80211_update_iflist_info(cfg, new_ndev, WL_STATIC_IFIDX, addr,
+ if ((iface_num = get_iface_num(name, cfg)) >= 0) {
+ cfg->static_ndev_state[iface_num] = NDEV_STATE_FW_IF_FAILED;
+ wl_cfg80211_update_iflist_info(cfg, new_ndev, (DHD_MAX_IFS + iface_num), addr,
event->bssidx, event->name, NDEV_STATE_FW_IF_FAILED);
}
#endif /* WL_STATIC_IF */
@@ -4755,6 +4739,78 @@
return NULL;
}
+s32
+wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg,
+ wl_iftype_t sec_data_if_type)
+{
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
+ s32 ret = BCME_OK;
+ uint8 i = 0;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(ret);
+
+ /* Note: This function will clean up only the network interface and host
+ * data structures. The firmware interface clean up will happen in the
+ * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+ * context for the module case).
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Enter, deleting iftype %s\n",
+ wl_iftype_to_str(sec_data_if_type)));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ if (iter->iftype != sec_data_if_type) {
+ continue;
+ }
+ switch (sec_data_if_type) {
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ ret = _wl_cfg80211_del_if(cfg,
+ iter->ndev, NULL, iter->ndev->name);
+ break;
+ }
+#ifdef WL_NAN
+ case WL_IF_TYPE_NAN: {
+ if (cfg->nan_enable == false) {
+ WL_INFORM_MEM(("Nan is not active,"
+ " ignore NDI delete\n"));
+ } else {
+ ret = wl_cfgnan_delete_ndp(cfg, iter->ndev);
+ }
+ break;
+ }
+#endif /* WL_NAN */
+ case WL_IF_TYPE_AP: {
+ /* Cleanup AP */
+#ifdef WL_STATIC_IF
+ /* handle static ap */
+ if (is_static_iface(cfg, iter->ndev)) {
+ dev_close(iter->ndev);
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ /* handle virtual created AP */
+ ret = _wl_cfg80211_del_if(cfg, iter->ndev,
+ NULL, iter->ndev->name);
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface type\n"));
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+ }
+ }
+ }
+fail:
+ return ret;
+}
+
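/*
 * Editor's sketch (illustration only, not part of this patch): a hypothetical
 * caller tearing down all secondary interfaces of one type, e.g. when NAN is
 * being disabled. Per the note above, only host-side state is cleaned here;
 * the firmware interface is removed later during chip reset.
 */
#if 0 /* example only */
	if (wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN) != BCME_OK) {
		WL_ERR(("NAN iface cleanup failed\n"));
	}
#endif /* example only */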
void
wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd)
{
@@ -4768,39 +4824,42 @@
*/
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
WL_DBG(("Enter\n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev && (iter->ndev != primary_ndev)) {
- WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
- wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
+ /* Ensure interfaces are down before deleting */
+#ifdef WL_STATIC_IF
+ /* Avoid cleaning up static ifaces */
+ if (!is_static_iface(cfg, iter->ndev))
+#endif /* WL_STATIC_IF */
+ {
+ dev_close(iter->ndev);
+ WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
+ wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
+ }
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
s32
wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx)
{
- struct bcm_cfg80211 *cfg;
-#ifdef WL_STATIC_IF
- bool static_if = false;
-#endif /* WL_STATIC_IF */
s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg;
+ struct net_info *netinfo = NULL;
if (!ndev || !ndev->ieee80211_ptr) {
/* No wireless dev done for this interface */
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
cfg = wl_get_cfg(ndev);
if (!cfg) {
WL_ERR(("cfg null\n"));
- return BCME_ERROR;
+ ret = BCME_ERROR;
+ goto exit;
}
if (ifidx <= 0) {
@@ -4809,28 +4868,36 @@
BCM_REFERENCE(ifidx);
if (ifidx <= 0) {
ASSERT(0);
- return BCME_ERROR;
+ ret = BCME_ERROR;
+ goto exit;
}
}
- WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d\n", ndev->name, ifidx));
+ if ((netinfo = wl_get_netinfo_by_wdev(cfg, ndev_to_wdev(ndev))) == NULL) {
+ WL_ERR(("Find netinfo from wdev %p failed\n", ndev_to_wdev(ndev)));
+ ret = -ENODEV;
+ goto exit;
+ }
#ifdef WL_STATIC_IF
- static_if = IS_CFG80211_STATIC_IF(cfg, ndev);
- if (static_if) {
+ if (is_static_iface(cfg, ndev)) {
ret = wl_cfg80211_post_static_ifdel(cfg, ndev);
} else
#endif /* WL_STATIC_IF */
{
+ WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d, vif_count:%d\n",
+ ndev->name, ifidx, cfg->vif_count));
wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
cfg->bss_pending_op = FALSE;
}
+
#ifdef SUPPORT_SET_CAC
wl_cfg80211_set_cac(cfg, 1);
#endif /* SUPPORT_SET_CAC */
-
+exit:
return ret;
}
+
int
wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 *cfg)
{
@@ -4871,7 +4938,7 @@
struct net_device *primary_ndev = NULL;
s32 ret = BCME_OK;
s32 bsscfg_idx = 0;
- u32 timeout;
+ long timeout;
wl_if_event_info *event = NULL;
u8 addr[ETH_ALEN];
struct net_info *iter, *next;
@@ -4882,21 +4949,16 @@
return NULL;
}
else {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
- if (strcmp(iter->ndev->name, name) == 0) {
- WL_ERR(("Interface name, %s exists !\n", iter->ndev->name));
+ if (strncmp(iter->ndev->name, name, strlen(name)) == 0) {
+ WL_ERR(("Interface name,%s exists!\n", iter->ndev->name));
return NULL;
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
if (likely(!mac_addr)) {
@@ -4911,10 +4973,7 @@
}
cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
-
- /* De-initialize the p2p discovery interface, if operational */
- wl_cfg80211_deinit_p2p_discovery(cfg);
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
/*
* Initialize the firmware I/F.
@@ -4946,7 +5005,11 @@
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("ADD_IF event, didn't come. Return \n"));
+ WL_ERR(("ADD_IF event, didn't come. Return. timeout:%lu bss_pending_op:%d\n",
+ timeout, cfg->bss_pending_op));
+ if (timeout == -ERESTARTSYS) {
+ WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
+ }
goto exit;
}
@@ -4975,7 +5038,7 @@
struct net_device *ndev = NULL;
s32 ret = BCME_OK;
s32 bsscfg_idx = 1;
- u32 timeout;
+ long timeout;
u16 wl_iftype;
u16 wl_mode;
@@ -4984,7 +5047,7 @@
/* If any scan is going on, abort it */
if (wl_get_drv_status_all(cfg, SCANNING)) {
WL_DBG(("Scan in progress. Aborting the scan!\n"));
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
bsscfg_idx = wl_get_bssidx_by_wdev(cfg, wdev);
@@ -5021,6 +5084,12 @@
/* Delete the firmware interface. "interface_remove" command
* should go on the interface to be deleted
*/
+ if (wl_cfg80211_get_bus_state(cfg)) {
+ WL_ERR(("Bus state is down: %d\n", __LINE__));
+ ret = BCME_DONGLE_DOWN;
+ goto exit;
+ }
+
cfg->bss_pending_op = true;
ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
wl_iftype, 1, NULL);
@@ -5049,15 +5118,29 @@
WL_ERR(("timeout in waiting IF_DEL event\n"));
/* The interface unregister will happen from wifi reset context */
ret = -ETIMEDOUT;
- goto exit;
- }
-
- ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
- if (unlikely(ret)) {
- WL_ERR(("post_ifdel failed\n"));
+ /* fall through */
}
exit:
+ if (ret < 0) {
+ WL_ERR(("iface del failed:%d\n", ret));
+#ifdef WL_STATIC_IF
+ if (is_static_iface(cfg, ndev)) {
+ /*
+ * For static interface, clean up the host data,
+ * irrespective of fw status. For dynamic
+ * interfaces it gets cleaned from dhd_stop context
+ */
+ wl_cfg80211_post_static_ifdel(cfg, ndev);
+ }
+#endif /* WL_STATIC_IF */
+ } else {
+ ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
+ if (unlikely(ret)) {
+ WL_ERR(("post_ifdel failed\n"));
+ }
+ }
+
cfg->bss_pending_op = false;
return ret;
}
@@ -5168,7 +5251,7 @@
* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
*/
- memset(&join_params, 0, sizeof(join_params));
+ bzero(&join_params, sizeof(join_params));
memcpy((void *)join_params.ssid.SSID, (const void *)params->ssid,
params->ssid_len);
join_params.ssid.SSID_len = htod32(params->ssid_len);
@@ -5181,7 +5264,7 @@
return err;
}
} else
- memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+ bzero(&join_params.params.bssid, ETHER_ADDR_LEN);
if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
scan_suppress = TRUE;
@@ -5331,16 +5414,24 @@
if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
val = WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif // endif
WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
- val = WPA2_AUTH_PSK|
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif // endif
- WPA2_AUTH_UNSPECIFIED;
+#ifdef WL_SAE
+ if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE)
+ val = WPA3_AUTH_SAE_PSK;
+ else
+#endif /* WL_SAE */
+#ifdef WL_OWE
+ if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_OWE)
+ val = WPA3_AUTH_OWE;
+ else
+#endif /* WL_OWE */
+ val = WPA2_AUTH_PSK |
+ WPA2_AUTH_UNSPECIFIED;
+#if defined(WL_SAE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ val = WPA3_AUTH_SAE_PSK;
+#endif /* WL_SAE */
else
val = WPA_AUTH_DISABLED;
@@ -5373,7 +5464,7 @@
s32 err = 0;
s32 bssidx;
- WL_DBG((" %s \n", __FUNCTION__));
+ WL_DBG((" wl_set_set_wapi_ie\n"));
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
@@ -5417,12 +5508,26 @@
val = WL_AUTH_OPEN_SHARED;
WL_DBG(("automatic\n"));
break;
-#ifdef BCMCCX
- case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_DBG(("network eap\n"));
- val = DOT11_LEAP_AUTH;
+#ifdef WL_FILS
+ case NL80211_AUTHTYPE_FILS_SK:
+ WL_DBG(("fils shared key\n"));
+ val = WL_AUTH_FILS_SHARED;
break;
-#endif // endif
+ case NL80211_AUTHTYPE_FILS_SK_PFS:
+ val = WL_AUTH_FILS_SHARED_PFS;
+ WL_DBG(("fils shared key with pfs\n"));
+ break;
+ case NL80211_AUTHTYPE_FILS_PK:
+ WL_DBG(("fils public key\n"));
+ val = WL_AUTH_FILS_PUBLIC;
+ break;
+#endif /* WL_FILS */
+#ifdef WL_SAE
+ case NL80211_AUTHTYPE_SAE:
+ WL_DBG(("SAE authentication\n"));
+ val = WL_AUTH_SAE;
+ break;
+#endif /* WL_SAE */
default:
val = 2;
WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
@@ -5440,6 +5545,45 @@
return err;
}
+static u32
+wl_rsn_cipher_wsec_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_algo;
+ }
+ }
+ return WSEC_NONE;
+}
+
+static u32
+wl_rsn_cipher_wsec_key_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_key_algo;
+ }
+ }
+ return CRYPTO_ALGO_OFF;
+}
+
+static u32
+wl_rsn_akm_wpa_auth_lookup(uint32 akm)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_akm_wpa_auth_lookup_tbl); i++) {
+ if (akm == rsn_akm_wpa_auth_lookup_tbl[i].akm_suite) {
+ return rsn_akm_wpa_auth_lookup_tbl[i].wpa_auth;
+ }
+ }
+ return WPA_AUTH_DISABLED;
+}
+
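/*
 * Editor's sketch (illustration only, not part of this patch): the lookup
 * tables referenced above (rsn_cipher_algo_lookup_tbl and
 * rsn_akm_wpa_auth_lookup_tbl) are defined elsewhere in the driver. The
 * assumed shape below only illustrates the table-driven mapping that replaces
 * the removed per-cipher switch statements; the two example rows reuse values
 * that appear in this file, the remaining contents are not shown here.
 */
#if 0 /* example only */
static const struct rsn_cipher_algo_entry {
	u32 cipher_suite;	/* WLAN_CIPHER_SUITE_* from cfg80211 */
	u32 wsec_algo;		/* wsec bitmap value, e.g. AES_ENABLED */
	u32 wsec_key_algo;	/* key algorithm, e.g. CRYPTO_ALGO_AES_CCM */
} rsn_cipher_algo_lookup_tbl_example[] = {
	{WLAN_CIPHER_SUITE_WEP40, WEP_ENABLED, CRYPTO_ALGO_WEP1},
	{WLAN_CIPHER_SUITE_CCMP, AES_ENABLED, CRYPTO_ALGO_AES_CCM},
	/* ... one row per supported cipher suite ... */
};
#endif /* example only */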
static s32
wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
@@ -5454,6 +5598,9 @@
s32 val = 0;
#endif // endif
s32 bssidx;
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
@@ -5461,22 +5608,15 @@
}
if (sme->crypto.n_ciphers_pairwise) {
+ pval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.ciphers_pairwise[0]);
+ if (pval == WSEC_NONE) {
+ WL_ERR(("invalid cipher pairwise (%d)\n", sme->crypto.ciphers_pairwise[0]));
+ return BCME_BADARG;
+ }
switch (sme->crypto.ciphers_pairwise[0]) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- pval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- pval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- pval = AES_ENABLED;
- break;
#ifdef BCMWAPI_WPI
case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- pval = SMS4_ENABLED;
+ val = pval;
err = wl_set_set_wapi_ie(dev, sme);
if (unlikely(err)) {
WL_DBG(("Set wapi ie failed \n"));
@@ -5493,10 +5633,16 @@
}
break;
#endif /* BCMWAPI_WPI */
- default:
- WL_ERR(("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(wl_rsn_cipher_wsec_key_algo_lookup(
+ sme->crypto.ciphers_pairwise[0]));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
}
}
#if defined(BCMSUP_4WAY_HANDSHAKE)
@@ -5505,7 +5651,9 @@
* Note that the FW feature flag only exists on kernels that support the
* FT-EAP AKM suite.
*/
- if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
+ if ((cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) &&
+ (FW_SUPPORTED(dhdp, idsup)))
+ {
err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
if (err) {
WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err));
@@ -5516,34 +5664,34 @@
}
#endif /* BCMSUP_4WAY_HANDSHAKE */
if (sme->crypto.cipher_group) {
+ gval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.cipher_group);
+ if (gval == WSEC_NONE) {
+ WL_ERR(("invalid cipher group (%d)\n", sme->crypto.cipher_group));
+ return BCME_BADARG;
+ }
switch (sme->crypto.cipher_group) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- gval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- gval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- gval = AES_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- gval = AES_ENABLED;
- break;
#ifdef BCMWAPI_WPI
case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- gval = SMS4_ENABLED;
+ val = gval;
break;
#endif // endif
- default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(
+ wl_rsn_cipher_wsec_key_algo_lookup(sme->crypto.cipher_group));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
}
}
WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+#ifdef WL_GCMP
+ WL_DBG(("algos:%x, mask:%x", algos, mask));
+#endif /* WL_GCMP */
if (is_wps_conn(sme)) {
if (sme->privacy) {
@@ -5571,13 +5719,68 @@
WL_ERR(("error (%d)\n", err));
return err;
}
-
+#ifdef WL_GCMP
+ if (wl_set_wsec_info_algos(dev, algos, mask)) {
+ WL_ERR(("set wsec_info error (%d)\n", err));
+ }
+#endif /* WL_GCMP */
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
return err;
}
+#ifdef WL_GCMP
+static s32
+wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx;
+ s32 err = 0;
+ wl_wsec_info_t *wsec_info;
+ bcm_xtlv_t *wsec_info_tlv;
+ uint16 tlv_data_len;
+ uint8 tlv_data[8];
+ uint32 param_len;
+ uint8 * buf;
+ WL_DBG(("enter.\n"));
+ if (!cfg) {
+ return BCME_ERROR;
+ }
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ buf = MALLOCZ(cfg->osh, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ if (!buf) {
+ WL_ERR(("No memory"));
+ return BCME_NOMEM;
+ }
+ wsec_info = (wl_wsec_info_t *)buf;
+ wsec_info->version = WL_WSEC_INFO_VERSION;
+ wsec_info_tlv = (bcm_xtlv_t *)(buf + OFFSETOF(wl_wsec_info_t, tlvs));
+
+ wsec_info->num_tlvs++;
+ tlv_data_len = sizeof(tlv_data);
+ err = memcpy_s(tlv_data, sizeof(tlv_data), &algos, sizeof(algos));
+ if (err) {
+ goto exit;
+ }
+ err = memcpy_s(tlv_data + sizeof(algos), sizeof(mask), &mask, sizeof(mask));
+ if (err) {
+ goto exit;
+ }
+ bcm_xtlv_pack_xtlv(wsec_info_tlv, WL_WSEC_INFO_BSS_ALGOS, tlv_data_len, tlv_data, 0);
+ param_len = OFFSETOF(wl_wsec_info_t, tlvs) + WL_WSEC_INFO_TLV_HDR_LEN + tlv_data_len;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_info", wsec_info, param_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+exit:
+ MFREE(cfg->osh, buf, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ return err;
+}
+#endif /* WL_GCMP */
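/*
 * Editor's sketch (illustration only, not part of this patch): how the GCMP
 * branch in wl_set_set_cipher() above feeds wl_set_wsec_info_algos(). The key
 * algorithm for the negotiated cipher is looked up, converted to a bitmask and
 * OR'ed with the AES-CCM bit in the mask, mirroring the connect path above.
 */
#if 0 /* example only */
	uint32 algos = KEY_ALGO_MASK(
		wl_rsn_cipher_wsec_key_algo_lookup(WLAN_CIPHER_SUITE_GCMP_256));
	uint32 mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);

	if (wl_set_wsec_info_algos(dev, algos, mask)) {
		WL_ERR(("set wsec_info failed\n"));
	}
#endif /* example only */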
#ifdef MFP
static s32
wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
@@ -5611,10 +5814,15 @@
(wl_cfg80211_get_rsn_capa(wpa2_ie, &rsn_cap) == 0) && rsn_cap) {
WL_DBG(("rsn_cap 0x%x%x\n", rsn_cap[0], rsn_cap[1]));
/* Check for MFP cap in the RSN capability field */
- if (rsn_cap[0] & RSN_CAP_MFPR) {
- mfp = WL_MFP_REQUIRED;
- } else if (rsn_cap[0] & RSN_CAP_MFPC) {
- mfp = WL_MFP_CAPABLE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ if (sme->mfp)
+#endif // endif
+ {
+ if (rsn_cap[0] & RSN_CAP_MFPR) {
+ mfp = WL_MFP_REQUIRED;
+ } else if (rsn_cap[0] & RSN_CAP_MFPC) {
+ mfp = WL_MFP_CAPABLE;
+ }
}
/*
* eptr --> end/last byte addr of wpa2_ie
@@ -5669,14 +5877,27 @@
group_mgmt_cs, (WPA_SUITE_LEN - 1)) == 0) {
WL_DBG(("BIP is found\n"));
err = wldev_iovar_setbuf(dev, "bip",
- group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
/*
* Don't return failure for unsupported cases
* of bip iovar for backward compatibility
*/
if (err != BCME_UNSUPPORTED && err < 0) {
WL_ERR(("bip set error (%d)\n", err));
+#if defined(IGUANA_LEGACY_CHIPS)
+ if (wl_customer6_legacy_chip_check(cfg,
+ bcmcfg_to_prmry_ndev(cfg))) {
+ /* Ignore bip error: some older firmware doesn't
+ * support the bip iovar or returns BCME_NOTUP when trying
+ * to set bip from connect context. Such firmware
+ * includes bip in the RSNIE by default, so it's okay to
+ * ignore the error.
+ */
+ err = BCME_OK;
+ goto exit;
+ } else
+#endif // endif
{
goto exit;
}
@@ -5686,20 +5907,220 @@
group_mgmt_cs[2]));
}
}
-
exit:
if (err) {
wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
FW_LOGSET_MASK_ALL);
}
+
return 0;
}
#endif /* MFP */
+#ifdef WL_FILS
+bool
+wl_is_fils_supported(struct net_device *ndev)
+{
+ s32 err;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN] = {0};
+ bcm_iov_buf_t *iov_buf = (bcm_iov_buf_t *)ioctl_buf;
+
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ err = wldev_iovar_getbuf(ndev, "fils", (uint8*)iov_buf, sizeof(bcm_iov_buf_t),
+ iov_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err == BCME_UNSUPPORTED) {
+ WL_DBG(("FILS NOT supported\n"));
+ return false;
+ }
+
+ WL_INFORM(("FILS supported\n"));
+ return true;
+}
+
+#define WL_NUM_OF_TLV_IN_SET_FILS_PARAMS 4u
+static s32
+wl_set_fils_params(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_xtlvbuf_t tbuf;
+ s32 err = BCME_OK;
+ uint32 buf_size;
+
+ if ((sme->auth_type != NL80211_AUTHTYPE_FILS_SK) &&
+ (sme->auth_type != NL80211_AUTHTYPE_FILS_SK_PFS) &&
+ (sme->auth_type != NL80211_AUTHTYPE_FILS_PK)) {
+ return BCME_OK;
+ }
+ if (sme->fils_erp_rrk_len > WL_MAX_FILS_KEY_LEN) {
+ WL_ERR(("%s: FILS rRK exceed allowed size\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ /* Check incoming buffer length */
+ buf_size = sme->fils_erp_username_len + sme->fils_erp_realm_len + sme->fils_erp_rrk_len +
+ sizeof(sme->fils_erp_next_seq_num) +
+ WL_NUM_OF_TLV_IN_SET_FILS_PARAMS * BCM_XTLV_HDR_SIZE_EX(BCM_XTLV_OPTION_ALIGN32) +
+ sizeof(bcm_iov_buf_t) - 1u;
+
+ if (buf_size > WLC_IOCTL_SMLEN) {
+ WL_ERR(("%s: FILS connect params arguments exceed allowed size\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ iov_buf = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (!iov_buf) {
+ WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, WLC_IOCTL_SMLEN));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_CONNECT_PARAMS;
+ /* check if this should be len w/o headers */
+ err = bcm_xtlv_buf_init(&tbuf, (uint8*)&iov_buf->data[0],
+ WLC_IOCTL_SMLEN - sizeof(bcm_iov_buf_t) + sizeof(uint16),
+ BCM_XTLV_OPTION_ALIGN32);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: xtlv_context initialization failed\n", __FUNCTION__));
+ goto exit;
+ }
+ if (sme->fils_erp_username_len && sme->fils_erp_username != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_USERNAME,
+ sme->fils_erp_username, sme->fils_erp_username_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ if (sme->fils_erp_realm_len && sme->fils_erp_realm != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_REALM,
+ sme->fils_erp_realm, sme->fils_erp_realm_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ if (sme->fils_erp_rrk_len && sme->fils_erp_rrk != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_RRK,
+ sme->fils_erp_rrk, sme->fils_erp_rrk_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_NEXT_SEQ_NUM,
+ (u8 *)&sme->fils_erp_next_seq_num, sizeof(sme->fils_erp_next_seq_num));
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ iov_buf->len = bcm_xtlv_buf_len(&tbuf);
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf->len + sizeof(bcm_iov_buf_t) -
+ sizeof(uint16), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("set fils params ioctl error (%d)\n", err));
+ goto exit;
+ }
+
+exit:
+ if (err != BCME_OK) {
+ WL_ERR(("set FILS params error %d\n", err));
+ }
+ else {
+ WL_INFORM_MEM(("FILS parameters succesfully applied\n"));
+ }
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_SMLEN);
+ }
+ return err;
+}
+
+#if !defined(WL_FILS_ROAM_OFFLD) && defined(WL_FILS)
+static s32
+wl_get_bcn_timeout(struct net_device *dev, u32 *bcn_timeout)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_getint(dev, "bcn_timeout", bcn_timeout);
+ if (unlikely(err)) {
+ WL_ERR(("could not get bcn_timeout (%d)\n", err));
+ }
+ return err;
+}
+
+#define WL_ROAM_ENABLE 0
+#define WL_ROAM_DISABLE 1
+/* Beacon timeout (beacon loss count) used when FILS roaming offload is not supported by fw */
+#define WL_BCN_TIMEOUT 3
+
+static s32
+wl_fils_toggle_roaming(struct net_device *dev, u32 auth_type)
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (WPA2_AUTH_IS_FILS(auth_type) && !cfg->fils_info.fils_roam_disabled) {
+ err = wl_get_bcn_timeout(dev, &cfg->fils_info.fils_bcn_timeout_cache);
+ if (unlikely(err)) {
+ return err;
+ }
+ wl_dongle_roam(dev, WL_ROAM_DISABLE, WL_BCN_TIMEOUT);
+ cfg->fils_info.fils_roam_disabled = true;
+ WL_INFORM_MEM(("fw roam disabled for FILS akm\n"));
+ } else if (cfg->fils_info.fils_roam_disabled) {
+ /* Enable roaming back for other auth types */
+ wl_dongle_roam(dev, WL_ROAM_ENABLE, cfg->fils_info.fils_bcn_timeout_cache);
+ cfg->fils_info.fils_roam_disabled = false;
+ WL_INFORM_MEM(("fw roam enabled\n"));
+ }
+ return err;
+}
+#endif /* !WL_FILS_ROAM_OFFLD && WL_FILS */
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+#ifdef WL_SAE
+static int
+wl_set_sae_password(struct net_device *net, const u8 *pwd_data, u16 pwd_len)
+{
+ struct wl_wsec_sae_pwd_le sae_pwd;
+ int err = 0;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ if (pwd_len < WL_WSEC_MIN_SAE_PASSWORD_LEN || pwd_len > WL_WSEC_MAX_SAE_PASSWORD_LEN) {
+ WL_ERR(("len b/n >%d & <%d\n", WL_WSEC_MIN_SAE_PASSWORD_LEN,
+ WL_WSEC_MAX_SAE_PASSWORD_LEN));
+ return -EINVAL;
+ }
+ if (!pwd_data) {
+ WL_ERR(("pswd cannot be null\n"));
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = htod16(pwd_len);
+ memcpy(sae_pwd.key, pwd_data, pwd_len);
+
+ err = wldev_iovar_setbuf(net, "sae_password", &sae_pwd,
+ sizeof(sae_pwd), ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err < 0)
+ WL_ERR(("failed to set SAE password in firmware (len=%u)\n",
+ pwd_len));
+
+ return err;
+}
+#endif /* WL_SAE */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)) */
+
static s32
wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+#ifdef WL_SAE
+
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WL_SAE */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)) */
struct wl_security *sec;
s32 val = 0;
s32 err = 0;
@@ -5717,9 +6138,6 @@
return err;
}
if (val & (WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif // endif
WPA_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_8021X:
@@ -5728,56 +6146,66 @@
case WLAN_AKM_SUITE_PSK:
val = WPA_AUTH_PSK;
break;
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA_AUTH_CCKM;
- break;
-#endif // endif
default:
WL_ERR(("invalid akm suite (0x%x)\n",
sme->crypto.akm_suites[0]));
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK |
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif // endif
WPA2_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA2_AUTH_UNSPECIFIED;
- break;
#ifdef MFP
+#if defined(IGUANA_LEGACY_CHIPS)
+ case WL_AKM_SUITE_SHA256_1X:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_UNSPECIFIED;
+ } else {
+ val = WPA2_AUTH_1X_SHA256;
+ }
+ break;
+ case WL_AKM_SUITE_SHA256_PSK:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_PSK;
+ } else {
+ val = WPA2_AUTH_PSK_SHA256;
+ }
+ break;
+#else
case WL_AKM_SUITE_SHA256_1X:
val = WPA2_AUTH_1X_SHA256;
break;
case WL_AKM_SUITE_SHA256_PSK:
val = WPA2_AUTH_PSK_SHA256;
break;
+#endif // endif
#endif /* MFP */
+ case WLAN_AKM_SUITE_8021X:
case WLAN_AKM_SUITE_PSK:
- val = WPA2_AUTH_PSK;
- break;
#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
case WLAN_AKM_SUITE_FT_8021X:
- val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
- break;
#endif // endif
#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
case WLAN_AKM_SUITE_FT_PSK:
- val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
- break;
-#endif // endif
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA2_AUTH_CCKM;
- break;
#endif // endif
case WLAN_AKM_SUITE_FILS_SHA256:
- val = WPA2_AUTH_FILS_SHA256;
- break;
case WLAN_AKM_SUITE_FILS_SHA384:
- val = WPA2_AUTH_FILS_SHA384;
+ case WLAN_AKM_SUITE_8021X_SUITE_B:
+ case WLAN_AKM_SUITE_8021X_SUITE_B_192:
+#ifdef WL_OWE
+ case WLAN_AKM_SUITE_OWE:
+#endif /* WL_OWE */
+#ifdef WL_SAE
+ case WLAN_AKM_SUITE_SAE:
+#endif /* WL_SAE */
+ case WLAN_AKM_SUITE_DPP:
+ case WLAN_AKM_SUITE_FT_8021X_SHA384:
+ val = wl_rsn_akm_wpa_auth_lookup(sme->crypto.akm_suites[0]);
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA256:
+ val = WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT;
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA384:
+ val = WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT;
break;
default:
WL_ERR(("invalid akm suite (0x%x)\n",
@@ -5802,6 +6230,31 @@
}
#endif // endif
+#ifdef WL_FILS
+#if !defined(WL_FILS_ROAM_OFFLD)
+ err = wl_fils_toggle_roaming(dev, val);
+ if (unlikely(err)) {
+ return err;
+ }
+#endif /* !WL_FILS_ROAM_OFFLD */
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+#ifdef WL_SAE
+ if ((val & (WPA3_AUTH_SAE_PSK)) &&
+ FW_SUPPORTED(dhd, sae)) {
+ err = wl_set_sae_password(dev, sme->crypto.sae_pwd, sme->crypto.sae_pwd_len);
+ if (!err && (FW_SUPPORTED(dhd, idsup))) {
+ err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
+ if (err) {
+ WL_ERR(("Error setting sup_wpa (%d)\n", err));
+ return err;
+ }
+ }
+ }
+#endif /* WL_SAE */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)) */
+
#ifdef MFP
if ((err = wl_cfg80211_set_mfp(cfg, dev, sme)) < 0) {
WL_ERR(("MFP set failed err:%d\n", err));
@@ -5851,7 +6304,7 @@
(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
WLAN_CIPHER_SUITE_WEP104)))
{
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (unlikely(key.len > sizeof(key.data))) {
@@ -5860,14 +6313,10 @@
}
memcpy(key.data, sme->key, key.len);
key.flags = WL_PRIMARY_KEY;
- switch (sec->cipher_pairwise) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- default:
+ if ((sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP40) ||
+ (sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP104)) {
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(sec->cipher_pairwise);
+ } else {
WL_ERR(("Invalid algorithm (%d)\n",
sme->crypto.ciphers_pairwise[0]));
return -EINVAL;
@@ -5913,7 +6362,7 @@
chanspec = wl_chspec_driver_to_host(chanspec);
isvht80 = chanspec & WL_CHANSPEC_BW_80;
- WL_DBG(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+ WL_DBG(("wl_get_chan_isvht80: chanspec(%x:%d)\n", chanspec, isvht80));
return isvht80;
}
@@ -5927,6 +6376,10 @@
int wait_cnt;
if (disassociate) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, dev), DOT11_RC_DISASSOC_LEAVING);
WL_ERR(("Disassociate previous connection!\n"));
wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = DOT11_RC_DISASSOC_LEAVING;
@@ -5957,11 +6410,233 @@
if (wait_cnt == 0) {
WL_ERR(("DISCONNECING clean up failed!\n"));
+ /* Clear DISCONNECTING driver status as we have made sufficient attempts
+ * for driver clean up.
+ */
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
return BCME_NOTREADY;
}
return BCME_OK;
}
+#ifdef WL_FILS
+static int
+wl_fils_add_hlp_container(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ const uint8* ie_buf, uint16 ie_len)
+{
+ const bcm_tlv_ext_t *hlp_ie;
+
+ if ((hlp_ie = (const bcm_tlv_ext_t*)bcm_parse_tlvs_dot11((const uint8 *)ie_buf, ie_len,
+ FILS_HLP_CONTAINER_EXT_ID, TRUE))) {
+ u16 hlp_len = hlp_ie->len;
+ u16 left_len = (ie_len - ((const uint8*)hlp_ie - ie_buf));
+ bcm_iov_buf_t *iov_buf = 0;
+ uint8* pxtlv;
+ int err;
+ size_t iov_buf_len;
+ bcm_tlv_dot11_frag_tot_len(ie_buf, ie_len, FILS_HLP_CONTAINER_EXT_ID,
+ TRUE, (uint*)&hlp_len);
+
+ hlp_len += BCM_TLV_EXT_HDR_SIZE;
+
+ if ((hlp_len > DOT11_MAX_MPDU_BODY_LEN) || (hlp_len > left_len)) {
+ WL_ERR(("bad HLP length %d\n", hlp_len));
+ return EFAULT;
+ }
+ iov_buf_len = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) - 1 + hlp_len;
+ iov_buf = MALLOCZ(cfg->osh, iov_buf_len);
+ if (iov_buf == NULL) {
+ WL_ERR(("failed to allocated iov_buf\n"));
+ return ENOMEM;
+ }
+
+ prhex("HLP, HLP", (const uchar *)hlp_ie, hlp_len);
+
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ((bcm_xtlv_t*)pxtlv)->id = WL_FILS_XTLV_HLP_IE;
+ ((bcm_xtlv_t*)pxtlv)->len = hlp_len;
+
+ memcpy(((bcm_xtlv_t*)pxtlv)->data, hlp_ie, ((bcm_xtlv_t*)pxtlv)->len);
+
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_HLP_IE;
+ iov_buf->len = ((sizeof(bcm_xtlv_t)-1) + ((bcm_xtlv_t*)pxtlv)->len);
+
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf,
+ sizeof(bcm_iov_buf_t) + iov_buf->len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("fils wldev_iovar_setbuf error (%d)\n", err));
+ }
+ else {
+ WL_INFORM_MEM(("FILS HLP Packet succesfully updated\n"));
+ }
+ MFREE(cfg->osh, iov_buf, iov_buf_len);
+ }
+ return BCME_OK;
+}
+#endif /* WL_FILS */
+
+#if defined(WL_FILS)
+#ifndef UPDATE_FILS_ERP_INFO
+#define UPDATE_FILS_ERP_INFO BIT(1)
+#define UPDATE_AUTH_TYPE BIT(2)
+#endif // endif
+
+static int
+wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed)
+{
+ s32 err = BCME_OK;
+ if (changed & UPDATE_FILS_ERP_INFO) {
+ err = wl_set_fils_params(dev, sme);
+
+ if (unlikely(err)) {
+ WL_ERR(("Invalid FILS params\n"));
+ goto exit;
+ }
+ }
+ if (changed & UPDATE_AUTH_TYPE) {
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ goto exit;
+ }
+ }
+ if ((changed & UPDATE_FILS_ERP_INFO) && !(changed & UPDATE_AUTH_TYPE)) {
+ WL_DBG(("Warning: FILS ERP params are set, but authentication type - not\n"));
+ }
+exit:
+ return err;
+
+}
+#endif /* WL_FILS */
+
+#ifdef WL_SAE
+static int
+wl_cfg80211_external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params)
+{
+ int ret = 0;
+ struct wl_auth_req_status auth_status;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ WL_DBG(("Enter\n"));
+
+ if (params->status == WLAN_STATUS_SUCCESS) {
+ auth_status.flags = WL_EXTAUTH_SUCCESS;
+ } else {
+ WL_ERR(("External authentication failed with %d\n",
+ params->status));
+ auth_status.flags = WL_EXTAUTH_FAIL;
+ }
+ memcpy(auth_status.peer_mac.octet, params->bssid, ETH_ALEN);
+ auth_status.ssid_len = min_t(u8, params->ssid.ssid_len,
+ IEEE80211_MAX_SSID_LEN);
+ memcpy(auth_status.ssid, params->ssid.ssid, auth_status.ssid_len);
+ memset(auth_status.pmkid, 0, WLAN_PMKID_LEN);
+ if (params->pmkid)
+ memcpy(auth_status.pmkid, params->pmkid, WLAN_PMKID_LEN);
+
+ ret = wldev_iovar_setbuf(dev, "auth_status", &auth_status,
+ sizeof(auth_status), ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0)
+ WL_ERR(("auth_status iovar failed: ret=%d\n", ret));
+
+ return ret;
+}
+
+/**
+ * wl_notify_extauth_req_event() - host authentication request
+ *
+ * @cfg: object to handle cfg80211 interface
+ * @cfgdev: represents ndev or wdev
+ * @e: event message. Not used, to make it usable for fweh event dispatcher
+ * @data: payload of message, containing auth frame data
+ *
+ */
+static s32
+wl_notify_extauth_req_event(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
+{
+ struct cfg80211_external_auth_params params;
+ struct wl_auth_req_status *auth_req = (struct wl_auth_req_status *)data;
+ struct net_device *ndev = cfgdev_to_ndev(cfgdev);
+ int err = 0;
+
+ WL_DBG(("EVENT: EXT_AUTH_REQ received\n"));
+
+ if (e->datalen < sizeof(*auth_req)) {
+ WL_ERR(("Ext auth req event data too small. Ignoring event\n"));
+ return -EINVAL;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.action = NL80211_EXTERNAL_AUTH_START;
+ params.key_mgmt_suite = ntohl(WLAN_AKM_SUITE_SAE);
+ params.status = WLAN_STATUS_SUCCESS;
+ params.ssid.ssid_len = min_t(u32, IEEE80211_MAX_SSID_LEN, auth_req->ssid_len);
+ memcpy(params.ssid.ssid, auth_req->ssid, params.ssid.ssid_len);
+ memcpy(params.bssid, auth_req->peer_mac.octet, ETH_ALEN);
+
+ err = cfg80211_external_auth_request(ndev, &params, GFP_ATOMIC);
+ if (err) {
+ WL_ERR(("EXT_AUTH_REQ to supplicant failed\n"));
+ }
+ return err;
+}
+
+/**
+ * wl_notify_mgmt_frame_tx_complete() - transmit mgmt frame complete
+ *
+ * @cfg: object to handle cfg80211 interface
+ * @cfgdev: represents ndev or wdev
+ * @e: event message. Not used, to make it usable for fweh event dispatcher
+ * @data: payload of message, containing the packet id of the transmitted mgmt frame
+ *
+ */
+static s32
+wl_notify_mgmt_frame_tx_complete(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
+{
+ u32 event_type = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ struct net_device *ndev = NULL;
+ u32 *packetid = (u32 *)data;
+ struct net_info *_net_info;
+
+ WL_DBG((" Enter\n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ _net_info = wl_get_netinfo_by_netdev(cfg, ndev);
+
+ WL_DBG(("EVENT: mgmt tx status: event %s, status=%d\n",
+ event_type == WLC_E_MGMT_FRAME_TXSTATUS ?
+ "MGMT_FRAME_TXSTATUS" : "MGMT_FRAME_OFF_CHAN_COMPLETE", e->status));
+
+ if (!test_bit(MGMT_TX_SEND_FRAME, &_net_info->mgmt_txstatus) ||
+ (*packetid != _net_info->mgmt_txid)) {
+ return 0;
+ }
+
+ if (event_type == WLC_E_MGMT_FRAME_TXSTATUS) {
+ if (status == WLC_E_STATUS_SUCCESS) {
+ set_bit(MGMT_TX_ACK,
+ &_net_info->mgmt_txstatus);
+ } else {
+ set_bit(MGMT_TX_NOACK,
+ &_net_info->mgmt_txstatus);
+ }
+ } else {
+ set_bit(MGMT_TX_OFF_CHAN_COMPLETED,
+ &_net_info->mgmt_txstatus);
+ }
+
+ complete(&_net_info->mgmt_tx_cpl);
+ return BCME_OK;
+}
+#endif /* WL_SAE */
+
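Editor's note: wl_notify_mgmt_frame_tx_complete() above is the receive half of a bit-flag plus completion handshake. A hedged sketch of what the sending half typically looks like is given below; the MGMT_TX_* bit numbers and the ctx layout are assumptions for illustration, not the driver's actual definitions.

/* Sketch (assumption, not part of this patch): the tx path marks the frame
 * in flight, then blocks on the completion that the event handler above
 * signals once WLC_E_MGMT_FRAME_TXSTATUS arrives.
 */
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define MGMT_TX_SEND_FRAME	0	/* placeholder bit numbers */
#define MGMT_TX_ACK		1
#define MGMT_TX_NOACK		2

struct mgmt_tx_ctx {
	unsigned long mgmt_txstatus;	/* status bits set by the event handler */
	struct completion mgmt_tx_cpl;	/* init_completion() done at setup time */
};

static int mgmt_tx_wait_for_status(struct mgmt_tx_ctx *ctx, unsigned int timeout_ms)
{
	set_bit(MGMT_TX_SEND_FRAME, &ctx->mgmt_txstatus);

	if (!wait_for_completion_timeout(&ctx->mgmt_tx_cpl,
			msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return test_bit(MGMT_TX_ACK, &ctx->mgmt_txstatus) ? 0 : -EIO;
}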
#define MAX_SCAN_ABORT_WAIT_CNT 20
#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10
@@ -5986,15 +6661,21 @@
u32 chan_cnt = 0;
struct ether_addr bssid;
s32 bssidx = -1;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+ bool skip_hints = fw_ap_select;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
#ifdef ESCAN_CHANNEL_CACHE
chanspec_t chanspec_list[MAX_ROAM_CHANNEL];
#endif /* ESCAN_CHANNEL_CACHE */
-#if (defined(BCM4334_CHIP) || defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
int wait_cnt;
-#endif // endif
WL_DBG(("In\n"));
+ if (!dev) {
+ WL_ERR(("dev is null\n"));
+ return -EINVAL;
+ }
BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(ASSOC_START), dhd_net2idx(dhdp->info, dev), 0);
#ifdef ESCAN_CHANNEL_CACHE
memset(chanspec_list, 0, (sizeof(chanspec_t) * MAX_ROAM_CHANNEL));
@@ -6002,21 +6683,36 @@
/* Connection attempted via linux-wireless */
wl_set_drv_status(cfg, CFG80211_CONNECT, dev);
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ dhd_reset_tcpsync_info_by_dev(dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
#if defined(SUPPORT_RANDOM_MAC_SCAN)
- wl_cfg80211_scan_mac_disable(dev);
+ /* Disable scanmac if enabled */
+ if (cfg->scanmac_enabled) {
+ wl_cfg80211_scan_mac_disable(dev);
+ }
#endif /* SUPPORT_RANDOM_MAC_SCAN */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
- if (sme->channel_hint) {
- chan = sme->channel_hint;
- WL_INFORM_MEM(("channel_hint (%d), channel_hint center_freq (%d)\n",
- ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
- sme->channel_hint->center_freq));
- }
- if (sme->bssid_hint) {
- sme->bssid = sme->bssid_hint;
- WL_INFORM_MEM(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
+#if defined(WL_FW_OCE_AP_SELECT)
+ /* override bssid_hint for oce networks */
+ skip_hints = (fw_ap_select && wl_cfg80211_is_oce_ap(wiphy, sme->bssid_hint));
+#endif // endif
+ if (skip_hints) {
+ /* Let fw choose the best AP */
+ WL_INFORM(("skipping bssid & channel hint\n"));
+ } else {
+ if (sme->channel_hint) {
+ chan = sme->channel_hint;
+ WL_INFORM_MEM(("channel_hint (%d), channel_hint center_freq (%d)\n",
+ ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
+ sme->channel_hint->center_freq));
+ }
+ if (sme->bssid_hint) {
+ sme->bssid = sme->bssid_hint;
+ WL_INFORM_MEM(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
+ }
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
@@ -6040,7 +6736,6 @@
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
wl_cfg80211_scan_abort(cfg);
@@ -6051,10 +6746,9 @@
OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
}
if (wl_get_drv_status(cfg, SCANNING, dev)) {
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
}
-#endif // endif
#ifdef WL_SCHED_SCAN
/* Locks are taken in wl_cfg80211_sched_scan_stop()
* A start scan occurring during connect is unlikely
@@ -6065,7 +6759,7 @@
cfg->sched_scan_req->reqid);
#else
wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 0) */
}
#endif /* WL_SCHED_SCAN */
#ifdef WL_CFG80211_GON_COLLISION
@@ -6100,6 +6794,12 @@
}
}
+ if (sme->bssid) {
+ wl_update_prof(cfg, dev, NULL, sme->bssid, WL_PROF_LATEST_BSSID);
+ } else {
+ wl_update_prof(cfg, dev, NULL, &ether_bcast, WL_PROF_LATEST_BSSID);
+ }
+
/* 'connect' request received */
wl_set_drv_status(cfg, CONNECTING, dev);
/* clear nested connect bit on proceeding for connection */
@@ -6137,26 +6837,33 @@
sme->ie_len)) != NULL) {
WL_DBG((" WPA IE is found\n"));
}
- if (wpa_ie != NULL || wpa2_ie != NULL) {
+#ifdef WL_SAE
+ if (((wpa_ie != NULL) || (wpa2_ie != NULL)) &&
+ (!((FW_SUPPORTED(dhdp, sae)) && (FW_SUPPORTED(dhdp, idsup)) &&
+ (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE))))
+#else
+ if ((wpa_ie != NULL || wpa2_ie != NULL))
+#endif // endif
+ {
wpaie = (wpa_ie != NULL) ? (const u8 *)wpa_ie : (const u8 *)wpa2_ie;
wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("wpaie set error (%d)\n", err));
goto exit;
}
} else {
err = wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("wpaie set error (%d)\n", err));
goto exit;
}
}
err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
- VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len);
+ VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len);
if (unlikely(err)) {
goto exit;
}
@@ -6166,7 +6873,6 @@
bool is_roamtrig_reset = TRUE;
bool is_roam_env_ok = (wldev_iovar_setint(dev, "roam_env_detection",
AP_ENV_DETECT_NOT_USED) == BCME_OK);
-
#ifdef SKIP_ROAM_TRIGGER_RESET
roam_trigger[1] = WLC_BAND_2G;
is_roamtrig_reset =
@@ -6186,12 +6892,12 @@
}
}
#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
-
if (chan &&
#ifdef H2_BRING_UP
FALSE &&
#endif /* H2_BRING_UP */
TRUE) {
+
cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
chan_cnt = 1;
WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
@@ -6223,7 +6929,7 @@
}
memcpy(ssid.SSID, sme->ssid, sme->ssid_len);
- ssid.SSID_len = sme->ssid_len;
+ ssid.SSID_len = (uint32)sme->ssid_len;
chan_cnt = get_roam_channel_list(cfg->channel, chanspec_list,
MAX_ROAM_CHANNEL, &ssid, ioctl_version);
WL_DBG(("RCC channel count:%d \n", chan_cnt));
@@ -6250,7 +6956,15 @@
#ifdef BCMWAPI_WPI
}
#endif // endif
-
+#ifdef WL_FILS
+ if (sme->ie && sme->ie_len) {
+ err = wl_fils_add_hlp_container(cfg, dev, sme->ie, sme->ie_len);
+ if (unlikely(err)) {
+ WL_ERR(("FILS sending HLP failed\n"));
+ goto exit;
+ }
+ }
+#endif /* WL_FILS */
err = wl_set_set_cipher(dev, sme);
if (unlikely(err)) {
WL_ERR(("Invalid ciper\n"));
@@ -6268,6 +6982,13 @@
WL_ERR(("Invalid shared key\n"));
goto exit;
}
+#ifdef WL_FILS
+ err = wl_set_fils_params(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid FILS params\n"));
+ goto exit;
+ }
+#endif /* WL_FILS */
/*
* Join with specific BSSID and cached SSID
@@ -6281,7 +7002,8 @@
wl_clr_drv_status(cfg, CONNECTING, dev);
goto exit;
}
- ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ ext_join_params->ssid.SSID_len =
+ (uint32)min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
@@ -6355,16 +7077,28 @@
(uint8 *)(&ext_join_params->assoc.bssid), cfg->channel);
}
#endif /* DHD_EVENT_LOG_FILTER */
+#ifdef WLTDLS
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
+#endif /* WLTDLS */
err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (cfg->rcc_enabled) {
+ WL_ERR_KERN(("[%s] Connecting with " MACDBG " ssid \"%s\","
+ " len (%d) with rcc channels. chan_cnt:%d \n\n",
+ dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, chan_cnt));
WL_INFORM_MEM(("[%s] Connecting with " MACDBG " ssid \"%s\","
" len (%d) with rcc channels. chan_cnt:%d \n\n",
dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
"*****", ext_join_params->ssid.SSID_len, chan_cnt));
} else {
+ WL_ERR_KERN(("[%s] Connecting with " MACDBG " ssid \"%s\","
+ " len (%d) channels:%d \n\n",
+ dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel));
WL_INFORM_MEM(("[%s] Connecting with " MACDBG " ssid \"%s\","
- "len (%d) channel:%d\n\n",
+ " len (%d) channels:%d \n\n",
dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
"*****", ext_join_params->ssid.SSID_len, cfg->channel));
}
@@ -6386,7 +7120,7 @@
goto exit;
set_ssid:
-#if defined(OEM_ANDROID) && defined(ROAMEXP_SUPPORT)
+#if defined(ROAMEXP_SUPPORT)
/* Clear Blacklist bssid and Whitelist ssid list before join issue
* This is temporary fix since currently firmware roaming is not
* disabled by android framework before SSID join from framework
@@ -6395,11 +7129,11 @@
dhd_dev_set_blacklist_bssid(dev, NULL, 0, true);
/* Flush whitelist ssid content */
dhd_dev_set_whitelist_ssid(dev, NULL, 0, true);
-#endif /* OEM_ANDROID && ROAMEXP_SUPPORT */
- memset(&join_params, 0, sizeof(join_params));
+#endif /* ROAMEXP_SUPPORT */
+ bzero(&join_params, sizeof(join_params));
join_params_size = sizeof(join_params.ssid);
- join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ join_params.ssid.SSID_len = (uint32)min(sizeof(join_params.ssid.SSID), sme->ssid_len);
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
@@ -6425,11 +7159,11 @@
WL_ERR(("error (%d)\n", err));
wl_clr_drv_status(cfg, CONNECTING, dev);
wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
- }
#ifdef WLTDLS
- /* disable TDLS if number of connected interfaces is >= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
+ /* If connect fails, check whether we can enable back TDLS */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
#endif /* WLTDLS */
+ }
#ifdef DBG_PKT_MON
if ((dev == bcmcfg_to_prmry_ndev(cfg)) && !err) {
DHD_DBG_PKT_MON_START(dhdp);
@@ -6438,18 +7172,56 @@
return err;
}
-#define WAIT_FOR_DISCONNECT_MAX 10
+static void wl_cfg80211_disconnect_state_sync(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ struct wireless_dev *wdev;
+ uint8 wait_cnt;
+
+ if (!dev || !dev->ieee80211_ptr) {
+ WL_ERR(("wrong ndev\n"));
+ return;
+ }
+
+ wdev = dev->ieee80211_ptr;
+ wait_cnt = WAIT_FOR_DISCONNECT_STATE_SYNC;
+ while ((wdev->current_bss) && wait_cnt) {
+ WL_DBG(("Waiting for disconnect sync, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(50);
+ }
+
+ if (wait_cnt == 0) {
+ /* state didn't get cleared within given timeout */
+ WL_INFORM_MEM(("cfg80211 state. wdev->current_bss non null\n"));
+ } else {
+ WL_MEM(("cfg80211 disconnect state sync done\n"));
+ }
+
+}
+
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
uint8 wait_cnt;
+ u32 status = 0;
wait_cnt = WAIT_FOR_DISCONNECT_MAX;
- while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+ while ((status = wl_get_drv_status(cfg, DISCONNECTING, dev)) && wait_cnt) {
WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
wait_cnt--;
OSL_SLEEP(50);
}
+ WL_INFORM_MEM(("Wait for disconnection done. status:%d wait_cnt:%d\n", status, wait_cnt));
+ if (!wait_cnt && wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ /* No response from firmware. Indicate connect result
+ * to clear cfg80211 state machine
+ */
+ WL_INFORM_MEM(("force send connect result\n"));
+ CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ }
return;
}
@@ -6461,7 +7233,9 @@
scb_val_t scbval;
bool act = false;
s32 err = 0;
- u8 *curbssid;
+ u8 *curbssid = NULL;
+ u8 null_bssid[ETHER_ADDR_LEN];
+ s32 bssidx = 0;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
WL_ERR(("Reason %d\n", reason_code));
RETURN_EIO_IF_NOT_UP(cfg);
@@ -6469,16 +7243,30 @@
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_START),
+ dhd_net2idx(dhdp->info, dev), reason_code);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(dhdp, dhd_net2idx(dhdp->info, dev));
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef ESCAN_RESULT_PATCH
- if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid &&
- (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) {
- WL_ERR(("Disconnecting from connecting device: " MACDBG "\n",
- MAC2STRDBG(curbssid)));
+ if (wl_get_drv_status(cfg, CONNECTING, dev)) {
+ if (curbssid) {
+ WL_ERR(("Disconnecting while CONNECTING status"
+ " connecting device: " MACDBG "\n", MAC2STRDBG(curbssid)));
+ } else {
+ WL_ERR(("Disconnecting while CONNECTING status \n"));
+ }
act = true;
}
#endif /* ESCAN_RESULT_PATCH */
+ if (!curbssid) {
+ WL_ERR(("Disconnecting while CONNECTING status %d\n", (int)sizeof(null_bssid)));
+ bzero(null_bssid, sizeof(null_bssid));
+ curbssid = null_bssid;
+ }
+
if (act) {
#ifdef DBG_PKT_MON
/* Stop packet monitor */
@@ -6489,16 +7277,15 @@
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if !defined(ESCAN_RESULT_PATCH)
/* Let scan aborted by F/W */
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
-#endif /* ESCAN_RESULT_PATCH */
+ /* Set DISCONNECTING state. We are clearing this state in all exit paths */
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
if (wl_get_drv_status(cfg, CONNECTING, dev) ||
wl_get_drv_status(cfg, CONNECTED, dev)) {
- wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = reason_code;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
@@ -6508,7 +7295,7 @@
if (unlikely(err)) {
wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error (%d)\n", err));
- return err;
+ goto exit;
}
}
#ifdef WL_WPS_SYNC
@@ -6523,6 +7310,13 @@
}
#endif /* WPS_SYNC */
wl_cfg80211_wait_for_disconnection(cfg, dev);
+ } else {
+ /* Not in connecting or connected state. However since disconnect came
+ * from upper layer, indicate connect fail to clear any state mismatch
+ */
+ WL_INFORM_MEM(("act is false. report connect result fail.\n"));
+ CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
}
#ifdef CUSTOM_SET_CPUCORE
/* set default cpucore */
@@ -6535,6 +7329,17 @@
cfg->rssi = 0; /* reset backup of rssi */
+exit:
+ /* Clear IEs for disassoc */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ err = -EINVAL;
+ return err;
+ }
+ WL_ERR(("Clearing disconnect IEs \n"));
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, NULL, 0);
+
return err;
}
@@ -6652,11 +7457,12 @@
s32 bssidx;
s32 mode = wl_get_mode_by_netdev(cfg, dev);
+ WL_ERR(("key index (%d)\n", key_idx));
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.index = (u32) key_idx;
if (!ETHER_ISMULTI(mac_addr))
@@ -6699,35 +7505,8 @@
key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
key.iv_initialized = true;
}
-
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
- break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- break;
-#endif // endif
- default:
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+ if (key.algo == CRYPTO_ALGO_OFF) { //not found.
WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
return -EINVAL;
}
@@ -6782,23 +7561,20 @@
struct net_info *iter, *next;
if (name == NULL) {
+ WL_ERR(("Iface name is not provided\n"));
return NULL;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strcmp(iter->ndev->name, name) == 0) {
return iter->ndev->ieee80211_ptr;
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
WL_DBG(("Iface %s not found\n", name));
return NULL;
}
@@ -6859,7 +7635,20 @@
s32 bssidx = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 mode = wl_get_mode_by_netdev(cfg, dev);
- WL_DBG(("key index (%d)\n", key_idx));
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ int j;
+ wsec_pmk_t pmk;
+ char keystring[WSEC_MAX_PSK_LEN + 1];
+ char* charptr = keystring;
+ u16 len;
+ struct wl_security *sec;
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_INFORM_MEM(("key index (%d) (0x%x)\n", key_idx, params->cipher));
RETURN_EIO_IF_NOT_UP(cfg);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
@@ -6873,9 +7662,13 @@
wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
goto exit;
}
- memset(&key, 0, sizeof(key));
+
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_KEY), dhd_net2idx(dhdp->info, dev), 0);
+
+ bzero(&key, sizeof(key));
/* Clear any buffered wep key */
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
key.len = (u32) params->key_len;
key.index = (u32) key_idx;
@@ -6887,20 +7680,23 @@
memcpy(key.data, params->key, key.len);
key.flags = WL_PRIMARY_KEY;
+
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+ val = wl_rsn_cipher_wsec_algo_lookup(params->cipher);
+ if (val == WSEC_NONE) {
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ /* WLAN_CIPHER_SUITE_PMK is not an NL80211 standard cipher suite, but a BRCM
+ * proprietary one, so it has no matching algo type. Just for now, bypass this
+ * check for backward compatibility.
+ * TODO: deprecate this proprietary way and replace to nl80211 set_pmk API.
+ */
+ if (params->cipher != WLAN_CIPHER_SUITE_PMK)
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+ return -EINVAL;
+ }
switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- val = WEP_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- val = WEP_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
- break;
case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- val = TKIP_ENABLED;
/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
if (mode == WL_MODE_BSS) {
bcopy(&key.data[24], keybuf, sizeof(keybuf));
@@ -6909,34 +7705,19 @@
}
WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- val = AES_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- val = AES_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
- break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- val = SMS4_ENABLED;
- break;
-#endif /* BCMWAPI_WPI */
#if defined(WLAN_CIPHER_SUITE_PMK)
- case WLAN_CIPHER_SUITE_PMK: {
- int j;
- wsec_pmk_t pmk;
- char keystring[WSEC_MAX_PSK_LEN + 1];
- char* charptr = keystring;
- uint len;
- struct wl_security *sec;
-
+ case WLAN_CIPHER_SUITE_PMK:
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
- if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) {
+
+ WL_MEM(("set_pmk: wpa_auth:%x akm:%x\n", sec->wpa_auth, params->cipher));
+ /* Avoid pmk set for SAE and OWE for external supplicant case. */
+ if (IS_AKM_SAE(sec->wpa_auth) || IS_AKM_OWE(sec->wpa_auth)) {
+ WL_INFORM_MEM(("skip pmk set for akm:%x\n", sec->wpa_auth));
+ break;
+ }
+
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
err = wldev_iovar_setbuf(dev, "okc_info_pmk", (const void *)params->key,
WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
if (err) {
@@ -6948,19 +7729,31 @@
for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
charptr += snprintf(charptr, sizeof(keystring), "%02x", params->key[j]);
}
- len = strlen(keystring);
+ len = (u16)strlen(keystring);
pmk.key_len = htod16(len);
bcopy(keystring, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
err = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (err)
+ if (err) {
return err;
- } break;
+ }
+ /* Clear key length to delete key */
+ key.len = 0;
+ break;
#endif /* WLAN_CIPHER_SUITE_PMK */
- default:
- WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ algos = KEY_ALGO_MASK(key.algo);
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ WL_DBG(("no post processing required (0x%x)\n", params->cipher));
+ break;
}
/* Set the new key/index */
@@ -7002,7 +7795,11 @@
WL_ERR(("set wsec error (%d)\n", err));
return err;
}
-
+#ifdef WL_GCMP
+ if (wl_set_wsec_info_algos(dev, algos, mask)) {
+ WL_ERR(("set wsec_info error (%d)\n", err));
+ }
+#endif /* WL_GCMP */
return err;
}
@@ -7014,6 +7811,7 @@
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
s32 bssidx;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
@@ -7027,7 +7825,9 @@
#endif // endif
RETURN_EIO_IF_NOT_UP(cfg);
- memset(&key, 0, sizeof(key));
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DELETE_KEY), dhd_net2idx(dhdp->info, dev), 0);
+ bzero(&key, sizeof(key));
key.flags = WL_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
@@ -7072,10 +7872,10 @@
}
WL_DBG(("key index (%d)\n", key_idx));
RETURN_EIO_IF_NOT_UP(cfg);
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.index = key_idx;
swap_key_to_BE(&key);
- memset(&params, 0, sizeof(params));
+ bzero(&params, sizeof(params));
params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
params.key = key.data;
@@ -7137,6 +7937,93 @@
#endif /* MFP */
}
+static bool
+wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ wl_assoc_info_t asinfo;
+ uint32 state = 0;
+ int err;
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("failed to get assoc_info : err=%d\n", err));
+ return FALSE;
+ } else {
+ memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
+ state = dtoh32(asinfo.state);
+ WL_DBG(("assoc state=%d\n", state));
+ }
+
+ return (state > 0)? TRUE:FALSE;
+}
+
+static s32
+wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
+{
+ s32 err = BCME_OK;
+ scb_val_t scb_val;
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+ if (dev == NULL || cfg == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* initialize rssi */
+ *rssi = 0;
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ /* Query RSSI sum across antennas */
+ bzero(&rssi_ant_mimo, sizeof(rssi_ant_mimo));
+ err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
+ if (err) {
+ WL_ERR(("Could not get rssi sum (%d)\n", err));
+ /* set rssi to zero and do not return error,
+ * because iovar phy_rssi_ant could return BCME_UNSUPPORTED
+ * when bssid was null during roaming
+ */
+ err = BCME_OK;
+ } else {
+ cfg->rssi_sum_report = TRUE;
+ if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
+ *rssi = 0;
+ }
+ }
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+ /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
+ if (cfg->rssi_sum_report == FALSE) {
+ bzero(&scb_val, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t));
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ return err;
+ }
+ *rssi = wl_rssi_offset(dtoh32(scb_val.val));
+ }
+
+ if (*rssi >= 0) {
+ /* check assoc status including roaming */
+ DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
+ if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
+ *rssi = cfg->rssi; /* use previous RSSI */
+ WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
+ } else {
+ *rssi = 0;
+ }
+ DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ } else {
+ /* backup the current rssi */
+ cfg->rssi = *rssi;
+ }
+
+ return err;
+}
+
static int
wl_cfg80211_ifstats_counters_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
@@ -7315,211 +8202,100 @@
}
#undef IF_COUNTERS_PARAM_CONTAINER_LEN_MAX
-static bool
-wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
-{
- wl_assoc_info_t asinfo;
- uint32 state = 0;
- int err;
-
- err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
- NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("failed to get assoc_info : err=%d\n", err));
- return FALSE;
- } else {
- memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
- state = dtoh32(asinfo.state);
- WL_DBG(("assoc state=%d\n", state));
- }
-
- return (state > 0)? TRUE:FALSE;
-}
-
-static s32
-wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
-{
- s32 err = BCME_OK;
- scb_val_t scb_val;
-#ifdef SUPPORT_RSSI_SUM_REPORT
- wl_rssi_ant_mimo_t rssi_ant_mimo;
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
- if (dev == NULL || cfg == NULL) {
- return BCME_ERROR;
- }
-
- /* initialize rssi */
- *rssi = 0;
-
-#ifdef SUPPORT_RSSI_SUM_REPORT
- /* Query RSSI sum across antennas */
- memset(&rssi_ant_mimo, 0, sizeof(rssi_ant_mimo));
- err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
- if (err) {
- WL_ERR(("Could not get rssi sum (%d)\n", err));
- /* set rssi to zero and do not return error,
- * because iovar phy_rssi_ant could return BCME_UNSUPPORTED
- * when bssid was null during roaming
- */
- err = BCME_OK;
- } else {
- cfg->rssi_sum_report = TRUE;
- if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
- *rssi = 0;
- }
- }
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
- /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
- if (cfg->rssi_sum_report == FALSE) {
- memset(&scb_val, 0, sizeof(scb_val));
- scb_val.val = 0;
- err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
- sizeof(scb_val_t));
- if (err) {
- WL_ERR(("Could not get rssi (%d)\n", err));
- return err;
- }
- *rssi = wl_rssi_offset(dtoh32(scb_val.val));
- }
-
- if (*rssi >= 0) {
- /* check assoc status including roaming */
- DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
- if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
- *rssi = cfg->rssi; /* use previous RSSI */
- WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
- } else {
- *rssi = 0;
- }
- DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- } else {
- /* backup the current rssi */
- cfg->rssi = *rssi;
- }
-
- return err;
-}
-
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
static s32
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
#else
static s32
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ u8 *mac, struct station_info *sinfo)
#endif // endif
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 rssi = 0;
- s32 rate;
+ s32 rate = 0;
s32 err = 0;
- sta_info_v4_t *sta;
- s32 mode;
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ get_pktcnt_t pktcnt;
+ wl_if_stats_t *if_stats = NULL;
+ sta_info_v4_t *sta = NULL;
+ u8 *curmacp = NULL;
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
s8 eabuf[ETHER_ADDR_STR_LEN];
#endif // endif
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
bool fw_assoc_state = FALSE;
u32 dhd_assoc_state = 0;
void *buf;
+
RETURN_EIO_IF_NOT_UP(cfg);
- mode = wl_get_mode_by_netdev(cfg, dev);
- if (mode < 0) {
- return -ENODEV;
+ if (cfg80211_to_wl_iftype(dev->ieee80211_ptr->iftype, &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
}
buf = MALLOC(cfg->osh, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
if (buf == NULL) {
- WL_ERR(("%s(%d): MALLOC failed\n", __FUNCTION__, __LINE__));
+ WL_ERR(("wl_cfg80211_get_station: MALLOC failed\n"));
goto error;
}
- if (mode == WL_MODE_AP) {
- err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
- ETHER_ADDR_LEN, buf, WLC_IOCTL_SMLEN, NULL);
- if (err < 0) {
- WL_ERR(("GET STA INFO failed, %d\n", err));
- goto error;
- }
- sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
- sta = (sta_info_v4_t *)buf;
- if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
- WL_ERR(("GET STA INFO version mismatch, %d\n", err));
- return BCME_VERSION;
- }
- sta->len = dtoh16(sta->len);
- sta->cap = dtoh16(sta->cap);
- sta->flags = dtoh32(sta->flags);
- sta->idle = dtoh32(sta->idle);
- sta->in = dtoh32(sta->in);
- sinfo->inactive_time = sta->idle * 1000;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
- if (sta->flags & WL_STA_ASSOC) {
- sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
- sinfo->connected_time = sta->in;
- }
- WL_INFORM_MEM(("[%s] STA %s : idle time : %d sec, connected time :%d ms\n",
- dev->name, bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
- sinfo->inactive_time, sta->idle * 1000));
-#endif // endif
- } else if ((mode == WL_MODE_BSS) || (mode == WL_MODE_IBSS)) {
- get_pktcnt_t pktcnt;
- wl_if_stats_t *if_stats = NULL;
- u8 *curmacp;
- if (cfg->roam_offload) {
- struct ether_addr bssid;
- memset(&bssid, 0, sizeof(bssid));
- err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- if (err) {
- WL_ERR(("Failed to get current BSSID\n"));
- } else {
- if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
- /* roaming is detected */
- err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
- if (err)
- WL_ERR(("Failed to handle the delayed roam, "
- "err=%d", err));
- mac = (u8 *)bssid.octet;
+ switch (wl_iftype) {
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_IBSS:
+ if (cfg->roam_offload) {
+ struct ether_addr bssid;
+ bzero(&bssid, sizeof(bssid));
+ err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err) {
+ WL_ERR(("Failed to get current BSSID\n"));
+ } else {
+ if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+ /* roaming is detected */
+ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+ if (err)
+ WL_ERR(("Failed to handle the delayed"
+ " roam, err=%d", err));
+ mac = (u8 *)bssid.octet;
+ }
}
}
- }
- dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
- DHD_OS_WAKE_LOCK(dhd);
- fw_assoc_state = dhd_is_associated(dhd, 0, &err);
- if (dhd_assoc_state && !fw_assoc_state) {
- /* check roam (join) status */
- if (wl_check_assoc_state(cfg, dev)) {
- fw_assoc_state = TRUE;
- WL_DBG(("roam status\n"));
+ dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+ DHD_OS_WAKE_LOCK(dhd);
+ fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+ if (dhd_assoc_state && !fw_assoc_state) {
+ /* check roam (join) status */
+ if (wl_check_assoc_state(cfg, dev)) {
+ fw_assoc_state = TRUE;
+ WL_DBG(("roam status\n"));
+ }
}
- }
- DHD_OS_WAKE_UNLOCK(dhd);
- if (!dhd_assoc_state || !fw_assoc_state) {
- WL_ERR(("NOT assoc\n"));
- if (err == -ENODATA)
- goto error;
- if (!dhd_assoc_state) {
- WL_TRACE_HW4(("drv state is not connected \n"));
- }
- if (!fw_assoc_state) {
- WL_TRACE_HW4(("fw state is not associated \n"));
- }
- /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms.
- * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
- * means that BSSID is null.
- */
- if (dhd_assoc_state && !fw_assoc_state && !err) {
- if (!fw_assoc_watchdog_started) {
- fw_assoc_watchdog_ms = OSL_SYSUPTIME();
- fw_assoc_watchdog_started = TRUE;
- WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
- } else {
- if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
- FW_ASSOC_WATCHDOG_TIME) {
+ DHD_OS_WAKE_UNLOCK(dhd);
+ if (!dhd_assoc_state || !fw_assoc_state) {
+ WL_ERR(("NOT assoc\n"));
+ if (err == -ENODATA)
+ goto error;
+ if (!dhd_assoc_state) {
+ WL_TRACE_HW4(("drv state is not connected \n"));
+ }
+ if (!fw_assoc_state) {
+ WL_TRACE_HW4(("fw state is not associated \n"));
+ }
+ /* Disconnect due to fw is not associated for
+ * FW_ASSOC_WATCHDOG_TIME ms.
+ * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
+ * means that BSSID is null.
+ */
+ if (dhd_assoc_state && !fw_assoc_state && !err) {
+ if (!fw_assoc_watchdog_started) {
+ fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+ fw_assoc_watchdog_started = TRUE;
+ WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+ } else if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+ FW_ASSOC_WATCHDOG_TIME) {
fw_assoc_watchdog_started = FALSE;
err = -ENODEV;
WL_TRACE_HW4(("fw is not associated for %d ms \n",
@@ -7527,117 +8303,158 @@
goto get_station_err;
}
}
+ err = -ENODEV;
+ goto error;
}
- err = -ENODEV;
- goto error;
- }
- fw_assoc_watchdog_started = FALSE;
- curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
- if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
- WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
- MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
- }
-
- /* Report the current tx rate */
- rate = 0;
- err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
- if (err) {
- WL_ERR(("Could not get rate (%d)\n", err));
- } else {
+ if (dhd_is_associated(dhd, 0, NULL)) {
+ fw_assoc_watchdog_started = FALSE;
+ }
+ curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+ MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+ }
+ /* go through to get another information */
+ /* fall through */
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_P2P_DISC:
+ if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
+ goto get_station_err;
+ }
+ sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+ /* go through to get another information */
+ /* fall through */
+ case WL_IF_TYPE_P2P_GO:
+ /* Report the current tx rate */
+ rate = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- int rxpktglom;
+ int rxpktglom;
#endif // endif
- rate = dtoh32(rate);
- sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
- sinfo->txrate.legacy = rate * 5;
- WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ rate = dtoh32(rate);
+ sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- rxpktglom = ((rate/2) > 150) ? 20 : 10;
+ rxpktglom = ((rate/2) > 150) ? 20 : 10;
- if (maxrxpktglom != rxpktglom) {
- maxrxpktglom = rxpktglom;
- WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
- maxrxpktglom));
- err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
- (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ if (maxrxpktglom != rxpktglom) {
+ maxrxpktglom = rxpktglom;
+ WL_DBG(("Rate %d Mbps, update bus:"
+ "maxtxpktglom=%d\n", (rate/2), maxrxpktglom));
+ err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+ (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ }
}
- }
#endif // endif
- }
-
- if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
- goto get_station_err;
- }
- sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
- sinfo->signal = rssi;
- WL_DBG(("RSSI %d dBm\n", rssi));
-
- if_stats = (wl_if_stats_t *)buf;
- memset(if_stats, 0, sizeof(*if_stats));
-
- if (FW_SUPPORTED(dhd, ifst)) {
- err = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else
- {
- err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
- (char *)if_stats, sizeof(*if_stats), NULL);
- }
-
- if (err) {
- WL_ERR(("if_counters not supported ret=%d\n", err));
- memset(&pktcnt, 0, sizeof(pktcnt));
- err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
- sizeof(pktcnt));
- if (!err) {
- sinfo->rx_packets = pktcnt.rx_good_pkt;
- sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
- sinfo->tx_packets = pktcnt.tx_good_pkt;
- sinfo->tx_failed = pktcnt.tx_bad_pkt;
}
- } else {
- sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
- sinfo->rx_dropped_misc = 0;
- sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
- sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
- (uint32)dtoh64(if_stats->txrunt) +
- (uint32)dtoh64(if_stats->txfail);
- }
+ if_stats = (wl_if_stats_t *)buf;
+ bzero(if_stats, sizeof(*if_stats));
+ if (FW_SUPPORTED(dhd, ifst)) {
+ err = wl_cfg80211_ifstats_counters(dev, if_stats);
+ } else
+ {
+ err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
+ }
- sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
- STA_INFO_BIT(INFO_RX_DROP_MISC) |
- STA_INFO_BIT(INFO_TX_PACKETS) |
- STA_INFO_BIT(INFO_TX_FAILED));
+ if (err) {
+ WL_ERR(("if_counters not supported ret=%d\n",
+ err));
+ bzero(&pktcnt, sizeof(pktcnt));
+ err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
+ sizeof(pktcnt));
+ if (!err) {
+ sinfo->rx_packets = pktcnt.rx_good_pkt;
+ sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+ sinfo->tx_packets = pktcnt.tx_good_pkt;
+ sinfo->tx_failed = pktcnt.tx_bad_pkt;
+ }
+ } else {
+ sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
+ sinfo->rx_dropped_misc = 0;
+ sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
+ sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
+ (uint32)dtoh64(if_stats->txrunt) +
+ (uint32)dtoh64(if_stats->txfail);
+ }
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+ STA_INFO_BIT(INFO_RX_DROP_MISC) |
+ STA_INFO_BIT(INFO_TX_PACKETS) |
+ STA_INFO_BIT(INFO_TX_FAILED));
get_station_err:
- if (err && (err != -ENODATA)) {
- /* Disconnect due to zero BSSID or error to get RSSI */
- scb_val_t scbval;
- scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
- err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
- if (unlikely(err)) {
- WL_ERR(("disassoc error (%d)\n", err));
- }
+ if (err && (err != -ENODATA)) {
+ /* Disconnect due to zero BSSID or error to get RSSI */
+ scb_val_t scbval;
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+ scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ WL_ERR(("disassoc error (%d)\n", err));
+ }
- WL_ERR(("force cfg80211_disconnected: %d\n", err));
- wl_clr_drv_status(cfg, CONNECTED, dev);
- CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
- wl_link_down(cfg);
- }
- }
- else {
- WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+ WL_ERR(("force cfg80211_disconnected: %d\n", err));
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_DONE),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_link_down(cfg);
+ }
+ break;
+ case WL_IF_TYPE_AP:
+ err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
+ ETHER_ADDR_LEN, buf, WLC_IOCTL_SMLEN, NULL);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ goto error;
+ }
+ sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
+ sta = (sta_info_v4_t *)buf;
+ if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
+ WL_ERR(("GET STA INFO version mismatch, %d\n", err));
+ return BCME_VERSION;
+ }
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
+ sinfo->connected_time = sta->in;
+ }
+ WL_INFORM_MEM(("[%s] STA %s : idle time : %d sec,"
+ " connected time :%d ms\n",
+ dev->name,
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
+ sinfo->inactive_time, sta->idle * 1000));
+#endif // endif
+ break;
+ default:
+ WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
}
error:
if (buf) {
MFREE(cfg->osh, buf, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
}
+
return err;
}
-static s32
+s32
wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, s32 timeout)
{
@@ -7697,24 +8514,11 @@
err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
if (err)
- WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+ WL_ERR(("wl_cfg80211_update_power_mode: error (%d)\n", err));
else if (pm != -1 && dev->ieee80211_ptr)
dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
}
-void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if (strcmp(command, "SCAN-ACTIVE") == 0) {
- cfg->active_scan = 1;
- } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
- cfg->active_scan = 0;
- } else
- WL_ERR(("Unknown command \n"));
- return;
-}
-
static __used u32 wl_find_msb(u16 bit16)
{
u32 ret = 0;
@@ -7817,7 +8621,7 @@
/* reset buffers */
buf_len = 0;
- memset(pkt_filterp, 0, max_buf_size);
+ bzero(pkt_filterp, max_buf_size);
/* copy filter id */
store32_ua(&pkt_filterp->id, (WL_WOWLAN_PKT_FILTER_ID_FIRST + i));
@@ -7916,7 +8720,7 @@
if (iter->ndev)
wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
if (cfg->scan_request) {
cfg80211_scan_done(cfg->scan_request, true);
cfg->scan_request = NULL;
@@ -7927,7 +8731,7 @@
wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
@@ -7952,7 +8756,9 @@
int i, j;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ int npmkids = cfg->pmk_list->pmkids.count;
+ ASSERT(cfg->pmk_list->pmkids.length >= (sizeof(u16)*2));
if (!pmk_list) {
WL_INFORM_MEM(("pmk_list is NULL\n"));
return -EINVAL;
@@ -7966,22 +8772,110 @@
return err;
}
- WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
- for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ WL_DBG(("No of elements %d\n", npmkids));
+ for (i = 0; i < npmkids; i++) {
WL_DBG(("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].BSSID));
+ &pmk_list->pmkids.pmkid[i].bssid));
for (j = 0; j < WPA2_PMKID_LEN; j++) {
- WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].pmkid[j]));
}
}
- if (likely(!err)) {
- err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (cfg->wlc_ver.wlc_ver_major >= MIN_PMKID_LIST_V3_FW_MAJOR) {
+ pmk_list->pmkids.version = PMKID_LIST_VER_3;
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
}
+ else if (cfg->wlc_ver.wlc_ver_major == MIN_PMKID_LIST_V2_FW_MAJOR) {
+ u32 v2_list_size = (u32)(sizeof(pmkid_list_v2_t) + npmkids*sizeof(pmkid_v2_t));
+ pmkid_list_v2_t *pmkid_v2_list = (pmkid_list_v2_t *)MALLOCZ(cfg->osh, v2_list_size);
+ if (pmkid_v2_list == NULL) {
+ WL_ERR(("failed to allocate pmkid list\n"));
+ return BCME_NOMEM;
+ }
+
+ pmkid_v2_list->version = PMKID_LIST_VER_2;
+ /* Account for version, length and pmkid_v2_t fields */
+ pmkid_v2_list->length = (npmkids * sizeof(pmkid_v2_t)) + (2 * sizeof(u16));
+
+ for (i = 0; i < npmkids; i++) {
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v2_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+
+ /* copy pmkid if available */
+ if (pmk_list->pmkids.pmkid[i].pmkid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN,
+ pmk_list->pmkids.pmkid[i].pmkid,
+ pmk_list->pmkids.pmkid[i].pmkid_len);
+ }
+
+ if (pmk_list->pmkids.pmkid[i].pmk_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len,
+ pmk_list->pmkids.pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len);
+ pmkid_v2_list->pmkid[i].pmk_len = pmk_list->pmkids.pmkid[i].pmk_len;
+ }
+
+ if (pmk_list->pmkids.pmkid[i].ssid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].ssid.ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len,
+ pmk_list->pmkids.pmkid[i].ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len);
+ pmkid_v2_list->pmkid[i].ssid.ssid_len
+ = pmk_list->pmkids.pmkid[i].ssid_len;
+ }
+
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN, &pmk_list->pmkids.pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN);
+ pmkid_v2_list->pmkid[i].length = PMKID_ELEM_V2_LENGTH;
+ }
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v2_list,
+ v2_list_size, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkid_info failed (%d)\n", err));
+ }
+
+ MFREE(cfg->osh, pmkid_v2_list, v2_list_size);
+ }
+ else {
+ u32 v1_list_size = (u32)(sizeof(pmkid_list_v1_t) + npmkids*sizeof(pmkid_v1_t));
+ pmkid_list_v1_t *pmkid_v1_list = (pmkid_list_v1_t *)MALLOCZ(cfg->osh, v1_list_size);
+ if (pmkid_v1_list == NULL) {
+ WL_ERR(("failed to allocate pmkid list\n"));
+ return BCME_NOMEM;
+ }
+ for (i = 0; i < npmkids; i++) {
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v1_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+ (void)memcpy_s(pmkid_v1_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN, pmk_list->pmkids.pmkid[i].pmkid,
+ WPA2_PMKID_LEN);
+ pmkid_v1_list->npmkid++;
+ }
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v1_list,
+ v1_list_size, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkid_info failed (%d)\n", err));
+ }
+
+ MFREE(cfg->osh, pmkid_v1_list, v1_list_size);
+ }
return err;
}
+/* TODO: remove the temporary cfg->pmk_list and call wl_cfg80211_update_pmksa for single
+ * entry operation.
+ */
static s32
wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
@@ -7989,136 +8883,283 @@
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
int i;
+ int npmkids = cfg->pmk_list->pmkids.count;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
RETURN_EIO_IF_NOT_UP(cfg);
- for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
- if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN))
- break;
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_PMKSA), dhd_net2idx(dhdp->info, dev), 0);
+
+ for (i = 0; i < npmkids; i++) {
+ if (pmksa->bssid != NULL) {
+ if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN))
+ break;
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid != NULL) {
+ if (!memcmp(pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
+ pmksa->ssid_len))
+ break;
+ }
+#endif /* WL_FILS */
+ }
if (i < WL_NUM_PMKIDS_MAX) {
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
- ETHER_ADDR_LEN);
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+ if (pmksa->bssid != NULL) {
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].bssid, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid != NULL) {
+ cfg->pmk_list->pmkids.pmkid[i].ssid_len = pmksa->ssid_len;
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].ssid, pmksa->ssid,
+ pmksa->ssid_len);
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].fils_cache_id, pmksa->cache_id,
+ FILS_CACHE_ID_LEN);
+ }
+#endif /* WL_FILS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS))
+ if (pmksa->pmk_len) {
+ if (memcpy_s(&cfg->pmk_list->pmkids.pmkid[i].pmk, PMK_LEN_MAX, pmksa->pmk,
+ pmksa->pmk_len)) {
+ WL_ERR(("invalid pmk len = %lu", pmksa->pmk_len));
+ } else {
+ cfg->pmk_list->pmkids.pmkid[i].pmk_len = pmksa->pmk_len;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS) */
+ /* return check not required as buffer lengths are same */
+ (void)memcpy_s(cfg->pmk_list->pmkids.pmkid[i].pmkid, WPA2_PMKID_LEN, pmksa->pmkid,
WPA2_PMKID_LEN);
- if (i == cfg->pmk_list->pmkids.npmkid)
- cfg->pmk_list->pmkids.npmkid++;
+ cfg->pmk_list->pmkids.pmkid[i].pmkid_len = WPA2_PMKID_LEN;
+
+ /* set lifetime not to expire in firmware by default.
+ * Currently, wpa_supplicant controls the PMKID lifetime on its end, e.g. sets 12 hours.
+ * When it expires, wpa_supplicant should call set_pmksa/del_pmksa to update the
+ * corresponding entry.
+ */
+ cfg->pmk_list->pmkids.pmkid[i].time_left = KEY_PERM_PMK;
+ if (i == npmkids) {
+ cfg->pmk_list->pmkids.length += sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count++;
+ }
} else {
err = -EINVAL;
}
- WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
+
+#if (WL_DBG_LEVEL > 0)
+ if (pmksa->bssid != NULL) {
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &cfg->pmk_list->pmkids.pmkid[npmkids - 1].bssid));
+ }
for (i = 0; i < WPA2_PMKID_LEN; i++) {
WL_DBG(("%02x\n",
- cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
- PMKID[i]));
+ cfg->pmk_list->pmkids.pmkid[npmkids - 1].
+ pmkid[i]));
}
+#endif /* (WL_DBG_LEVEL > 0) */
err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
+/* Send the pmkid_info IOVAR to manipulate the PMKID (PMKSA) list in firmware.
+ * input @pmksa: a single PMKSA entry given by the host.
+ *               If NULL, the whole list is manipulated, e.g. flushing all PMKIDs in firmware.
+ * input @set: TRUE means an add-PMKSA operation, FALSE means delete.
+ * return: an internal BCME_XXX error is logged and converted to the generic Linux
+ *         error code -EINVAL.
+ */
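+/* Illustrative usage (as in wl_cfg80211_del_pmksa below):
+ *   wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);  deletes a single entry,
+ *   wl_cfg80211_update_pmksa(wiphy, dev, NULL, FALSE);   flushes the whole list,
+ * the second form following the NULL-pmksa convention described above.
+ */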
+static s32 wl_cfg80211_update_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa, bool set) {
+
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ pmkid_list_v3_t *pmk_list;
+ uint32 alloc_len;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ if (cfg->wlc_ver.wlc_ver_major < MIN_PMKID_LIST_V3_FW_MAJOR) {
+ WL_ERR(("wlc_ver_major not supported:%d\n", cfg->wlc_ver.wlc_ver_major));
+ return BCME_VERSION;
+ }
+
+ alloc_len = OFFSETOF(pmkid_list_v3_t, pmkid) + ((pmksa) ? sizeof(pmkid_v3_t) : 0);
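+ /* Note: the allocation is the fixed pmkid_list_v3_t header plus one
+ * pmkid_v3_t entry for single-PMKSA operations, or the header alone for
+ * whole-list operations.
+ */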
+ pmk_list = (pmkid_list_v3_t *)MALLOCZ(cfg->osh, alloc_len);
+
+ if (pmk_list == NULL) {
+ return BCME_NOMEM;
+ }
+
+ pmk_list->version = PMKID_LIST_VER_3;
+ pmk_list->length = alloc_len;
+ pmk_list->count = (pmksa) ? 1 : 0; /* 1: single-entry operation, 0: whole list */
+
+ /* Control the set/del action through the lifetime parameter:
+ * set == TRUE  -> set PMKID with a permanent lifetime,
+ * set == FALSE -> delete PMKID by using a lifetime of zero.
+ */
+ pmk_list->pmkid->time_left = (set) ? KEY_PERM_PMK : 0;
+
+ if (pmksa) {
+ if (pmksa->bssid) {
+ err = memcpy_s(&pmk_list->pmkid->bssid, sizeof(pmk_list->pmkid->bssid),
+ pmksa->bssid, ETHER_ADDR_LEN);
+ if (err) {
+ goto exit;
+ }
+ }
+ if (pmksa->pmkid) {
+ err = memcpy_s(&pmk_list->pmkid->pmkid, sizeof(pmk_list->pmkid->pmkid),
+ pmksa->pmkid, WPA2_PMKID_LEN);
+ if (err) {
+ goto exit;
+ }
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ if (pmksa->pmk) {
+ err = memcpy_s(&pmk_list->pmkid->pmk, sizeof(pmk_list->pmkid->pmk),
+ pmksa->pmk, pmksa->pmk_len);
+ if (err) {
+ goto exit;
+ }
+ pmk_list->pmkid->pmk_len = pmksa->pmk_len;
+ }
+ if (pmksa->ssid) {
+ err = memcpy_s(&pmk_list->pmkid->ssid, sizeof(pmk_list->pmkid->ssid),
+ pmksa->ssid, pmksa->ssid_len);
+ if (err) {
+ goto exit;
+ }
+ pmk_list->pmkid->ssid_len = pmksa->ssid_len;
+ }
+ if (pmksa->cache_id) {
+ pmk_list->pmkid->fils_cache_id = *pmksa->cache_id;
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
+ }
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ alloc_len, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+exit:
+ if (pmk_list) {
+ MFREE(cfg->osh, pmk_list, alloc_len);
+ }
+ return err;
+}
+
+/* TODO: remove temporal cfg->pmk_list list, and call wl_cfg80211_update_pmksa for single
+ * entry operation.
+ */
static s32
wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-
- pmkid_list_t pmkid = {.npmkid = 0};
s32 err = 0;
int i;
-
+ int npmkids = cfg->pmk_list->pmkids.count;
RETURN_EIO_IF_NOT_UP(cfg);
- memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
- memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
- WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
- &pmkid.pmkid[0].BSSID));
- for (i = 0; i < WPA2_PMKID_LEN; i++) {
- WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+ if (!pmksa) {
+ WL_ERR(("pmksa is not initialized\n"));
+ return BCME_ERROR;
+ }
+ if (!npmkids) {
+ /* npmkids = 0, nothing to delete */
+ WL_DBG(("npmkids=0. Skip del\n"));
+ return BCME_OK;
}
- for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
- if (!memcmp
- (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN))
- break;
-
- if ((cfg->pmk_list->pmkids.npmkid > 0) &&
- (i < cfg->pmk_list->pmkids.npmkid)) {
- memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
- for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
- &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
- ETHER_ADDR_LEN);
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
- &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
- WPA2_PMKID_LEN);
+#if (WL_DBG_LEVEL > 0)
+ if (pmksa->bssid) {
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ pmksa->bssid));
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid) {
+ WL_DBG(("FILS: del_pmksa for ssid: "));
+ for (i = 0; i < pmksa->ssid_len; i++) {
+ WL_DBG(("%c", pmksa->ssid[i]));
}
- cfg->pmk_list->pmkids.npmkid--;
+ WL_DBG(("\n"));
+ }
+#endif /* WL_FILS */
+ if (pmksa->pmkid) {
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmksa->pmkid[i]));
+ }
+ }
+#endif /* (WL_DBG_LEVEL > 0) */
+
+ for (i = 0; i < npmkids; i++) {
+ if (pmksa->bssid) {
+ if (!memcmp
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN)) {
+ break;
+ }
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid) {
+ if (!memcmp
+ (pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
+ pmksa->ssid_len)) {
+ break;
+ }
+ }
+#endif /* WL_FILS */
+ }
+ if ((npmkids > 0) && (i < npmkids)) {
+ bzero(&cfg->pmk_list->pmkids.pmkid[i], sizeof(pmkid_v3_t));
+ for (; i < (npmkids - 1); i++) {
+ (void)memcpy_s(&cfg->pmk_list->pmkids.pmkid[i],
+ sizeof(pmkid_v3_t),
+ &cfg->pmk_list->pmkids.pmkid[i + 1],
+ sizeof(pmkid_v3_t));
+ }
+ npmkids--;
+ cfg->pmk_list->pmkids.length -= sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count--;
+
} else {
err = -EINVAL;
}
+ /* The current wl_update_pmklist() does not delete the corresponding PMKID
+ * entry inside the firmware, so the delete action must be issued explicitly
+ * through this function.
+ */
+ err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);
+ /* Intentionally fall through even on error: the delete is expected to work
+ * on firmware at or above MIN_PMKID_LIST_V3_FW_MAJOR; otherwise the error is ignored.
+ */
+
err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
+/* TODO: remove temporal cfg->pmk_list list, and call wl_cfg80211_update_pmksa for single
+ * entry operation.
+ */
static s32
wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
RETURN_EIO_IF_NOT_UP(cfg);
- memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+ bzero(cfg->pmk_list, sizeof(*cfg->pmk_list));
+ cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+ cfg->pmk_list->pmkids.count = 0;
+ cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
-static wl_scan_params_t *
-wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg, int channel, int nprobes,
- int *out_params_size)
-{
- wl_scan_params_t *params;
- int params_size;
- int num_chans;
-
- *out_params_size = 0;
-
- /* Our scan params only need space for 1 channel and 0 ssids */
- params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
- params = (wl_scan_params_t *)MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
- return params;
- }
- memset(params, 0, params_size);
- params->nprobes = nprobes;
-
- num_chans = (channel == 0) ? 0 : 1;
-
- memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = DOT11_SCANTYPE_ACTIVE;
- params->nprobes = htod32(1);
- params->active_time = htod32(-1);
- params->passive_time = htod32(-1);
- params->home_time = htod32(10);
- if (channel == -1)
- params->channel_list[0] = htodchanspec(channel);
- else
- params->channel_list[0] = wl_ch_host_to_driver(channel);
-
- /* Our scan params have 1 channel and 0 ssids */
- params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
-
- *out_params_size = params_size; /* rtn size to the caller */
- return params;
-}
-
#if defined(WL_CFG80211_P2P_DEV_IF)
static s32
wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
@@ -8137,20 +9178,24 @@
struct ether_addr primary_mac;
struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev_dpp_listen = NULL;
+ RETURN_EIO_IF_NOT_UP(cfg);
#ifdef DHD_IFDEBUG
PRINT_WDEV_INFO(cfgdev);
#endif /* DHD_IFDEBUG */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef WL_NAN
- if (wl_cfgnan_check_state(cfg)) {
- WL_ERR(("nan is enabled, nan + p2p concurrency not supported\n"));
- return BCME_UNSUPPORTED;
- }
-#endif /* WL_NAN */
+ /*
+ * A DPP listen request arrives on the primary interface;
+ * if so, mark the DPP listen flag on that interface.
+ */
+ ndev_dpp_listen = cfgdev_to_ndev(cfgdev);
+ if (ndev_dpp_listen)
+ wl_set_dpp_listen_by_netdev(cfg, ndev_dpp_listen, 1);
+ mutex_lock(&cfg->usr_sync);
WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
ieee80211_frequency_to_channel(channel->center_freq),
duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
@@ -8164,13 +9209,14 @@
#ifdef P2P_LISTEN_OFFLOADING
if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto exit;
}
#endif /* P2P_LISTEN_OFFLOADING */
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
@@ -8210,6 +9256,10 @@
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+#ifdef WL_BCNRECV
+ /* abort the fake AP scan (bcnrecv) if one is in progress */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_LISTENBUSY);
+#endif /* WL_BCNRECV */
#ifdef WL_CFG80211_SYNC_GON
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
/* do not enter listen mode again if we are in listen mode already for next af.
@@ -8276,6 +9326,7 @@
} else {
WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
}
+ mutex_unlock(&cfg->usr_sync);
return err;
}
@@ -8318,8 +9369,9 @@
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
} else {
- WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n",
- __FUNCTION__, cookie, cfg->last_roc_id));
+ WL_ERR(("wl_cfg80211_cancel_remain_on_channel: ignore, request cookie(%llu)"
+ " is not matched. (cur : %llu)\n",
+ cookie, cfg->last_roc_id));
}
return err;
@@ -8433,6 +9485,60 @@
bool search_channel; /* 1: search peer's channel to send af */
};
+#ifdef WL_DISABLE_HE_P2P
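+/* Look up the peer's BSS entry in cfg80211 and, if its IEs do not carry the
+ * HE capability extension, disable HE on the P2P connection bsscfg before
+ * GO negotiation starts.
+ */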
+static s32
+wl_cfg80211_he_p2p_disable(struct wiphy *wiphy, struct ether_addr peer_mac)
+{
+ struct cfg80211_bss *bss;
+ u8 *ie = NULL;
+ u32 ie_len = 0;
+ struct net_device *ndev = NULL;
+ s32 bssidx = 0;
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ bss = CFG80211_GET_BSS(wiphy, NULL, peer_mac.octet, NULL, 0);
+ if (!bss) {
+ WL_ERR(("Could not find the Peer device\n"));
+ return BCME_ERROR;
+ } else {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
+#else
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ GCC_DIAGNOSTIC_POP();
+ }
+ if (ie) {
+ if ((bcm_parse_tlvs_dot11(ie, ie_len,
+ EXT_MNG_HE_CAP_ID, TRUE)) == NULL) {
+ WL_DBG(("Peer does not support HE capability\n"));
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
+ if (ndev && (bssidx =
+ wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ err = BCME_ERROR;
+ } else {
+ WL_DBG(("Disabling HE for P2P\n"));
+ err = wl_cfg80211_set_he_mode(ndev, cfg, bssidx,
+ WL_IF_TYPE_P2P_DISC, FALSE);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+ }
+ } else {
+ WL_DBG(("Peer supports HE capability\n"));
+ }
+ }
+ CFG80211_PUT_BSS(wiphy, bss);
+
+ return err;
+}
+#endif /* WL_DISABLE_HE_P2P */
+
static s32
wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
wl_action_frame_t *action_frame, wl_af_params_t *af_params,
@@ -8456,6 +9562,10 @@
switch (act_frm->subtype) {
case P2P_PAF_GON_REQ: {
+ /* Disable HE if the peer does not support it, before starting GO negotiation */
+#ifdef WL_DISABLE_HE_P2P
+ wl_cfg80211_he_p2p_disable(wiphy, action_frame->da);
+#endif /* WL_DISABLE_HE_P2P */
WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
wl_set_p2p_status(cfg, GO_NEG_PHASE);
@@ -8740,7 +9850,7 @@
/* if scan is ongoing, abort current scan. */
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
/* Abort P2P listen */
@@ -8807,7 +9917,7 @@
* but after the check of piggyback algorithm.
* To take care of current piggback algo, lets abort the scan here itself.
*/
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
/* Suspend P2P discovery's search-listen to prevent it from
* starting a scan or changing the channel.
*/
@@ -8959,6 +10069,13 @@
u32 id;
bool ack = false;
s8 eabuf[ETHER_ADDR_STR_LEN];
+#ifdef WL_SAE
+ struct net_info *netinfo = NULL;
+ struct wl_mf_params *mf_params;
+ u32 mf_params_len = 0;
+ s32 timeout = 0;
+ s32 chan_nr;
+#endif // endif
WL_DBG(("Enter \n"));
@@ -9022,8 +10139,8 @@
#if defined(P2P_IE_MISSING_FIX)
if (!cfg->p2p_prb_noti) {
cfg->p2p_prb_noti = true;
- WL_DBG(("%s: TX 802_1X Probe Response first time.\n",
- __FUNCTION__));
+ WL_DBG(("wl_cfg80211_mgmt_tx: TX 802_1X Probe"
+ " Response first time.\n"));
}
#endif // endif
goto exit;
@@ -9071,10 +10188,78 @@
* And previous off-channel action frame must be ended before new af tx.
*/
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
- }
+#ifdef WL_SAE
+ } else if (ieee80211_is_auth(mgmt->frame_control)) {
+ netinfo = wl_get_netinfo_by_wdev(cfg, cfgdev_to_wdev(cfgdev));
+ reinit_completion(&netinfo->mgmt_tx_cpl);
+ clear_bit(MGMT_TX_ACK, &netinfo->mgmt_txstatus);
+ clear_bit(MGMT_TX_NOACK, &netinfo->mgmt_txstatus);
+ clear_bit(MGMT_TX_OFF_CHAN_COMPLETED,
+ &netinfo->mgmt_txstatus);
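+ /* Reset the TX-status bookkeeping for this SAE auth frame; the status bits
+ * and the completion are presumably signalled from the management TX status
+ * event handler once the firmware reports the result.
+ */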
+ mf_params_len = offsetof(struct wl_mf_params, data) +
+ (len - DOT11_MGMT_HDR_LEN);
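+ /* Buffer layout: fixed wl_mf_params header followed by the auth frame
+ * body, i.e. the management payload without the 802.11 header.
+ */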
+ mf_params = (wl_mf_params_t *)MALLOCZ(cfg->osh, mf_params_len);
+ if (!mf_params) {
+ WL_ERR(("Insufficient memory to allocate auth frame\n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ mf_params->dwell_time = MGMT_AUTH_FRAME_DWELL_TIME;
+ mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ mf_params->fc = mgmt->frame_control;
+
+ /* update channel */
+ if (!channel) {
+ mf_params->channel = 0;
+ } else {
+ chan_nr = ieee80211_frequency_to_channel(channel->center_freq);
+ mf_params->channel = cpu_to_le32(chan_nr);
+ }
+
+ memcpy(&mf_params->da.octet, &mgmt->da[0], ETH_ALEN);
+ memcpy(&mf_params->bssid.octet, &mgmt->bssid[0], ETH_ALEN);
+ *cookie = (u64)mf_params->data;
+ mf_params->packetId = cpu_to_le32(*cookie);
+
+ memcpy(mf_params->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(mf_params->len));
+
+ WL_DBG(("Auth frame, cookie=%lld, fc=%x, len=%d, channel=%d\n",
+ *cookie, mf_params->fc,
+ le16_to_cpu(mf_params->len),
+ mf_params->channel));
+
+ netinfo->mgmt_txid = mf_params->packetId;
+ set_bit(MGMT_TX_SEND_FRAME, &netinfo->mgmt_txstatus);
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "mgmt_frame", mf_params, mf_params_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("Failed to send auth frame %d\n", err));
+ ack = false;
+ goto txstatus;
+ }
+
+ timeout = wait_for_completion_timeout(&netinfo->mgmt_tx_cpl,
+ MGMT_AUTH_FRAME_WAIT_TIME);
+ if ((timeout > 0) || test_bit(MGMT_TX_ACK, &netinfo->mgmt_txstatus)) {
+ WL_DBG(("TX auth frame operation is success\n"));
+ ack = true;
+ } else {
+ ack = false;
+ WL_ERR(("TX auth frame operation has failed, txstatus %ld\n",
+ netinfo->mgmt_txstatus));
+ }
+txstatus:
+ cfg80211_mgmt_tx_status(cfgdev_to_wdev(cfgdev), *cookie,
+ buf, len, ack, GFP_KERNEL);
+ MFREE(cfg->osh, mf_params, mf_params_len);
+ goto exit;
+#endif /* WL_SAE */
+ }
} else {
WL_ERR(("Driver only allows MGMT packet type\n"));
goto exit;
@@ -9145,10 +10330,7 @@
{
s32 err = 0;
s32 ap_isolate = 0;
-#ifdef PCIE_FULL_DONGLE
s32 ifidx = DHD_BAD_IF;
-#endif // endif
-#if defined(PCIE_FULL_DONGLE)
dhd_pub_t *dhd;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd = (dhd_pub_t *)(cfg->pub);
@@ -9156,7 +10338,6 @@
if (cfg->p2p_net == dev)
dev = bcmcfg_to_prmry_ndev(cfg);
#endif // endif
-#endif // endif
if (params->use_cts_prot >= 0) {
}
@@ -9172,7 +10353,6 @@
if (params->ap_isolate >= 0) {
ap_isolate = params->ap_isolate;
-#ifdef PCIE_FULL_DONGLE
ifidx = dhd_net2idx(dhd->info, dev);
if (ifidx != DHD_BAD_IF) {
@@ -9180,13 +10360,16 @@
} else {
WL_ERR(("Failed to set ap_isolate\n"));
}
-#else
- err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+#ifdef BCMSDIO
+ /* The onus of intra-BSS packet forwarding has moved to DHD;
+ * DHD now handles intra-BSS packet forwarding.
+ */
+ err = wldev_iovar_setint(dev, "ap_isolate", AP_ISOLATE_SENDUP_ALL);
if (unlikely(err))
{
WL_ERR(("set ap_isolate Error (%d)\n", err));
}
-#endif /* PCIE_FULL_DONGLE */
+#endif /* BCMSDIO */
}
if (params->ht_opmode >= 0) {
@@ -9195,6 +10378,71 @@
return err;
}
+static int
+wl_get_bandwidth_cap(struct net_device *ndev, uint32 band, uint32 *bandwidth)
+{
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 err = BCME_OK;
+ s32 bw_cap = 0;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ u32 channel_width = 0;
+ struct wireless_dev *wdev = ndev_to_wdev(ndev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
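+ /* Bandwidth selection: on 5 GHz honor any per-netdev width the user set
+ * (80/40/20 MHz, cleared after use), otherwise query the firmware "bw_cap"
+ * iovar and fall back to the legacy "mimo_bw_cap"; 2.4 GHz is fixed at 20 MHz.
+ */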
+ if (band == IEEE80211_BAND_5GHZ) {
+ param.band = WLC_BAND_5G;
+ channel_width = wl_get_chanwidth_by_netdev(cfg, ndev);
+ switch (channel_width) {
+ case WL_CHANSPEC_BW_80:
+ case WL_CHANSPEC_BW_40:
+ case WL_CHANSPEC_BW_20:
+ bw = channel_width;
+ /* resetting user specified channel width */
+ wl_set_chanwidth_by_netdev(cfg, ndev, 0);
+ break;
+ default:
+ err = wldev_iovar_getbuf(ndev, "bw_cap", ¶m, sizeof(param),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ WL_ERR(("bw_cap failed, %d\n", err));
+ return err;
+ } else {
+ err = wldev_iovar_getint(ndev, "mimo_bw_cap",
+ &bw_cap);
+ if (err) {
+ WL_ERR(("error get mimo_bw_cap (%d)\n",
+ err));
+ }
+ if (bw_cap != WLC_N_BW_20ALL) {
+ bw = WL_CHANSPEC_BW_40;
+ }
+ }
+ } else {
+ if (WL_BW_CAP_80MHZ(ioctl_buf[0])) {
+ bw = WL_CHANSPEC_BW_80;
+ } else if (WL_BW_CAP_40MHZ(ioctl_buf[0])) {
+ bw = WL_CHANSPEC_BW_40;
+ } else {
+ bw = WL_CHANSPEC_BW_20;
+ }
+ }
+ break;
+ }
+ } else if (band == IEEE80211_BAND_2GHZ) {
+ bw = WL_CHANSPEC_BW_20;
+ }
+
+ *bandwidth = bw;
+
+ return err;
+}
+
static s32
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
@@ -9204,14 +10452,7 @@
chanspec_t chspec = 0;
chanspec_t fw_chspec = 0;
u32 bw = WL_CHANSPEC_BW_20;
-
s32 err = BCME_OK;
- s32 bw_cap = 0;
- struct {
- u32 band;
- u32 bw_cap;
- } param = {0, 0};
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
#if defined(CUSTOM_SET_CPUCORE) || defined(APSTA_RESTRICTED_CHANNEL)
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
@@ -9223,8 +10464,6 @@
dev->ifindex, channel_type, _chan));
#if defined(APSTA_RESTRICTED_CHANNEL)
-#define DEFAULT_2G_SOFTAP_CHANNEL 1
-#define DEFAULT_5G_SOFTAP_CHANNEL 149
if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
@@ -9244,38 +10483,14 @@
}
}
}
-#undef DEFAULT_2G_SOFTAP_CHANNEL
-#undef DEFAULT_5G_SOFTAP_CHANNEL
#endif /* APSTA_RESTRICTED_CHANNEL */
- if (chan->band == IEEE80211_BAND_5GHZ) {
- param.band = WLC_BAND_5G;
- err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (err) {
- if (err != BCME_UNSUPPORTED) {
- WL_ERR(("bw_cap failed, %d\n", err));
- return err;
- } else {
- err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
- if (err) {
- WL_ERR(("error get mimo_bw_cap (%d)\n", err));
- }
- if (bw_cap != WLC_N_BW_20ALL)
- bw = WL_CHANSPEC_BW_40;
- }
- } else {
- if (WL_BW_CAP_80MHZ(ioctl_buf[0]))
- bw = WL_CHANSPEC_BW_80;
- else if (WL_BW_CAP_40MHZ(ioctl_buf[0]))
- bw = WL_CHANSPEC_BW_40;
- else
- bw = WL_CHANSPEC_BW_20;
+ err = wl_get_bandwidth_cap(dev, chan->band, &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information, err=%d\n", err));
+ return err;
+ }
- }
-
- } else if (chan->band == IEEE80211_BAND_2GHZ)
- bw = WL_CHANSPEC_BW_20;
set_channel:
chspec = wf_channel2chspec(_chan, bw);
if (wf_chspec_valid(chspec)) {
@@ -9358,22 +10573,50 @@
wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
{
struct net_info *_net_info, *next;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
return _net_info->ndev;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
return NULL;
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#ifdef BCMWAPI_WPI
+static s32
+wl_validate_wapisecurity(struct net_device *dev, s32 bssidx)
+{
+ s32 err = BCME_OK;
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("WAPI auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", SMS4_ENABLED, bssidx);
+ if (err < 0) {
+ WL_ERR(("WAPI wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WAPI_AUTH_PSK, bssidx);
+ if (err < 0) {
+ WL_ERR(("WAPI wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ return 0;
+}
+#endif /* BCMWAPI_WPI */
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
static s32
wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy)
{
@@ -9415,22 +10658,60 @@
return 0;
}
+#define MAX_FILS_IND_IE_LEN 1024u
static s32
wl_validate_fils_ind_ie(struct net_device *dev, const bcm_tlv_t *filsindie, s32 bssidx)
{
s32 err = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct bcm_cfg80211 *cfg = NULL;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_xtlv_t* pxtlv;
+ int iov_buf_size = 0;
- if (filsindie == NULL)
- return err;
+ if (!dev || !filsindie) {
+ WL_ERR(("%s: dev/filsidie is null\n", __FUNCTION__));
+ goto exit;
+ }
- err = wldev_iovar_setbuf_bsscfg(dev, "fils_ind", (const void *)filsindie->data,
- filsindie->len, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync);
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ WL_ERR(("%s: cfg is null\n", __FUNCTION__));
+ goto exit;
+ }
+
+ iov_buf_size = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) + filsindie->len - 1;
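+ /* Size: bcm_iov_buf_t header + bcm_xtlv_t header + IE payload; the "- 1"
+ * is assumed to account for the one-byte flexible data placeholder already
+ * counted in the fixed structure sizes.
+ */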
+ iov_buf = MALLOCZ(cfg->osh, iov_buf_size);
+ if (!iov_buf) {
+ WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, iov_buf_size));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_IND_IE;
+ iov_buf->len = sizeof(bcm_xtlv_t) + filsindie->len - 1;
+ pxtlv = (bcm_xtlv_t*)&iov_buf->data[0];
+ pxtlv->id = WL_FILS_XTLV_IND_IE;
+ pxtlv->len = filsindie->len;
+ /* memcpy_s return check not required as buffer is allocated based on ie
+ * len
+ */
+ (void)memcpy_s(pxtlv->data, filsindie->len, filsindie->data, filsindie->len);
+
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf_size,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("fils indication ioctl error (%d)\n", err));
+ goto exit;
+ }
+
+exit:
if (err < 0) {
WL_ERR(("FILS Ind setting error %d\n", err));
}
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, iov_buf_size);
+ }
return err;
}
@@ -9525,6 +10806,7 @@
mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
suite_count = cnt = ltoh16_ua(&mgmt->count);
while (cnt--) {
+ if (!bcmp(mgmt->list[cnt].oui, WPA2_OUI, WPA2_OUI_LEN)) {
switch (mgmt->list[cnt].type) {
case RSN_AKM_NONE:
wpa_auth |= WPA_AUTH_NONE;
@@ -9548,10 +10830,17 @@
case RSN_AKM_FILS_SHA384:
wpa_auth |= WPA2_AUTH_FILS_SHA384;
break;
+#ifdef WL_SAE
+ case RSN_AKM_SAE_PSK:
+ wpa_auth |= WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE */
#endif /* MFP */
default:
WL_ERR(("No Key Mgmt Info\n"));
}
+ } else if (!bcmp(mgmt->list[cnt].oui, WFA_OUI, WFA_OUI_LEN))
+ wpa_auth |= WFA_AUTH_DPP;
}
if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
@@ -9847,6 +11136,11 @@
ret = WPA_AUTH_PSK;
}
break;
+#ifdef WL_SAE
+ case RSN_AKM_SAE_PSK:
+ ret = WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE */
default:
WL_ERR(("No Key Mgmt Info\n"));
}
@@ -10095,7 +11389,6 @@
if (bss->fils_ind_ie) {
MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
+ FILS_INDICATION_IE_TAG_FIXED_LEN);
- bss->fils_ind_ie = NULL;
}
if (ies->wpa_ie != NULL) {
/* WPAIE */
@@ -10120,7 +11413,7 @@
+ WPA_RSN_IE_TAG_FIXED_LEN);
}
}
-#ifdef FILS_SUPPORT
+#ifdef WL_FILS
if (ies->fils_ind_ie) {
bss->fils_ind_ie = MALLOCZ(cfg->osh,
ies->fils_ind_ie->len
@@ -10131,7 +11424,7 @@
+ FILS_INDICATION_IE_TAG_FIXED_LEN);
}
}
-#endif // endif
+#endif /* WL_FILS */
#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
}
#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
@@ -10185,23 +11478,17 @@
WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(cfg->hostapd_ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+ bzero(cfg->hostapd_ssid.SSID, DOT11_MAX_SSID_LEN);
memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
- cfg->hostapd_ssid.SSID_len = info->ssid_len;
+ cfg->hostapd_ssid.SSID_len = (uint32)info->ssid_len;
} else {
/* P2P GO */
- memset(cfg->p2p->ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+ bzero(cfg->p2p->ssid.SSID, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
- cfg->p2p->ssid.SSID_len = info->ssid_len;
+ cfg->p2p->ssid.SSID_len = (uint32)info->ssid_len;
}
}
- if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
- if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
- WL_ERR(("failed to set hidden : %d\n", err));
- WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
- }
-
return err;
}
#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
@@ -10211,7 +11498,7 @@
{
s32 err = BCME_OK;
- memset(ies, 0, sizeof(struct parsed_ies));
+ bzero(ies, sizeof(struct parsed_ies));
/* find the WPSIE */
if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
@@ -10251,16 +11538,11 @@
{
s32 err = BCME_OK;
s32 infra = 1;
- s32 ap = 1;
+ s32 ap = 0;
s32 pm;
- s32 is_rsdb_supported = BCME_ERROR;
s32 bssidx;
s32 apsta = 0;
- is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
- if (is_rsdb_supported < 0)
- return (-ENODEV);
-
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return -EINVAL;
@@ -10268,40 +11550,60 @@
WL_INFORM_MEM(("[%s] Bringup SoftAP on bssidx:%d \n", dev->name, bssidx));
- /* AP on primary Interface */
- if (bssidx == 0) {
- if (is_rsdb_supported) {
- if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
- WL_IF_TYPE_AP, 0, NULL)) < 0) {
- WL_ERR(("wl add_del_bss returned error:%d\n", err));
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
+ WL_IF_TYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl add_del_bss returned error:%d\n", err));
+ return err;
+ }
+
+ /*
+ * On older chips the "bss" iovar does not support a bsscfg role
+ * change/upgrade, yet it still returns BCME_OK on the attempt.
+ * Hence the traditional method below is used to handle the same case.
+ */
+
+ if ((err = wldev_ioctl_get(dev,
+ WLC_GET_AP, &ap, sizeof(s32))) < 0) {
+ WL_ERR(("Getting AP mode failed %d \n", err));
+ return err;
+ }
+
+ if (!ap) {
+ /* AP mode switch not supported. Try setting up AP explicitly */
+ err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get apsta %d\n", err));
+ return err;
+ }
+ if (apsta == 0) {
+ /* If apsta is not set, set it */
+
+ /* Check for any connected interfaces before wl down */
+ if (wl_get_drv_status_all(cfg, CONNECTED) > 0) {
+ WL_ERR(("Concurrent i/f operational. can't do wl down"));
+ return BCME_ERROR;
+ }
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
return err;
}
- } else if (is_rsdb_supported == 0) {
- /* AP mode switch not supported. Try setting up AP explicitly */
- err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
- if (unlikely(err)) {
- WL_ERR(("Could not get apsta %d\n", err));
+ err = wldev_iovar_setint(dev, "apsta", 1);
+ if (err < 0) {
+ WL_ERR(("wl apsta 0 error %d\n", err));
+ return err;
}
- if (apsta == 0) {
- /* If apsta is not set, set it */
- err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
- if (err < 0) {
- WL_ERR(("WLC_DOWN error %d\n", err));
- return err;
- }
- err = wldev_iovar_setint(dev, "apsta", 0);
- if (err < 0) {
- WL_ERR(("wl apsta 0 error %d\n", err));
- return err;
- }
- if ((err = wldev_ioctl_set(dev,
+ ap = 1;
+ if ((err = wldev_ioctl_set(dev,
WLC_SET_AP, &ap, sizeof(s32))) < 0) {
- WL_ERR(("setting AP mode failed %d \n", err));
- return err;
- }
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
}
}
+ }
+ if (bssidx == 0) {
pm = 0;
if ((err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm))) != 0) {
WL_ERR(("wl PM 0 returned error:%d\n", err));
@@ -10313,12 +11615,6 @@
WL_ERR(("SET INFRA error %d\n", err));
return err;
}
- } else {
- if ((err = wl_cfg80211_add_del_bss(cfg, dev,
- bssidx, WL_IF_TYPE_AP, 0, NULL)) < 0) {
- WL_ERR(("wl bss ap returned error:%d\n", err));
- return err;
- }
}
/* On success, mark AP creation in progress. */
@@ -10339,7 +11635,6 @@
bool is_bssup = false;
s32 infra = 1;
s32 join_params_size = 0;
- s32 ap = 1;
s32 wsec;
#ifdef DISABLE_11H_SOFTAP
s32 spect = 0;
@@ -10349,10 +11644,11 @@
#endif /* SOFTAP_UAPSD_OFF */
s32 err = BCME_OK;
s32 is_rsdb_supported = BCME_ERROR;
- u32 timeout;
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ long timeout;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+#ifndef IGUANA_LEGACY_CHIPS
+ s32 ap = 1;
+#endif // endif
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
@@ -10376,7 +11672,11 @@
if (dev_role == NL80211_IFTYPE_P2P_GO) {
is_bssup = wl_cfg80211_bss_isup(dev, bssidx);
if (!is_bssup && (ies->wpa2_ie != NULL)) {
-
+ err = wldev_iovar_setint_bsscfg(dev, "mpc", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("MPC setting failed, ret=%d\n", err));
+ goto exit;
+ }
err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
bssidx, &cfg->ioctl_buf_sync);
@@ -10385,6 +11685,17 @@
goto exit;
}
+#ifdef MFP
+ err = wldev_iovar_setint_bsscfg(dev, "mfp", cfg->mfp_mode, bssidx);
+ if (err < 0) {
+ WL_ERR(("MFP Setting failed. ret = %d \n", err));
+ /* If fw doesn't support mfp, Ignore the error */
+ if (err != BCME_UNSUPPORTED) {
+ goto exit;
+ }
+ }
+#endif /* MFP */
+
if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
WL_ERR(("GO Bring up error %d\n", err));
goto exit;
@@ -10423,6 +11734,13 @@
}
#endif /* DISABLE_11H_SOFTAP */
+#ifdef WL_DISABLE_HE_SOFTAP
+ err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, FALSE);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
#ifdef SOFTAP_UAPSD_OFF
err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
@@ -10430,13 +11748,13 @@
WL_ERR(("failed to disable uapsd, error=%d\n", err));
}
#endif /* SOFTAP_UAPSD_OFF */
-
+#ifndef IGUANA_LEGACY_CHIPS
err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
if (unlikely(err)) {
WL_ERR(("WLC_UP error (%d)\n", err));
goto exit;
}
-
+#endif // endif
#ifdef MFP
if (cfg->bip_pos) {
err = wldev_iovar_setbuf_bsscfg(dev, "bip",
@@ -10444,6 +11762,18 @@
WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
if (err < 0) {
WL_ERR(("bip set error %d\n", err));
+#if defined(IGUANA_LEGACY_CHIPS)
+ if (wl_customer6_legacy_chip_check(cfg,
+ bcmcfg_to_prmry_ndev(cfg))) {
+ /* Ignore the bip error: some older firmware doesn't
+ * support the bip iovar or returns BCME_NOTUP when trying
+ * to set bip from the AP bring-up context. Such firmware
+ * includes bip in the RSN IE by default, so it is okay to
+ * ignore the error.
+ */
+ err = BCME_OK;
+ } else
+#endif // endif
{
goto exit;
}
@@ -10462,7 +11792,7 @@
sizeof(struct wl_wsec_key), cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
/* clear the key after use */
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
goto exit;
@@ -10470,22 +11800,20 @@
}
#ifdef MFP
- if (cfg->mfp_mode) {
- /* This needs to go after wsec otherwise the wsec command will
- * overwrite the values set by MFP
- */
- err = wldev_iovar_setint_bsscfg(dev, "mfp", cfg->mfp_mode, bssidx);
- if (err < 0) {
- WL_ERR(("MFP Setting failed. ret = %d \n", err));
- /* If fw doesn't support mfp, Ignore the error */
- if (err != BCME_UNSUPPORTED) {
- goto exit;
- }
+ /* This needs to go after wsec otherwise the wsec command will
+ * overwrite the values set by MFP
+ */
+ err = wldev_iovar_setint_bsscfg(dev, "mfp", cfg->mfp_mode, bssidx);
+ if (err < 0) {
+ WL_ERR(("MFP Setting failed. ret = %d \n", err));
+ /* If fw doesn't support mfp, Ignore the error */
+ if (err != BCME_UNSUPPORTED) {
+ goto exit;
}
}
#endif /* MFP */
- memset(&join_params, 0, sizeof(join_params));
+ bzero(&join_params, sizeof(join_params));
/* join parameters starts with ssid */
join_params_size = sizeof(join_params.ssid);
join_params.ssid.SSID_len = MIN(cfg->hostapd_ssid.SSID_len,
@@ -10503,13 +11831,10 @@
WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
}
- if (bssidx != 0) {
- /* AP on Virtual Interface */
if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
WL_ERR(("AP Bring up error %d\n", err));
goto exit;
}
- }
} else {
WL_ERR(("Wrong interface type %d\n", dev_role));
@@ -10526,6 +11851,11 @@
err = -ERESTARTSYS;
goto exit;
}
+ if (dhd_query_bus_erros(dhdp)) {
+ err = -ENODEV;
+ goto exit;
+ }
+ dhdp->iface_op_failed = TRUE;
#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
if (dhdp->memdump_enabled) {
dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
@@ -10539,7 +11869,7 @@
exit:
if (cfg->wep_key.len) {
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
}
#ifdef MFP
@@ -10581,7 +11911,7 @@
}
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = info->proberesp_ies_len;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
@@ -10589,8 +11919,8 @@
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = info->probe_resp_len -
- offsetof(const struct ieee80211_mgmt, u.probe_resp.variable);
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+ offsetof(const struct ieee80211_mgmt, u.probe_resp.variable));
}
}
/* Parse Probe Response IEs */
@@ -10625,7 +11955,7 @@
}
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = info->proberesp_ies_len;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
@@ -10633,19 +11963,30 @@
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = info->probe_resp_len -
- offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+ offsetof(struct ieee80211_mgmt, u.probe_resp.variable));
}
}
/* Set Probe Response IEs to FW */
if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
- WL_ERR(("Set Probe Resp IE Failed \n"));
+ WL_ERR(("Set Probe Resp Vndr IE Failed \n"));
} else {
WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
}
+ /* Set Assoc Response IEs to FW */
+ vndr = (const u8 *)info->assocresp_ies;
+ vndr_ie_len = (uint32)info->assocresp_ies_len;
+
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_ASSOCRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+ WL_ERR(("Set Assoc Resp Vndr IE Failed \n"));
+ } else {
+ WL_DBG(("Applied Vndr IEs for Assoc Resp \n"));
+ }
+
return err;
}
#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
@@ -10800,7 +12141,6 @@
update_bss = true;
MFREE(cfg->osh, bss->rsn_ie,
bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = NULL;
bss->wpa_ie = MALLOCZ(cfg->osh,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
if (bss->wpa_ie) {
@@ -10926,7 +12266,11 @@
/* need to guarantee EAP-Failure send out before deauth */
dhd_wait_pend8021x(dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ scb_val.val = cpu_to_le32(params->reason_code);
+#else
scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+#endif // endif
err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
sizeof(scb_val_t));
if (err < 0) {
@@ -10964,15 +12308,23 @@
struct station_parameters *params)
#endif // endif
{
- int err;
-#ifdef DHD_LOSSLESS_ROAMING
+ int err = BCME_OK;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#endif // endif
+#ifdef WBTEXT
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* WBTEXT */
WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
"sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
params->sta_flags_mask, params->sta_flags_set, dev->name));
+ if ((wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS) &&
+ !(wl_get_drv_status(cfg, CONNECTED, dev))) {
+ /* Return error indicating not in connected state */
+ WL_ERR(("Ignore SCB_AUTHORIZE/DEAUTHORIZE in non connected state\n"));
+ return -ENOTSUPP;
+ }
+
/* Processing only authorize/de-authorize flag for now */
if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
@@ -10996,10 +12348,23 @@
} else {
WL_INFORM_MEM(("[%s] WLC_SCB_AUTHORIZE " MACDBG "\n",
dev->name, MAC2STRDBG(mac)));
+#ifdef WL_WPS_SYNC
+ wl_wps_session_update(dev, WPS_STATE_AUTHORIZE, mac);
+#endif /* WL_WPS_SYNC */
}
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
#endif // endif
+#ifdef WBTEXT
+ /* send nbr request or BTM query to update RCC
+ * after 4-way handshake is completed
+ */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ dhdp->wbtext_support) {
+ wl_cfg80211_wbtext_update_rcc(cfg, dev);
+ }
+#endif /* WBTEXT */
+
return err;
}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
@@ -11070,10 +12435,21 @@
s32 bssidx = 0;
u32 dev_role = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#ifdef WL11U
+ bcm_tlv_t *interworking_ie;
+ u32 iw_ie_len = 0;
+ u8 iw_ie[IW_IES_MAX_BUF_LEN];
+#endif // endif
+#ifdef ENABLE_HOGSQS
+ struct ieee80211_channel *chan_h;
+#endif /* ENABLE_HOGSQS */
WL_DBG(("Enter \n"));
#if defined(SUPPORT_RANDOM_MAC_SCAN)
- wl_cfg80211_scan_mac_disable(dev);
+ /* Disable scanmac if enabled */
+ if (cfg->scanmac_enabled) {
+ wl_cfg80211_scan_mac_disable(dev);
+ }
#endif /* SUPPORT_RANDOM_MAC_SCAN */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
@@ -11087,9 +12463,9 @@
dev_role = NL80211_IFTYPE_AP;
dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
err = dhd_ndo_enable(dhd, FALSE);
- WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err));
+ WL_DBG(("Disabling NDO on Hostapd mode %d\n", err));
if (err) {
- WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err));
+ WL_ERR(("Disabling NDO Failed %d\n", err));
}
#ifdef PKT_FILTER_SUPPORT
/* Disable packet filter */
@@ -11121,7 +12497,7 @@
}
#endif /* WLTDLS */
- if (!check_dev_role_integrity(cfg, dev_role)) {
+ if (!check_dev_role_integrity(cfg, wl_get_mode_by_netdev(cfg, dev), dev_role)) {
err = -EINVAL;
goto fail;
}
@@ -11147,11 +12523,22 @@
goto fail;
}
- if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
- dev_role, bssidx, info->privacy)) < 0)
+#ifdef BCMWAPI_WPI
+ if (info->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_SMS4) {
+ wl_validate_wapisecurity(dev, bssidx);
+ }
+ else
+#endif // endif
{
- WL_ERR(("Beacon set security failed \n"));
- goto fail;
+ WL_ERR(("%s info->crypto.ciphers_pairwise[0] is not "
+ "WLAN_CIPHER_SUITE_SMS4 \n", __FUNCTION__));
+
+ if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
+ dev_role, bssidx, info->privacy)) < 0)
+ {
+ WL_ERR(("Beacon set security failed \n"));
+ goto fail;
+ }
}
if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
@@ -11159,6 +12546,13 @@
WL_ERR(("Beacon bring up AP/GO failed \n"));
goto fail;
}
+#ifdef BCMSDIO
+ if (dev_role == NL80211_IFTYPE_AP) {
+ dhd_set_role(dhd, WLC_E_IF_ROLE_AP, bssidx);
+ } else if (dev_role == NL80211_IFTYPE_P2P_GO) {
+ dhd_set_role(dhd, WLC_E_IF_ROLE_P2P_GO, bssidx);
+ }
+#endif /* BCMSDIO */
/* Set GC/STA SCB expiry timings. */
if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
@@ -11166,6 +12560,7 @@
goto fail;
}
+ wl_set_drv_status(cfg, CONNECTED, dev);
WL_DBG(("** AP/GO Created **\n"));
#ifdef WL_CFG80211_ACL
@@ -11175,6 +12570,29 @@
}
#endif /* WL_CFG80211_ACL */
+#ifdef WL11U
+ wl_get_iwdata_by_netdev(cfg, dev, iw_ie, &iw_ie_len);
+ /* Add interworking IE from beacon data */
+ if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+ info->beacon.beacon_ies, info->beacon.beacon_ies_len)) != NULL) {
+ err = wl_cfg80211_add_iw_ie(cfg, dev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data, interworking_ie->len);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to add interworking IE"));
+ }
+ } else if (iw_ie_len != 0) {
+ /* we have to clear the IW IE and disable gratuitous ARP */
+ wl_cfg80211_clear_iw_ie(cfg, dev, bssidx);
+
+ (void)wldev_iovar_setint_bsscfg(dev, "grat_arp", 0,
+ bssidx);
+ wl_clear_iwdata_by_netdev(cfg, dev);
+ /* we don't care about error */
+ cfg->wl11u = FALSE;
+ }
+#endif /* WL11U */
+
/* Set IEs to FW */
if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
WL_ERR(("Set IEs failed \n"));
@@ -11189,8 +12607,15 @@
}
}
+ /* Configure hidden SSID */
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
+ if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+ WL_ERR(("failed to set hidden : %d\n", err));
+ WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+ }
+
#ifdef SUPPORT_AP_RADIO_PWRSAVE
- if ((dev_role == NL80211_IFTYPE_AP)) {
+ if (dev_role == NL80211_IFTYPE_AP) {
if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
wl_cfg80211_init_ap_rps(cfg);
} else {
@@ -11198,9 +12623,24 @@
}
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef WL_IRQSET
- dhd_irq_set_affinity(dhd);
-#endif /* WL_IRQSET */
+
+#ifdef ENABLE_HOGSQS
+ chan_h = dev->ieee80211_ptr->preset_chandef.chan;
+ if (chan_h->band == IEEE80211_BAND_5GHZ) {
+ s32 value = 0x0;
+
+ value = M_HOGSQS_CFG;
+ err = wldev_iovar_getint_bsscfg(dev, "hogsqs", &value, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("hogsqs command is failed %d %\n", err));
+ } else {
+ value |= 0x1003; /* enable mitigation */
+ value |= (M_HOGSQS_CFG << 16);
+ (void)wldev_iovar_setint_bsscfg(dev, "hogsqs",
+ value, bssidx);
+ }
+ }
+#endif /* ENABLE_HOGSQS */
fail:
if (err) {
@@ -11254,6 +12694,11 @@
WL_DBG(("Enter \n"));
+ if (wl_cfg80211_get_bus_state(cfg)) {
+ /* since bus is down, iovar will fail. recovery path will bringup the bus. */
+ WL_ERR(("bus is not ready\n"));
+ return BCME_OK;
+ }
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
return (-ENODEV);
@@ -11265,9 +12710,27 @@
if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
dev_role = NL80211_IFTYPE_AP;
WL_DBG(("stopping AP operation\n"));
+#ifdef DHD_BANDSTEER
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ /* Disable bandsteer */
+ cfg->ap_bs = 1;
+ cfg->p2p_bs = 1;
+ dhd_bandsteer_module_deinit(
+ bcmcfg_to_prmry_ndev(cfg), cfg->ap_bs, cfg->p2p_bs);
+ }
+#endif /* DHD_BANDSTEER */
} else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
dev_role = NL80211_IFTYPE_P2P_GO;
WL_DBG(("stopping P2P GO operation\n"));
+#ifdef DHD_BANDSTEER
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ /* Disable bandsteer */
+ cfg->ap_bs = 1;
+ cfg->p2p_bs = 1;
+ dhd_bandsteer_module_deinit(
+ bcmcfg_to_prmry_ndev(cfg), cfg->ap_bs, cfg->p2p_bs);
+ }
+#endif /* DHD_BANDSTEER */
} else {
WL_ERR(("no AP/P2P GO interface is operational.\n"));
return -EINVAL;
@@ -11278,12 +12741,15 @@
return BCME_ERROR;
}
- if (!check_dev_role_integrity(cfg, dev_role)) {
+ if (!check_dev_role_integrity(cfg, wl_get_mode_by_netdev(cfg, dev), dev_role)) {
WL_ERR(("role integrity check failed \n"));
err = -EINVAL;
goto exit;
}
+ /* Free up resources */
+ wl_cfg80211_cleanup_if(dev);
+
/* Clear AP/GO connected status */
wl_clr_drv_status(cfg, CONNECTED, dev);
if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
@@ -11319,6 +12785,12 @@
}
}
+#ifdef WL_DISABLE_HE_SOFTAP
+ if (wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, TRUE) != BCME_OK) {
+ WL_ERR(("failed to set he features\n"));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
#ifdef SUPPORT_AP_RADIO_PWRSAVE
if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
@@ -11338,6 +12810,9 @@
SUPP_LOG(("AP/GO Link down\n"));
exit:
+#ifdef WL11U
+ wl_clear_iwdata_by_netdev(cfg, dev);
+#endif // endif
if (err) {
/* In case of failure, flush fw logs */
wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
@@ -11369,6 +12844,11 @@
u32 dev_role = 0;
s32 bssidx = 0;
bool pbc = 0;
+#ifdef WL11U
+ bcm_tlv_t *interworking_ie;
+ u32 iw_ie_len = 0;
+ u8 iw_ie[IW_IES_MAX_BUF_LEN];
+#endif // endif
WL_DBG(("Enter \n"));
@@ -11386,7 +12866,7 @@
goto fail;
}
- if (!check_dev_role_integrity(cfg, dev_role)) {
+ if (!check_dev_role_integrity(cfg, wl_get_mode_by_netdev(cfg, dev), dev_role)) {
err = -EINVAL;
goto fail;
}
@@ -11403,6 +12883,31 @@
goto fail;
}
+#ifdef WL11U
+ wl_get_iwdata_by_netdev(cfg, dev, iw_ie, &iw_ie_len);
+ /* Add interworking IE from beacon data */
+ if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+ info->beacon_ies, info->beacon_ies_len)) != NULL) {
+ err = wl_cfg80211_add_iw_ie(cfg, dev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data, interworking_ie->len);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to add interworking IE"));
+ }
+ } else if (iw_ie_len != 0) {
+ /* we have to clear the IW IE and disable gratuitous ARP */
+ wl_cfg80211_clear_iw_ie(cfg, dev, bssidx);
+
+ /* we don't bother whether grat_arp gets disabled or not */
+ (void)wldev_iovar_setint_bsscfg(dev, "grat_arp", 0,
+ bssidx);
+ wl_clear_iwdata_by_netdev(cfg, dev);
+ cfg->wl11u = FALSE;
+ } else {
+ WL_DBG(("no update in iw ie\n"));
+ }
+#endif /* WL11U */
+
/* Set IEs to FW */
if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
WL_ERR(("Set IEs failed \n"));
@@ -11474,7 +12979,7 @@
dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
}
- if (!check_dev_role_integrity(cfg, dev_role)) {
+ if (!check_dev_role_integrity(cfg, wl_get_mode_by_netdev(cfg, dev), dev_role)) {
err = -ENODEV;
goto fail;
}
@@ -11492,13 +12997,13 @@
DOT11_MNG_SSID_ID)) != NULL) {
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(&cfg->hostapd_ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+ bzero(&cfg->hostapd_ssid.SSID[0], DOT11_MAX_SSID_LEN);
cfg->hostapd_ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data,
cfg->hostapd_ssid.SSID_len);
} else {
/* P2P GO */
- memset(&cfg->p2p->ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+ bzero(&cfg->p2p->ssid.SSID[0], DOT11_MAX_SSID_LEN);
cfg->p2p->ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, ssid_ie->data,
cfg->p2p->ssid.SSID_len);
@@ -11595,6 +13100,7 @@
}
WL_DBG(("** ADD/SET beacon done **\n"));
+ wl_set_drv_status(cfg, CONNECTED, dev);
fail:
if (err) {
@@ -11663,211 +13169,6 @@
}
#endif /* LINUX_VERSION < VERSION(3,4,0) || WL_COMPAT_WIRELESS */
-#ifdef WL_SCHED_SCAN
-#define PNO_TIME 30
-#define PNO_REPEAT 4
-#define PNO_FREQ_EXPO_MAX 2
-static bool
-is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
-{
- int i;
-
- if (!ssid || !ssid_list)
- return FALSE;
-
- for (i = 0; i < count; i++) {
- if (ssid->ssid_len == ssid_list[i].ssid_len) {
- if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
- return TRUE;
- }
- }
- return FALSE;
-}
-
-static int
-wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
- struct net_device *dev,
- struct cfg80211_sched_scan_request *request)
-{
- ushort pno_time = PNO_TIME;
- int pno_repeat = PNO_REPEAT;
- int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
- wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- struct cfg80211_ssid *ssid = NULL;
- struct cfg80211_ssid *hidden_ssid_list = NULL;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
- int ssid_cnt = 0;
- int i;
- int ret = 0;
- unsigned long flags;
-
- if (!request) {
- WL_ERR(("Sched scan request was NULL\n"));
- return -EINVAL;
- }
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN START\n"));
- WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
- request->n_match_sets, request->n_ssids));
- WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
- request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
-
- if (!request->n_ssids || !request->n_match_sets) {
- WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
- return -EINVAL;
- }
-
- memset(&ssids_local, 0, sizeof(ssids_local));
-
- if (request->n_ssids > 0) {
- hidden_ssid_list = request->ssids;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- return -ENOMEM;
- }
- memset(event_data, 0, alloc_len);
- event_data->tlvs = NULL;
- tlv_len = sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate log_tlv with "
- "length(%d)\n", __func__, tlv_len));
- MFREE(cfg->osh, event_data, alloc_len);
- return -ENOMEM;
- }
- }
- for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
- ssid = &request->match_sets[i].ssid;
- /* No need to include null ssid */
- if (ssid->ssid_len) {
- ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
- (uint32)DOT11_MAX_SSID_LEN);
- memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid,
- ssids_local[ssid_cnt].SSID_len);
- if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
- ssids_local[ssid_cnt].hidden = TRUE;
- WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
- } else {
- ssids_local[ssid_cnt].hidden = FALSE;
- WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
- }
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
- if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
- ssids_local[ssid_cnt].rssi_thresh =
- (int8)request->match_sets[i].rssi_thold;
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
- ssid_cnt++;
- }
- }
-
- if (ssid_cnt) {
- if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
- pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
- WL_ERR(("PNO setup failed!! ret=%d \n", ret));
- ret = -EINVAL;
- goto exit;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- for (i = 0; i < ssid_cnt; i++) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
- tlv_data = event_data->tlvs;
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = ssids_local[i].SSID_len;
- memcpy(tlv_data->value, ssids_local[i].SSID,
- ssids_local[i].SSID_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- event_data, payload_len);
- }
- }
-
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = request;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- } else {
- ret = -EINVAL;
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
- defined(SUPPORT_RANDOM_MAC_SCAN)
- if (!ETHER_ISNULLADDR(request->mac_addr) && !ETHER_ISNULLADDR(request->mac_addr_mask)) {
- ret = wl_cfg80211_scan_mac_enable(dev, request->mac_addr, request->mac_addr_mask);
- /* Ignore if chip doesnt support the feature */
- if (ret < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If feature is not supported, ignore the error (legacy chips) */
- ret = BCME_OK;
- } else {
- WL_ERR(("set random mac failed (%d). Ignore.\n", ret));
- /* Cleanup the states and stop the pno */
- if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
- WL_ERR(("PNO Stop for SSID failed"));
- }
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = NULL;
- cfg->sched_scan_running = FALSE;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- }
- }
- }
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && (defined(SUPPORT_RANDOM_MAC_SCAN)) */
-exit:
- if (event_data) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return ret;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
-static int
-wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
-#else
-static int
-wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN STOP\n"));
-
- if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
- WL_ERR(("PNO Stop for SSID failed"));
- } else {
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
- }
-
- if (cfg->scan_request && cfg->sched_scan_running) {
- WL_PNO((">>> Sched scan running. Aborting it..\n"));
- wl_notify_escan_complete(cfg, dev, true, true);
- }
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = NULL;
- cfg->sched_scan_running = FALSE;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- return 0;
-}
-#endif /* WL_SCHED_SCAN */
-
#ifdef WL_SUPPORT_ACS
/*
* Currently the dump_obss IOVAR is returning string as output so we need to
@@ -12145,6 +13446,20 @@
.set_rekey_data = wl_cfg80211_set_rekey_data,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
#endif /* GTK_OFFLOAD_SUPPORT */
+#if defined(WL_FILS)
+ /* This should only be enabled on kernel versions that support it */
+ .update_connect_params = wl_cfg80211_update_connect_params,
+#endif /* WL_FILS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+ .set_pmk = wl_cfg80211_set_pmk,
+ .del_pmk = wl_cfg80211_del_pmk,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ .channel_switch = wl_cfg80211_channel_switch,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+#ifdef WL_SAE
+ .external_auth = wl_cfg80211_external_auth,
+#endif /* WL_SAE */
};
s32 wl_mode_to_nl80211_iftype(s32 mode)
@@ -12165,6 +13480,33 @@
return err;
}
+s32
+wl_cfg80211_set_country_code(struct net_device *net, char *country_code,
+ bool notify, bool user_enforced, int revinfo)
+{
+ s32 ret = BCME_OK;
+#ifdef WL_NAN
+ struct wireless_dev *wdev = ndev_to_wdev(net);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
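+ /* Disable NAN (reason NAN_COUNTRY_CODE_CHANGE) before programming the new
+ * country code, presumably because the country change alters the set of
+ * channels NAN is allowed to use.
+ */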
+ if (cfg->nan_enable) {
+ mutex_lock(&cfg->if_sync);
+ ret = wl_cfgnan_disable(cfg, NAN_COUNTRY_CODE_CHANGE);
+ mutex_unlock(&cfg->if_sync);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ return ret;
+ }
+ }
+#endif /* WL_NAN */
+ ret = wldev_set_country(net, country_code,
+ notify, user_enforced, revinfo);
+ if (ret < 0) {
+ WL_ERR(("set country Failed :%d\n", ret));
+ }
+ return ret;
+}
+
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
static const struct wiphy_wowlan_support brcm_wowlan_support = {
@@ -12179,6 +13521,32 @@
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
#endif /* CONFIG_PM */
+int wl_features_set(u8 *array, uint8 len, u32 ftidx)
+{
+ u8* ft_byte;
+
+ if ((ftidx / 8u) >= len)
+ return BCME_BADARG;
+
+ ft_byte = &array[ftidx / 8u];
+ *ft_byte |= BIT(ftidx % 8u);
+ return BCME_OK;
+}
+
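/*
 * Illustrative, standalone sketch (not part of this patch) of the byte/bit
 * mapping performed by wl_features_set() above: feature index ftidx lands in
 * byte (ftidx / 8) and bit (ftidx % 8) of the feature array. The BIT() macro
 * and plain C types below are local stand-ins for the driver's definitions;
 * the return values stand in for BCME_BADARG/BCME_OK.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static int features_set(uint8_t *array, uint8_t len, uint32_t ftidx)
{
	if ((ftidx / 8u) >= len)
		return -1;				/* index out of range: BCME_BADARG */
	array[ftidx / 8u] |= BIT(ftidx % 8u);		/* set bit (ftidx % 8) in byte (ftidx / 8) */
	return 0;					/* BCME_OK */
}

int main(void)
{
	uint8_t fts[4] = {0};

	features_set(fts, sizeof(fts), 13);	/* index 13 -> byte 1, bit 5 */
	printf("fts[1] = 0x%02x\n", fts[1]);	/* prints 0x20 */
	return 0;
}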
+#ifdef WL_SAE
+static s32 wl_wiphy_update_sae(struct wiphy *wiphy, dhd_pub_t *dhd)
+{
+
+ if (FW_SUPPORTED(dhd, sae_ext)) {
+ WL_DBG(("%s extsae enabled\n", __FUNCTION__));
+ wiphy->features |= NL80211_FEATURE_SAE;
+ } else if ((FW_SUPPORTED(dhd, sae)) && (FW_SUPPORTED(dhd, idsup))) {
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD);
+ WL_DBG(("%s intsae enabled\n", __FUNCTION__));
+ }
+ return BCME_OK;
+}
+#endif /* WL_SAE */
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
{
s32 err = 0;
@@ -12215,10 +13583,11 @@
wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 0)) */
-
+#else
+ wdev->wiphy->max_sched_scan_reqs = 1;
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
#endif /* WL_SCHED_SCAN */
wdev->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION)
@@ -12261,23 +13630,16 @@
WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
#endif // endif
WIPHY_FLAG_4ADDR_STATION;
-#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && ((LINUX_VERSION_CODE >= \
- KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
/*
- * If FW ROAM flag is advertised, upper layer wouldn't provide
- * the bssid & freq in the connect command. This will result a
- * delay in initial connection time due to firmware doing a full
- * channel scan to figure out the channel & bssid. However kernel
- * ver >= 3.15, provides bssid_hint & freq_hint and hence kernel
- * ver >= 3.15 won't have any issue. So if this flags need to be
- * advertised for kernel < 3.15, suggest to use RCC along with it
- * to avoid the initial connection delay.
+ * If FW ROAM flag is advertised, upper layer doesn't provide the
+ * bssid & freq in the connect command. However, kernel ver >= 3.15,
+ * provides bssid_hint & freq_hint which can be used by the firmware.
+ * fw_ap_select variable determines whether FW selects the AP or the
+ * user space selects the target AP within the given ESS.
*/
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
-#endif /* (ROAM_ENABLE || BCMFW_ROAM_ENABLE) && (LINUX_VERSION 3.2.0 || WL_COMPAT_WIRELESS) */
-#ifdef UNSET_FW_ROAM_WIPHY_FLAG
- wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM;
-#endif /* UNSET_FW_ROAM_WIPHY_FLAG */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_OFFCHAN_TX;
@@ -12324,8 +13686,10 @@
wdev->wiphy->wowlan = &brcm_wowlan_support;
/* If this is not provided cfg stack will get disconnect
* during suspend.
+ * Note: wiphy->wowlan_config is freed by the cfg80211 layer,
+ * so use kmalloc() instead of MALLOC(osh) to avoid a false alarm.
*/
- brcm_wowlan_config = MALLOC(dhd->osh, sizeof(struct cfg80211_wowlan));
+ brcm_wowlan_config = kmalloc(sizeof(struct cfg80211_wowlan), GFP_KERNEL);
if (brcm_wowlan_config) {
brcm_wowlan_config->disconnect = true;
brcm_wowlan_config->gtk_rekey_failure = true;
@@ -12368,13 +13732,14 @@
WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#ifdef WL_FILS
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
+#endif /* WL_FILS */
- /* Now we can register wiphy with cfg80211 module */
- err = wiphy_register(wdev->wiphy);
- if (unlikely(err < 0)) {
- WL_ERR(("Couldn not register wiphy device (%d)\n", err));
- wiphy_free(wdev->wiphy);
- }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ wdev->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wdev->wiphy->max_num_csa_counters = WL_MAX_NUM_CSA_COUNTERS;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
@@ -12388,6 +13753,39 @@
wdev->wiphy->max_sched_scan_plans = 1; /* multiple plans not supported */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+#ifdef WL_SAE
+ wdev->wiphy->features |= NL80211_FEATURE_SAE;
+#endif /* WL_SAE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && defined(BCMSUP_4WAY_HANDSHAKE)
+ if (FW_SUPPORTED(dhd, idsup)) {
+ err = wiphy_ext_feature_set(wdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
+ if (err) {
+ return err;
+ }
+ err = wiphy_ext_feature_set(wdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ if (err) {
+ return err;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) && defined(BCMSUP_4WAY_HANDSHAKE) */
+#ifdef WL_SCAN_TYPE
+ /* These scan types will be mapped to default scan on non-supported chipset */
+ /* Advertise scan type capability. */
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_SPAN_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_POWER_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN);
+ wdev->wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
+#endif /* WL_SCAN_TYPE */
+
+ /* Now we can register wiphy with cfg80211 module */
+ err = wiphy_register(wdev->wiphy);
+ if (unlikely(err < 0)) {
+ WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+ wiphy_free(wdev->wiphy);
+ }
+
return err;
}
@@ -12407,12 +13805,7 @@
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
/* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */
- WL_DBG(("wl_free_wdev Clearing wowlan Config \n"));
- if (wdev->wiphy->wowlan_config) {
- MFREE(cfg->osh, wdev->wiphy->wowlan_config,
- sizeof(struct cfg80211_wowlan));
- wdev->wiphy->wowlan_config = NULL;
- }
+ WL_DBG(("clear wowlan\n"));
wdev->wiphy->wowlan = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
wiphy_unregister(wdev->wiphy);
@@ -12431,7 +13824,7 @@
*/
}
-static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
{
struct wl_scan_results *bss_list;
wl_bss_info_t *bi = NULL; /* must be initialized */
@@ -12463,7 +13856,7 @@
return err;
}
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool roam)
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid)
{
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_mgmt *mgmt;
@@ -12483,11 +13876,18 @@
u32 freq;
s32 err = 0;
gfp_t aflags;
+ u8 tmp_buf[IEEE80211_MAX_SSID_LEN + 1];
if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
WL_DBG(("Beacon is larger than buffer. Discarding\n"));
return err;
}
+
+ if (bi->SSID_len > IEEE80211_MAX_SSID_LEN) {
+ WL_ERR(("wrong SSID len:%d\n", bi->SSID_len));
+ return -EINVAL;
+ }
+
aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
notif_bss_info = (struct wl_cfg80211_bss_info *)MALLOCZ(cfg->osh,
sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
@@ -12523,7 +13923,7 @@
beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
wl_rst_ie(cfg);
- wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, update_ssid);
wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
offsetof(struct wl_cfg80211_bss_info, frame_buf));
@@ -12548,8 +13948,10 @@
+ sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
return -EINVAL;
}
+ memcpy(tmp_buf, bi->SSID, bi->SSID_len);
+ tmp_buf[bi->SSID_len] = '\0';
WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
- "mgmt_type %d frame_len %d\n", bi->SSID,
+ "mgmt_type %d frame_len %d\n", tmp_buf,
notif_bss_info->rssi, notif_bss_info->channel,
mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
notif_bss_info->frame_len));
@@ -12584,7 +13986,7 @@
(cfg->sched_scan_req && !cfg->scan_request)) {
alloc_len = sizeof(log_conn_event_t) + IEEE80211_MAX_SSID_LEN + sizeof(uint16) +
sizeof(int16);
- event_data = (log_conn_event_t *)MALLOCZ(cfg->osh, alloc_len);
+ event_data = (log_conn_event_t *)MALLOCZ(dhdp->osh, alloc_len);
if (!event_data) {
WL_ERR(("%s: failed to allocate the log_conn_event_t with "
"length(%d)\n", __func__, alloc_len));
@@ -12655,11 +14057,13 @@
int ocl_enable = 0;
err = wldev_iovar_setint(ndev, "ocl_enable", ocl_enable);
if (err != 0) {
- WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d failed %d\n",
- __FUNCTION__, ocl_enable, err));
+ WL_ERR(("[WIFI_SEC] wl_is_linkup: Set ocl_enable %d"
+ " failed %d\n",
+ ocl_enable, err));
} else {
- WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d succeeded %d\n",
- __FUNCTION__, ocl_enable, err));
+ WL_ERR(("[WIFI_SEC] wl_is_linkup: Set ocl_enable %d"
+ " succeeded %d\n",
+ ocl_enable, err));
}
}
#endif /* CUSTOM_SET_OCLOFF */
@@ -12690,41 +14094,6 @@
return false;
}
-#ifdef WL_LASTEVT
-static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data)
-{
- u32 event = ntoh32(e->event_type);
- u16 flags = ntoh16(e->flags);
- wl_last_event_t *last_event = (wl_last_event_t *)data;
- u32 len = ntoh32(e->datalen);
-
- if (event == WLC_E_DEAUTH_IND ||
- event == WLC_E_DISASSOC_IND ||
- event == WLC_E_DISASSOC ||
- event == WLC_E_DEAUTH) {
- WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
- return true;
- } else if (event == WLC_E_LINK) {
- if (!(flags & WLC_EVENT_MSG_LINK)) {
- if (last_event && len > 0) {
- u32 current_time = last_event->current_time;
- u32 timestamp = last_event->timestamp;
- u32 event_type = last_event->event.event_type;
- u32 status = last_event->event.status;
- u32 reason = last_event->event.reason;
-
- WL_ERR(("Last roam event before disconnection : current_time %d,"
- " time %d, type %d, status %d, reason %d\n",
- current_time, timestamp, event_type, status, reason));
- }
- WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
- return true;
- }
- }
-
- return false;
-}
-#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
u32 event = ntoh32(e->event_type);
@@ -12745,7 +14114,6 @@
return false;
}
-#endif /* WL_LASTEVT */
static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
@@ -12756,27 +14124,151 @@
return true;
if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
return true;
+ if (event == WLC_E_ASSOC_RESP_IE && status != WLC_E_STATUS_SUCCESS)
+ return true;
return false;
}
-/* The mainline kernel >= 3.2.0 has support for indicating new/del station
- * to AP/P2P GO via events. If this change is backported to kernel for which
- * this driver is being built, then define WL_CFG80211_STA_EVENT. You
- * should use this new/del sta event mechanism for BRCM supplicant >= 22.
- */
+#ifdef WL_SAE
static s32
-wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+wl_cfg80211_event_sae_key(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ wl_sae_key_info_t *sae_key)
+{
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ int err = BCME_OK;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+ BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+ BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ err = BCME_NOMEM;
+ goto done;
+ }
+
+ WL_INFORM_MEM(("Received Sae Key event for "MACDBG" key length %x %x",
+ MAC2STRDBG(sae_key->peer_mac), sae_key->pmk_len, sae_key->pmkid_len));
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PEER_MAC, ETHER_ADDR_LEN, sae_key->peer_mac);
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PMK, sae_key->pmk_len, sae_key->pmk);
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PMKID, sae_key->pmkid_len, sae_key->pmkid);
+ cfg80211_vendor_event(skb, kflags);
+
+done:
+ return err;
+}
+
+static s32
+wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *event, void *data)
+{
+ int err = BCME_OK;
+ uint status = ntoh32(event->status);
+ wl_auth_event_t *auth_data;
+ wl_sae_key_info_t sae_key;
+ uint16 tlv_buf_len;
+
+ if (status == WLC_E_STATUS_SUCCESS) {
+ auth_data = (wl_auth_event_t *)data;
+ if (auth_data->version != WL_AUTH_EVENT_DATA_V1) {
+ WL_ERR(("unknown auth event data version %x\n",
+ auth_data->version));
+ err = BCME_VERSION;
+ goto done;
+ }
+
+ tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V1;
+
+ /* check if PMK info present */
+ sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmk || !sae_key.pmk_len) {
+ WL_ERR(("Mandatory PMK info not present"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ /* check if PMKID info present */
+ sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmkid || !sae_key.pmkid_len) {
+ WL_ERR(("Mandatory PMKID info not present\n"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ memcpy_s(sae_key.peer_mac, ETHER_ADDR_LEN, event->addr.octet, ETHER_ADDR_LEN);
+ err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
+ if (err) {
+ WL_ERR(("Failed to event sae key info\n"));
+ }
+ } else {
+ WL_ERR(("sae auth status failure:%d\n", status));
+ }
+done:
+ return err;
+}
+#endif /* WL_SAE */
+
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 reason = ntoh32(e->reason);
+ u32 event = ntoh32(e->event_type);
+#ifdef WL_SAE
+ uint auth_type = ntoh32(e->auth_type);
+#endif /* WL_SAE */
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ WL_DBG(("event type : %d, reason : %d\n", event, reason));
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ (void)memcpy_s(&cfg->event_auth_assoc, sizeof(wl_event_msg_t),
+ e, sizeof(wl_event_msg_t));
+ WL_ERR(("event=%d status %d reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ ntoh32(cfg->event_auth_assoc.status),
+ ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ if (sec) {
+ switch (event) {
+ case WLC_E_ASSOC:
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ sec->auth_assoc_res_status = reason;
+#ifdef WL_SAE
+ if ((event == WLC_E_AUTH || event == WLC_E_AUTH_IND) &&
+ auth_type == DOT11_SAE) {
+ wl_bss_handle_sae_auth(cfg, ndev, e, data);
+ }
+#endif /* WL_SAE */
+ break;
+ default:
+ break;
+ }
+ } else {
+ WL_ERR(("sec is NULL\n"));
+ }
+ return 0;
+}
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
+ && !defined(WL_COMPAT_WIRELESS)) || defined(WL_CFG80211_AP_RX_MGMT_DISCONNECT)
+static s32
+wl_notify_connect_status_ap_rx_mgmt(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
s32 err = 0;
u32 event = ntoh32(e->event_type);
u32 reason = ntoh32(e->reason);
u32 len = ntoh32(e->datalen);
- u32 status = ntoh32(e->status);
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
bool isfree = false;
u8 *mgmt_frame;
u8 bsscfgidx = e->bsscfgidx;
@@ -12784,72 +14276,21 @@
s32 channel;
u8 *body = NULL;
u16 fc = 0;
- u32 body_len = 0;
struct ieee80211_supported_band *band;
struct ether_addr da;
struct ether_addr bssid;
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
channel_info_t ci;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
-#else
- struct station_info sinfo;
-#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
-#ifdef BIGDATA_SOFTAP
- dhd_pub_t *dhdp;
-#endif /* BIGDATA_SOFTAP */
- WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
- ndev->name, event, ntoh32(e->status), reason));
- /* if link down, bsscfg is disabled. */
- if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
- wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
- wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- WL_INFORM_MEM(("AP mode link down !! \n"));
- complete(&cfg->iface_disable);
- return 0;
- }
-
- if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
- (reason == WLC_E_REASON_INITIAL_ASSOC) &&
- (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
- if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
- /* AP/GO brought up successfull in firmware */
- WL_INFORM_MEM(("AP/GO Link up\n"));
- wl_set_drv_status(cfg, AP_CREATED, ndev);
- wake_up_interruptible(&cfg->netif_change_event);
-#ifdef BIGDATA_SOFTAP
- wl_ap_stainfo_init(cfg);
-#endif /* BIGDATA_SOFTAP */
- return 0;
- }
- }
-
- if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
- WL_DBG(("event %s(%d) status %d reason %d\n",
- bcmevent_get_name(event), event, ntoh32(e->status), reason));
- }
-
-#ifdef BIGDATA_SOFTAP
- if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) {
- WL_ERR(("AP link down - skip get sta data\n"));
- } else {
- dhdp = (dhd_pub_t *)(cfg->pub);
- if (dhdp && dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
- dhd_schedule_gather_ap_stadata(cfg, ndev, e);
- }
- }
-#endif /* BIGDATA_SOFTAP */
-
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
WL_DBG(("Enter \n"));
if (!len && (event == WLC_E_DEAUTH)) {
len = 2; /* reason code field */
data = &reason;
}
if (len) {
- body = (u8 *)MALLOCZ(cfg->osh, len);
+ body = kzalloc(len, GFP_KERNEL);
+
if (body == NULL) {
WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
return WL_INVALID;
@@ -12858,15 +14299,15 @@
memset(&bssid, 0, ETHER_ADDR_LEN);
WL_DBG(("Enter event %d ndev %p\n", event, ndev));
if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
- MFREE(cfg->osh, body, len);
+ kfree(body);
return WL_INVALID;
}
if (len)
memcpy(body, data, len);
wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx, NULL);
- memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
memset(&bssid, 0, sizeof(bssid));
err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
switch (event) {
@@ -12891,7 +14332,7 @@
}
memset(&ci, 0, sizeof(ci));
if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
- MFREE(cfg->osh, body, len);
+ kfree(body);
return err;
}
@@ -12902,9 +14343,8 @@
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
WL_ERR(("No valid band"));
- if (body) {
- MFREE(cfg->osh, body, len);
- }
+ if (body)
+ kfree(body);
return -EINVAL;
}
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
@@ -12913,42 +14353,116 @@
#else
freq = ieee80211_channel_to_frequency(channel, band->band);
#endif // endif
- body_len = len;
err = wl_frame_get_mgmt(cfg, fc, &da, &e->addr, &bssid,
&mgmt_frame, &len, body);
if (err < 0)
goto exit;
isfree = true;
- if ((event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) ||
- (event == WLC_E_DISASSOC_IND) ||
- ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH))) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
- }
-
-exit:
- if (isfree) {
- MFREE(cfg->osh, mgmt_frame, len);
- }
- if (body) {
- MFREE(cfg->osh, body, body_len);
- }
-#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
- sinfo.filled = 0;
if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
reason == DOT11_SC_SUCCESS) {
+ CFG80211_RX_MGMT(ndev, freq, 0, mgmt_frame, len,
+ NL80211_RXMGMT_FLAG_ANSWERED, GFP_ATOMIC);
+ } else if (event == WLC_E_DISASSOC_IND) {
+ CFG80211_RX_MGMT(ndev, freq, 0, mgmt_frame, len,
+ NL80211_RXMGMT_FLAG_ANSWERED, GFP_ATOMIC);
+ } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+ CFG80211_RX_MGMT(ndev, freq, 0, mgmt_frame, len,
+ NL80211_RXMGMT_FLAG_ANSWERED, GFP_ATOMIC);
+ }
+
+ exit:
+ if (isfree)
+ kfree(mgmt_frame);
+ if (body)
+ kfree(body);
+
+ return err;
+}
+#endif /* (LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS) || WL_CFG80211_AP_RX_MGMT_DISCONNECT */
+
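/*
 * Hypothetical sketch of what the CFG80211_RX_MGMT() compat wrapper used
 * above could look like; the real definition lives in a driver header that
 * is not part of this hunk. It folds the kernel-version-dependent
 * cfg80211_rx_mgmt() call forms (visible in the lines removed above) into a
 * single macro so every caller can pass signal, flags and gfp uniformly.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
#define CFG80211_RX_MGMT(ndev, freq, sig, frame, len, flags, gfp) \
	cfg80211_rx_mgmt(ndev, freq, sig, frame, len, flags)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
#define CFG80211_RX_MGMT(ndev, freq, sig, frame, len, flags, gfp) \
	cfg80211_rx_mgmt(ndev, freq, sig, frame, len, flags, gfp)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
#define CFG80211_RX_MGMT(ndev, freq, sig, frame, len, flags, gfp) \
	cfg80211_rx_mgmt(ndev, freq, sig, frame, len, gfp)
#else
#define CFG80211_RX_MGMT(ndev, freq, sig, frame, len, flags, gfp) \
	cfg80211_rx_mgmt(ndev, freq, frame, len, gfp)
#endif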
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ u32 status = ntoh32(e->status);
+#ifdef BIGDATA_SOFTAP
+ dhd_pub_t *dhdp;
+#endif /* BIGDATA_SOFTAP */
+
+ WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
+ ndev->name, event, ntoh32(e->status), reason));
+
+ if (event == WLC_E_AUTH_IND) {
+ wl_get_auth_assoc_status(cfg, ndev, e, data);
+ return 0;
+ }
+ /* if link down, bsscfg is disabled. */
+ if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+ wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+ wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+ WL_INFORM_MEM(("AP mode link down !! \n"));
+ complete(&cfg->iface_disable);
+ return 0;
+ }
+
+ if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
+ (reason == WLC_E_REASON_INITIAL_ASSOC) &&
+ (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
+ if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ /* AP/GO brought up successfully in firmware */
+ WL_INFORM_MEM(("AP/GO Link up\n"));
+ wl_set_drv_status(cfg, AP_CREATED, ndev);
+ OSL_SMP_WMB();
+ wake_up_interruptible(&cfg->netif_change_event);
+#ifdef BIGDATA_SOFTAP
+ wl_ap_stainfo_init(cfg);
+#endif /* BIGDATA_SOFTAP */
+#ifdef WL_BCNRECV
+ /* check if fakeapscan is in progress; if so, abort it */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
+#endif /* WL_BCNRECV */
+ return 0;
+ }
+ }
+
+ if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+ WL_DBG(("event %s(%d) status %d reason %d\n",
+ bcmevent_get_name(event), event, ntoh32(e->status), reason));
+ }
+
+#ifdef BIGDATA_SOFTAP
+ if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) {
+ WL_ERR(("AP link down - skip get sta data\n"));
+ } else {
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhdp && dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ dhd_schedule_gather_ap_stadata(cfg, ndev, e);
+ }
+ }
+#endif /* BIGDATA_SOFTAP */
+
+#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+ return wl_notify_connect_status_ap_rx_mgmt(cfg, ndev, e, data);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+ if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+ reason == DOT11_SC_SUCCESS) {
+ u32 len = ntoh32(e->datalen);
+ struct station_info sinfo;
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
memset(&sinfo, 0, sizeof(struct station_info));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) */
+ sinfo.filled = 0;
/* Linux ver >= 4.0 assoc_req_ies_len is used instead of
* STATION_INFO_ASSOC_REQ_IES flag
*/
@@ -12970,9 +14484,13 @@
} else if ((event == WLC_E_DEAUTH_IND) ||
((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED)) ||
(event == WLC_E_DISASSOC_IND)) {
+#if defined(WL_CFG80211_AP_RX_MGMT_DISCONNECT)
+ err = wl_notify_connect_status_ap_rx_mgmt(cfg, ndev, e, data);
+#else
WL_INFORM_MEM(("[%s] del sta event for "MACDBG "\n",
ndev->name, MAC2STRDBG(e->addr.octet)));
cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+#endif /* WL_CFG80211_AP_RX_MGMT_DISCONNECT */
#ifdef WL_WPS_SYNC
wl_wps_session_update(ndev, WPS_STATE_LINKDOWN, e->addr.octet);
#endif /* WL_WPS_SYNC */
@@ -13058,35 +14576,6 @@
#endif /* DHD_ENABLE_BIGDATA_LOGGING */
static s32
-wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e)
-{
- u32 reason = ntoh32(e->reason);
- u32 event = ntoh32(e->event_type);
- struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
- WL_DBG(("event type : %d, reason : %d\n", event, reason));
-
-#if defined(DHD_ENABLE_BIGDATA_LOGGING)
- memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t));
- WL_ERR(("event=%d status %d reason %d \n",
- ntoh32(cfg->event_auth_assoc.event_type),
- ntoh32(cfg->event_auth_assoc.status),
- ntoh32(cfg->event_auth_assoc.reason)));
-#endif /* DHD_ENABLE_BIGDATA_LOGGING */
- if (sec) {
- switch (event) {
- case WLC_E_ASSOC:
- case WLC_E_AUTH:
- sec->auth_assoc_res_status = reason;
- default:
- break;
- }
- } else
- WL_ERR(("sec is NULL\n"));
- return 0;
-}
-
-static s32
wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
@@ -13270,7 +14759,7 @@
int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, struct ether_addr const *mac)
{
s32 err = 0;
- wl_bss_info_t *bi;
+ wl_bss_info_v109_1_t *bi;
uint8 eabuf[ETHER_ADDR_LEN];
u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
char rate_str[4];
@@ -13291,7 +14780,7 @@
/* get BSS information */
- strncpy(cfg->bss_info, "x x x x x x x x x x x x x x x", GET_BSS_INFO_LEN);
+ strlcpy(cfg->bss_info, "x x x x x x x x x x x x x x x", sizeof(cfg->bss_info));
*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
@@ -13310,7 +14799,7 @@
memcpy(eabuf, mac, ETHER_ADDR_LEN);
- bi = (wl_bss_info_t *)(cfg->extra_buf + 4);
+ bi = (wl_bss_info_v109_1_t *)(cfg->extra_buf + 4);
channel = wf_chspec_ctlchan(bi->chanspec);
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
@@ -13326,35 +14815,35 @@
err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
if (err) {
WL_ERR(("Could not get rate (%d)\n", err));
- snprintf(rate_str, sizeof(rate_str), "x"); // Unknown
+ snprintf(rate_str, sizeof(rate_str), "x"); /* Unknown */
} else {
rate = dtoh32(rate);
snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
}
- //supported maximum rate
+ /* supported maximum rate */
supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
if (supported_rate < 12) {
- mode_80211 = 0; //11b maximum rate is 11Mbps. 11b mode
+ mode_80211 = BIGDATA_DOT11_11B_MODE; /* 11b maximum rate is 11Mbps. 11b mode */
} else {
- //It's not HT Capable case.
+ /* It's not HT Capable case. */
if (channel > 14) {
- mode_80211 = 3; // 11a mode
+ mode_80211 = BIGDATA_DOT11_11A_MODE; /* 11a mode */
} else {
- mode_80211 = 1; // 11g mode
+ mode_80211 = BIGDATA_DOT11_11G_MODE; /* 11g mode */
}
}
if (bi->n_cap) {
/* check Rx MCS Map for HT */
nss = 0;
- mode_80211 = 2;
+ mode_80211 = BIGDATA_DOT11_11N_MODE;
for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
- int8 bitmap = 0xFF;
+ int8 bitmap = DOT11_HT_MCS_RATE_MASK;
if (i == MAX_STREAMS_SUPPORTED-1) {
- bitmap = 0x7F;
+ bitmap = DOT11_RATE_MASK;
}
if (bi->basic_mcs[i] & bitmap) {
nss++;
@@ -13364,7 +14853,7 @@
if (bi->vht_cap) {
nss = 0;
- mode_80211 = 4;
+ mode_80211 = BIGDATA_DOT11_11AC_MODE;
for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
@@ -13373,6 +14862,19 @@
}
}
+#if defined(WL11AX)
+ if (bi->he_cap) {
+ nss = 0;
+ mode_80211 = BIGDATA_DOT11_11AX_MODE;
+ for (i = 1; i <= HE_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = HE_MCS_NSS_GET_MCS(i, dtoh32(bi->he_rxmcsmap));
+ if (mcs_map != HE_MCS_CODE_NONE) {
+ nss++;
+ }
+ }
+ }
+#endif /* WL11AX */
+
if (nss) {
nss = nss - 1;
}
@@ -13382,10 +14884,7 @@
if (!bss) {
WL_ERR(("Could not find the AP\n"));
} else {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
#if defined(WL_CFG80211_P2P_DEV_IF)
ie = (u8 *)bss->ies->data;
ie_len = bss->ies->len;
@@ -13393,9 +14892,7 @@
ie = bss->information_elements;
ie_len = bss->len_information_elements;
#endif /* WL_CFG80211_P2P_DEV_IF */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
}
if (ie) {
@@ -13403,7 +14900,7 @@
ie_11u_rel_num = 0;
if (bi->vht_cap) {
- if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ if ((vht_ie = bcm_parse_tlvs(ie, ie_len,
DOT11_MNG_VHT_CAP_ID)) != NULL) {
if (vht_ie->len >= VHT_CAP_IE_LEN) {
ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
@@ -13411,9 +14908,9 @@
}
}
- if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ if ((interworking_ie = bcm_parse_tlvs(ie, ie_len,
DOT11_MNG_INTERWORKING_ID)) != NULL) {
- if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) {
+ if ((tlv_ie = bcm_parse_tlvs(ie, ie_len, DOT11_MNG_VS_ID)) != NULL) {
remained_len = ie_len;
while (tlv_ie) {
@@ -13424,9 +14921,9 @@
vndrie = (vndr_ie_t *) tlv_ie;
if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
- WL_ERR(("%s: invalid vndr ie."
+ WL_ERR(("wl_get_bss_info: invalid vndr ie."
"length is too small %d\n",
- __FUNCTION__, vndrie->len));
+ vndrie->len));
break;
}
@@ -13458,18 +14955,13 @@
}
}
- //0 : None, 1 : OKC, 2 : FT, 3 : CCKM
+ /* 0 : None, 1 : OKC, 2 : FT, 3 : CCKM */
err = wldev_iovar_getint(dev, "wpa_auth", &val);
if (unlikely(err)) {
WL_ERR(("could not get wpa_auth (%d)\n", err));
- snprintf(akm_str, sizeof(akm_str), "x"); // Unknown
+ snprintf(akm_str, sizeof(akm_str), "x"); /* Unknown */
} else {
WL_ERR(("wpa_auth val %d \n", val));
-#if defined(BCMCCX) || defined(BCMEXTCCX)
- if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) {
- snprintf(akm_str, sizeof(akm_str), "3");
- } else
-#endif /* BCMCCX || BCMEXTCCX */
if (val & WPA2_AUTH_FT) {
snprintf(akm_str, sizeof(akm_str), "2");
} else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
@@ -13480,7 +14972,7 @@
}
if (cfg->roam_offload) {
- snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown
+ snprintf(roam_count_str, sizeof(roam_count_str), "x"); /* Unknown */
} else {
snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
}
@@ -13503,7 +14995,7 @@
ie_11u_rel_num, bi->SNR, bi->phy_noise, akm_str, roam_count_str,
support_11kv, flag_11kv);
} else {
- //ie_mu_mimo_cap and ie_11u_rel_num is unknow.
+ /* ie_mu_mimo_cap and ie_11u_rel_num are unknown. */
snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
MACOUI" %d %s %d %s %d %d x x %d %d %s %s x x",
MACOUI2STR(eabuf), freq, wf_chspec_to_bw_str(bi->chanspec),
@@ -13525,11 +15017,11 @@
}
if (total_len < GET_BSS_INFO_LEN) {
- WL_ERR(("%s: Buffer insuffient %d\n", __FUNCTION__, total_len));
+ WL_ERR(("wl_cfg80211_get_bss_info: Buffer insuffient %d\n", total_len));
return -1;
}
- memset(cmd, 0, total_len);
+ bzero(cmd, total_len);
memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
WL_ERR_KERN(("cmd: %s \n", cmd));
@@ -13538,18 +15030,126 @@
}
#endif /* DHD_ENABLE_BIGDATA_LOGGING */
-void wl_cfg80211_disassoc(struct net_device *ndev)
+void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason)
{
scb_val_t scbval;
s32 err;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- memset(&scbval, 0x0, sizeof(scb_val_t));
- scbval.val = htod32(WLAN_REASON_DEAUTH_LEAVING);
+ BCM_REFERENCE(cfg);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
+
+ memset_s(&scbval, sizeof(scb_val_t), 0x0, sizeof(scb_val_t));
+ scbval.val = htod32(reason);
err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
if (err < 0) {
WL_ERR(("WLC_DISASSOC error %d\n", err));
}
}
+void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason)
+{
+ struct net_device *dev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ scb_val_t scb_val;
+ int err;
+ char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+ int num_associated = 0;
+
+ dev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (p2p_is_on(cfg)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return;
+ }
+ }
+
+ assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+ err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+ assoc_maclist, sizeof(mac_buf));
+ if (err < 0)
+ WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+ else
+ num_associated = assoc_maclist->count;
+
+ memset(scb_val.ea.octet, 0xff, ETHER_ADDR_LEN);
+ scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+ scb_val.val = htod32(reason);
+ err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t));
+ if (err < 0) {
+ WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+ }
+
+ if (num_associated > 0)
+ wl_delay(400);
+
+ return;
+}
+/* API to handle a Deauth from the AP.
+* For now we delete the PMKID cache in DHD/FW
+* when the current connection uses SAE authentication.
+*/
+static s32
+wl_cfg80211_handle_deauth_ind(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ int err = BCME_OK;
+#ifdef WL_SAE
+ uint8 bssid[ETHER_ADDR_LEN];
+ struct cfg80211_pmksa pmksa;
+ s32 val = 0;
+
+ err = wldev_iovar_getint(ndev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ goto done;
+ }
+ if (val == WPA3_AUTH_SAE_PSK) {
+ (void)memcpy_s(bssid, ETHER_ADDR_LEN,
+ (const uint8*)&e->addr, ETHER_ADDR_LEN);
+ memset_s(&pmksa, sizeof(pmksa), 0, sizeof(pmksa));
+ pmksa.bssid = bssid;
+ WL_INFORM_MEM(("Deleting the PMKSA for SAE AP "MACDBG,
+ MAC2STRDBG(e->addr.octet)));
+ wl_cfg80211_del_pmksa(cfg->wdev->wiphy, ndev, &pmksa);
+ }
+done:
+#endif /* WL_SAE */
+ return err;
+}
+
+static void
+wl_cache_assoc_resp_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ u32 datalen = ntoh32(e->datalen);
+ u32 event_type = ntoh32(e->event_type);
+
+ if (datalen > VNDR_IE_MIN_LEN &&
+ datalen < VNDR_IE_MAX_LEN &&
+ data) {
+ conn_info->resp_ie_len = datalen;
+ WL_DBG((" assoc resp IES len = %d\n", conn_info->resp_ie_len));
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ (void)memcpy_s(conn_info->resp_ie, sizeof(conn_info->resp_ie),
+ data, datalen);
+
+ WL_INFORM_MEM(("[%s] copied assoc resp ies, sent to upper layer:"
+ "event %d reason=%d ie_len=%d from " MACDBG "\n",
+ ndev->name, event_type, ntoh32(e->reason), datalen,
+ MAC2STRDBG((const u8*)(&e->addr))));
+ }
+}
static s32
wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
@@ -13559,17 +15159,33 @@
struct net_device *ndev = NULL;
s32 err = 0;
u32 event = ntoh32(e->event_type);
+ u32 datalen = ntoh32(e->datalen);
struct wiphy *wiphy = NULL;
struct cfg80211_bss *bss = NULL;
struct wlc_ssid *ssid = NULL;
u8 *bssid = 0;
+ s32 bssidx = 0;
+ u8 *ie_ptr = NULL;
+ uint32 ie_len = 0;
+#ifdef WL_ANALYTICS
+ struct parsed_vndr_ies disco_vndr_ie;
+ struct parsed_vndr_ie_info *vndrie_info = NULL;
+ uint32 i = 0;
+#endif /* WL_ANALYTICS */
+
dhd_pub_t *dhdp;
u32 mode;
int vndr_oui_num = 0;
char vndr_oui[MAX_VNDR_OUI_STR_LEN] = {0, };
bool loc_gen = false;
+#ifdef DHD_LOSSLESS_ROAMING
+ struct wl_security *sec;
+#endif /* DHD_LOSSLESS_ROAMING */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef DHD_LOSSLESS_ROAMING
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#endif /* DHD_LOSSLESS_ROAMING */
dhdp = (dhd_pub_t *)(cfg->pub);
BCM_REFERENCE(dhdp);
@@ -13583,9 +15199,9 @@
} else if (mode == WL_MODE_IBSS) {
err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
} else if (mode == WL_MODE_BSS) {
- WL_INFORM_MEM(("[%s] Mode BSS. event:%d status:%d reason:%d wdev:%p\n",
+ WL_INFORM_MEM(("[%s] Mode BSS. event:%d status:%d reason:%d\n",
ndev->name, ntoh32(e->event_type),
- ntoh32(e->status), ntoh32(e->reason), ndev->ieee80211_ptr));
+ ntoh32(e->status), ntoh32(e->reason)));
if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
/* Join attempt via non-cfg80211 interface.
@@ -13598,9 +15214,16 @@
}
if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
- wl_get_auth_assoc_status(cfg, ndev, e);
+ wl_get_auth_assoc_status(cfg, ndev, e, data);
return 0;
}
+ if (event == WLC_E_ASSOC_RESP_IE) {
+ if (ntoh32(e->status) != WLC_E_STATUS_SUCCESS) {
+ wl_cache_assoc_resp_ies(cfg, ndev, e, data);
+ }
+ return 0;
+ }
+
#if defined(OEM_ANDROID)
DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
#endif // endif
@@ -13610,14 +15233,13 @@
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
WL_INFORM_MEM(("[%s] link up for bssid " MACDBG "\n",
ndev->name, MAC2STRDBG((const u8*)(&e->addr))));
-
if ((event == WLC_E_LINK) &&
(ntoh16(e->flags) & WLC_EVENT_MSG_LINK) &&
!wl_get_drv_status(cfg, CONNECTED, ndev) &&
!wl_get_drv_status(cfg, CONNECTING, ndev)) {
WL_INFORM_MEM(("link up in non-connected/"
"non-connecting state\n"));
- wl_cfg80211_disassoc(ndev);
+ wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
return BCME_OK;
}
@@ -13630,16 +15252,6 @@
}
#endif /* WL_WPS_SYNC */
- if (((event == WLC_E_ROAM) || (event == WLC_E_BSSID)) &&
- !wl_get_drv_status(cfg, CONNECTED, ndev)) {
- /* Roam event in disconnected state. DHD-FW state
- * mismatch. Issue disassoc to clear fw state
- */
- WL_INFORM_MEM(("Roam even in disconnected state."
- " clear fw state\n"));
- wl_cfg80211_disassoc(ndev);
- return BCME_OK;
- }
#ifdef DHD_EVENT_LOG_FILTER
if (event == WLC_E_LINK && ndev == bcmcfg_to_prmry_ndev(cfg)) {
int roam = FALSE;
@@ -13656,44 +15268,65 @@
eth_addr, roam);
}
#endif /* DHD_EVENT_LOG_FILTER */
- if (event == WLC_E_LINK &&
+
#ifdef DHD_LOSSLESS_ROAMING
+ if (event == WLC_E_LINK &&
!cfg->roam_offload &&
-#endif /* DHD_LOSSLESS_ROAMING */
- wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ !IS_AKM_SUITE_FT(sec) &&
+ wl_get_drv_status(cfg, CONNECTED, ndev))
wl_bss_roaming_done(cfg, ndev, e, data);
- } else {
- /* Initial Association */
- wl_bss_connect_done(cfg, ndev, e, data, true);
- if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
- ndev, vndr_oui, ARRAY_SIZE(vndr_oui));
- if (vndr_oui_num > 0) {
- WL_INFORM_MEM(("[%s] vendor oui: %s\n",
- ndev->name, vndr_oui));
- }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
+ ndev, vndr_oui, ARRAY_SIZE(vndr_oui));
+ if (vndr_oui_num > 0) {
+ WL_INFORM_MEM(("[%s] vendor oui: %s\n",
+ ndev->name, vndr_oui));
}
- WL_DBG(("joined in BSS network \"%s\"\n",
- ((struct wlc_ssid *)
- wl_read_prof(cfg, ndev,
- WL_PROF_SSID))->SSID));
}
+
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)wl_read_prof(cfg, ndev,
+ WL_PROF_SSID))->SSID));
#ifdef WBTEXT
if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
- dhdp->wbtext_support && event == WLC_E_SET_SSID) {
+ dhdp->wbtext_support && event == WLC_E_SET_SSID) {
/* set wnm_keepalives_max_idle after association */
wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
- /* send nbr request or BTM query to update RCC */
- wl_cfg80211_wbtext_update_rcc(cfg, ndev);
}
#endif /* WBTEXT */
}
wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
- } else if (WL_IS_LINKDOWN(cfg, e, data) ||
+#if defined(IGUANA_LEGACY_CHIPS)
+ } else if (wl_is_linkdown(cfg, e)) {
+ /* Legacy chips like 4350 send failure status for WLC_E_SET_SSID even in
+ * case of a successful connection. Since these firmwares are in production,
+ * a firmware change is avoided.
+ */
+#else
+ } else if (wl_is_linkdown(cfg, e) ||
((event == WLC_E_SET_SSID) &&
(ntoh32(e->status) != WLC_E_STATUS_SUCCESS) &&
(wl_get_drv_status(cfg, CONNECTED, ndev)))) {
+#endif // endif
+ if (wl_is_linkdown(cfg, e)) {
+ /* Clear IEs for disassoc */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ } else {
+ WL_ERR(("link down--clearing disconnect IEs\n"));
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, VNDR_IE_DISASSOC_FLAG,
+ NULL, 0)) != BCME_OK) {
+ WL_ERR(("Failed to clear ies err = %d\n", err));
+ }
+ }
+ }
WL_INFORM_MEM(("link down. connection state bit status: [%u:%u:%u:%u]\n",
wl_get_drv_status(cfg, CONNECTING, ndev),
@@ -13735,8 +15368,10 @@
return 0;
}
- wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
- FW_LOGSET_MASK_ALL);
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
+ FW_LOGSET_MASK_ALL);
+ }
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
#endif // endif
@@ -13781,16 +15416,18 @@
"changed 0xFF\n", event, reason));
reason = WLC_E_DEAUTH_MAX_REASON;
}
+ wl_cfg80211_handle_deauth_ind(cfg, ndev, e, data);
}
#ifdef SET_SSID_FAIL_CUSTOM_RC
- if (event == WLC_E_SET_SSID) {
+ if ((event == WLC_E_SET_SSID) &&
+ (ntoh32(e->status) == WLC_E_STATUS_TIMEOUT)) {
reason = SET_SSID_FAIL_CUSTOM_RC;
}
#endif /* SET_SSID_FAIL_CUSTOM_RC */
/* roam offload does not sync BSSID always, get it from dongle */
if (cfg->roam_offload) {
- memset(&bssid_dongle, 0, sizeof(bssid_dongle));
+ bzero(&bssid_dongle, sizeof(bssid_dongle));
if (wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid_dongle,
sizeof(bssid_dongle)) == BCME_OK) {
/* if not roam case, it would return null bssid */
@@ -13832,6 +15469,9 @@
wl_clr_drv_status(cfg, CONNECTED, ndev);
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev),
+ WLAN_REASON_DEAUTH_LEAVING);
/* To make sure disconnect, explictly send dissassoc
* for BSSID 00:00:00:00:00:00 issue
*/
@@ -13854,11 +15494,6 @@
ndev->name, event, ntoh32(e->reason),
MAC2STRDBG((const u8*)(&e->addr))));
- /* Send up deauth and clear states */
- CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
- loc_gen, GFP_KERNEL);
- wl_link_down(cfg);
- wl_init_prof(cfg, ndev);
#ifdef WBTEXT
/* when STA was disconnected, clear join pref and set wbtext */
if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
@@ -13877,40 +15512,125 @@
== BCME_OK) {
wl_cfg80211_wbtext_set_default(ndev);
} else {
- WL_ERR(("%s: Failed to set wbtext = %d\n",
- __FUNCTION__, err));
+ WL_ERR(("wl_notify_connect_status:"
+ " Failed to"
+ " set wbtext = %d\n",
+ err));
}
} else {
- WL_ERR(("%s: Failed to clear join pref = %d\n",
- __FUNCTION__, err));
+ WL_ERR(("wl_notify_connect_status:"
+ " Failed to clear join pref = %d\n",
+ err));
}
wl_cfg80211_wbtext_clear_bssid_list(cfg);
}
#endif /* WBTEXT */
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_DONE),
+ dhd_net2idx(dhdp->info, ndev), reason);
+ /* Send up deauth and clear states */
+
+ /*
+ * FW sends body and body len as a part of deauth
+ * and disassoc events (WLC_E_DISASSOC_IND, WLC_E_DEAUTH_IND)
+ * The VIEs sit after the reason code in the body. The reason code is
+ * 2 bytes long.
+ */
+ WL_DBG(("recv disconnect ies ie_len = %d\n", ie_len));
+ if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND) {
+ if ((datalen > DOT11_DISCONNECT_RC) &&
+ datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
+ data) {
+ ie_ptr = (uchar*)data + DOT11_DISCONNECT_RC;
+ ie_len = datalen - DOT11_DISCONNECT_RC;
+ }
+ } else if (event == WLC_E_LINK &&
+ ntoh32(e->reason) == WLC_E_LINK_BCN_LOSS) {
+#ifdef WL_ANALYTICS
+ /*
+ * In case of link down, the FW sends probe response IEs. The disco VIE
+ * is appended to the probe response IEs. Strip the probe response IEs and
+ * send only the disco VIE to the upper layer.
+ * The disco VIE has a fixed length of 11 octets
+ * (2-octet header + 9-octet VIE), as per the SS spec.
+ */
+ if (datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
+ datalen >= DOT11_DISCONNECT_RC &&
+ ((err = wl_cfg80211_parse_vndr_ies(
+ (const u8 *)data, datalen,
+ &disco_vndr_ie)) == BCME_OK)) {
+ for (i = 0; i < disco_vndr_ie.count; i++) {
+ vndrie_info = &disco_vndr_ie.ie_info[i];
+ if ((vndrie_info->vndrie.id ==
+ 0xDD) && (!memcmp(
+ vndrie_info->vndrie.oui,
+ SSE_OUI, DOT11_OUI_LEN)) &&
+ (vndrie_info->vndrie.data[0] ==
+ VENDOR_ENTERPRISE_STA_OUI_TYPE)) {
+ ie_ptr = (u8 *)vndrie_info->ie_ptr;
+ ie_len = vndrie_info->ie_len;
+ }
+ }
+ }
+#endif /* WL_ANALYTICS */
+ }
+
+ CFG80211_DISCONNECTED(ndev, reason, ie_ptr, ie_len,
+ loc_gen, GFP_KERNEL);
+ WL_INFORM_MEM(("[%s] Disconnect event sent to upper layer"
+ "event:%d reason=%d ie_len=%d from " MACDBG "\n",
+ ndev->name, event, ntoh32(e->reason), ie_len,
+ MAC2STRDBG((const u8*)(&e->addr))));
+
+ /* Wait for the status to be cleared to prevent race condition
+ * issues with the connect context.
+ * In the DISCONNECTING state there is an rtnl_lock issue in cfg80211.
+ */
+ if (!loc_gen)
+ wl_cfg80211_disconnect_state_sync(cfg, ndev);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
}
else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
WL_INFORM_MEM(("link down, during connecting\n"));
- /* Issue WLC_DISASSOC to prevent FW roam attempts */
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (err < 0) {
- WL_ERR(("CONNECTING state, WLC_DISASSOC error %d\n", err));
- err = 0;
- }
- WL_DBG(("Clear drv CONNECTING status\n"));
- wl_clr_drv_status(cfg, CONNECTING, ndev);
+ /* Issue WLC_DISASSOC to prevent FW roam attempts.
+ * Do not issue WLC_DISASSOC again if the linkdown is
+ * generated due to local disassoc, to avoid connect-disconnect
+ * loop.
+ */
+ if (!((event == WLC_E_LINK) &&
+ (ntoh32(e->reason) == WLC_E_LINK_DISASSOC) &&
+ (ntoh32(e->status) == WLC_E_STATUS_SUCCESS))) {
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("CONNECTING state,"
+ " WLC_DISASSOC error %d\n",
+ err));
+ err = 0;
+ }
#ifdef ESCAN_RESULT_PATCH
- if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
- /* In case this event comes while associating another AP */
+ if ((memcmp(connect_req_bssid, broad_bssid,
+ ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, broad_bssid,
+ ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, connect_req_bssid,
+ ETHER_ADDR_LEN) == 0))
+ /* In case this event comes while associating
+ * another AP
+ */
#endif /* ESCAN_RESULT_PATCH */
- wl_bss_connect_done(cfg, ndev, e, data, false);
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ }
}
wl_clr_drv_status(cfg, DISCONNECTING, ndev);
/* if link down, bsscfg is diabled */
if (ndev != bcmcfg_to_prmry_ndev(cfg))
complete(&cfg->iface_disable);
+#ifdef REVERSE_AIFSN
+ ((dhd_pub_t *)cfg->pub)->aifsn_reverse = FALSE;
+#endif /* REVERSE_AIFSN */
#ifdef WLTDLS
/* re-enable TDLS if the number of connected interfaces
* is less than 2.
@@ -13935,23 +15655,27 @@
/* Dump FW preserve buffer content */
wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
- wl_get_drv_status(cfg, CONNECTING, ndev)) {
- wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- wl_clr_drv_status(cfg, CONNECTING, ndev);
- wl_cfg80211_scan_abort(cfg);
-#if defined(OEM_ANDROID)
- DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
-#endif // endif
- return err;
- }
/* Clean up any pending scan request */
wl_cfg80211_cancel_scan(cfg);
- if (wl_get_drv_status(cfg, CONNECTING, ndev))
+ if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ WL_INFORM_MEM(("wl dissassoc\n"));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ err = 0;
+ }
+ } else {
+ WL_DBG(("connect fail. clear disconnecting bit\n"));
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ }
wl_bss_connect_done(cfg, ndev, e, data, false);
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
+ WL_INFORM_MEM(("connect fail reported\n"));
+ }
} else {
- WL_DBG(("%s nothing\n", __FUNCTION__));
+ WL_DBG(("wl_notify_connect_status nothing\n"));
}
#if defined(OEM_ANDROID)
DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
@@ -13997,7 +15721,7 @@
uint8 ifindex;
wl_event_msg_t event;
- memset(&event, 0, sizeof(wl_event_msg_t));
+ bzero(&event, sizeof(wl_event_msg_t));
memcpy(&event, e, sizeof(wl_event_msg_t));
ifindex = (uint8)dhd_ifname2idx(dhd->info, event.ifname);
@@ -14152,16 +15876,28 @@
wl_del_roam_timeout(cfg);
}
#else
+#if !defined(DHD_NONFT_ROAMING)
wl_bss_roaming_done(cfg, ndev, e, data);
+#endif /* !DHD_NONFT_ROAMING */
#endif /* DHD_LOSSLESS_ROAMING */
#ifdef WBTEXT
if (dhdp->wbtext_support) {
/* set wnm_keepalives_max_idle after association */
wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
- /* send nbr request or BTM query to update RCC
- * after roaming completed (receiving the first beacon)
+
+ /* Mostly the nbr request or BTM query will be handled
+ * from wl_cfg80211_change_station
+ * after key negotiation is finished.
+ * This part is only for some specific security
+ * types (FT, CCKM) that don't call
+ * wl_cfg80211_change_station after roaming
*/
- wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+ /* send nbr request or BTM query to update RCC
+ * after roaming completed
+ */
+ wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+ }
}
#endif /* WBTEXT */
} else {
@@ -14185,7 +15921,7 @@
#ifdef CUSTOM_EVENT_PM_WAKE
uint32 last_dpm_upd_time = 0; /* ms */
-#define DPM_UPD_LMT_TIME 25000 /* ms */
+#define DPM_UPD_LMT_TIME ((CUSTOM_EVENT_PM_WAKE + (5)) * (1000) * (4)) /* ms */
#define DPM_UPD_LMT_RSSI -85 /* dbm */
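/*
 * Worked example of the new limit (value assumed, since CUSTOM_EVENT_PM_WAKE
 * is defined outside this hunk): with CUSTOM_EVENT_PM_WAKE = 30,
 *   DPM_UPD_LMT_TIME = (30 + 5) * 1000 * 4 = 140000 ms (140 s)
 * i.e. two DPM update events arriving less than 140 s apart trigger the
 * forced disassoc in the handler below; DPM_UPD_LMT_RSSI is the companion
 * RSSI threshold.
 */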
static s32
@@ -14210,7 +15946,8 @@
}
err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
- "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN,
+ 0, &cfg->ioctl_buf_sync);
if (err) {
WL_ERR(("dump ioctl err = %d", err));
@@ -14259,14 +15996,17 @@
cur_dpm_upd_time = OSL_SYSUPTIME();
if (cur_dpm_upd_time - last_dpm_upd_time < DPM_UPD_LMT_TIME) {
scb_val_t scbval;
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, ndev), 0);
bzero(&scbval, sizeof(scb_val_t));
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC,
+ &scbval, sizeof(scb_val_t));
if (err < 0) {
- WL_ERR(("%s: Disassoc error %d\n", __FUNCTION__, err));
+ WL_ERR(("Disassoc error %d\n", err));
return err;
}
- WL_ERR(("%s: Force Disassoc due to updated DPM event.\n", __FUNCTION__));
+ WL_ERR(("Force Disassoc due to updated DPM event.\n"));
last_dpm_upd_time = 0;
} else {
@@ -14314,6 +16054,12 @@
WL_ERR(("Attempting roam with reason code : %d\n", reason));
}
+#ifdef CONFIG_SILENT_ROAM
+ if (dhdp->in_suspend && reason == WLC_E_REASON_SILENT_ROAM) {
+ dhdp->sroamed = TRUE;
+ }
+#endif /* CONFIG_SILENT_ROAM */
+
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
#ifdef DBG_PKT_MON
@@ -14361,6 +16107,50 @@
return BCME_OK;
}
+#ifdef ENABLE_HOGSQS
+static s32
+wl_cfg80211_hogsqs_notify(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+ struct ether_addr *hog_etheraddr;
+ gfp_t aflags;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ wdev = ndev_to_wdev(ndev);
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+
+ hog_etheraddr = (struct ether_addr *)data;
+ WL_DBG(("RX HOGGER EVENT: " MACDBG "\n", MAC2STRDBG(hog_etheraddr->octet)));
+
+ mutex_lock(&cfg->usr_sync);
+ if ((wdev->iftype != NL80211_IFTYPE_AP) &&
+ (wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
+ WL_DBG(("Ignore RX HOGGER EVENT \n"));
+ mutex_unlock(&cfg->usr_sync);
+ return -EINVAL;
+ }
+
+ /* Kernel cfg80211 API. This API delivers NL80211_ATTR_CQM_PKT_LOSS_EVENT
+ * to wpa_supplicant
+ */
+ cfg80211_cqm_pktloss_notify(ndev, hog_etheraddr->octet, 10, aflags);
+ mutex_unlock(&cfg->usr_sync);
+
+ /* disable the hogger event */
+ wl_add_remove_eventextmsg(ndev, WLC_E_LDF_HOGGER, false);
+
+ /* after WL_HOGSQS_TIMEOUT_MS, the workqueue handler re-enables the
+ * event
+ */
+ schedule_delayed_work(&cfg->hogsqs_eventwork,
+ msecs_to_jiffies(WL_HOGSQS_TIMEOUT_MS));
+ return 0;
+
+}
+#endif /* ENABLE_HOGSQS */
+
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
wl_assoc_info_t assoc_info;
@@ -14381,27 +16171,6 @@
assoc_info.req_len = htod32(assoc_info.req_len);
assoc_info.resp_len = htod32(assoc_info.resp_len);
assoc_info.flags = htod32(assoc_info.flags);
-
- if (assoc_info.req_len > (MAX_REQ_LINE + sizeof(struct dot11_assoc_req) +
- ((assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) ? ETHER_ADDR_LEN : 0))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if ((assoc_info.req_len > 0) &&
- (assoc_info.req_len < (sizeof(struct dot11_assoc_req) +
- ((assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) ? ETHER_ADDR_LEN : 0)))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if (assoc_info.resp_len > (MAX_REQ_LINE + sizeof(struct dot11_assoc_resp))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if ((assoc_info.resp_len > 0) && (assoc_info.resp_len < sizeof(struct dot11_assoc_resp))) {
- err = BCME_BADLEN;
- goto exit;
- }
-
if (conn_info->req_ie_len) {
conn_info->req_ie_len = 0;
bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
@@ -14416,13 +16185,27 @@
assoc_info.req_len, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc req (%d)\n", err));
- goto exit;
+ return err;
}
- conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+ if (assoc_info.req_len < sizeof(struct dot11_assoc_req)) {
+ WL_ERR(("req_len %d lessthan %d \n", assoc_info.req_len,
+ (int)sizeof(struct dot11_assoc_req)));
+ return BCME_BADLEN;
+ }
+ conn_info->req_ie_len = (uint32)(assoc_info.req_len
+ - sizeof(struct dot11_assoc_req));
if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
conn_info->req_ie_len -= ETHER_ADDR_LEN;
}
- memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
}
if (assoc_info.resp_len) {
@@ -14430,10 +16213,23 @@
assoc_info.resp_len, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc resp (%d)\n", err));
- goto exit;
+ return err;
}
- conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
- memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+ if (assoc_info.resp_len < sizeof(struct dot11_assoc_resp)) {
+ WL_ERR(("resp_len %d is lessthan %d \n", assoc_info.resp_len,
+ (int)sizeof(struct dot11_assoc_resp)));
+ return BCME_BADLEN;
+ }
+ conn_info->resp_ie_len = assoc_info.resp_len -
+ (uint32)sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
+ memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+ } else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+
#ifdef QOS_MAP_SET
/* find qos map set ie */
if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
@@ -14445,17 +16241,16 @@
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
- cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
+ } else {
+ conn_info->resp_ie_len = 0;
}
-
-exit:
- if (err) {
- WL_ERR(("err:%d, assoc_info-req:%u,resp:%u conn_info-req:%u,resp:%u\n",
- err, assoc_info.req_len, assoc_info.resp_len,
- conn_info->req_ie_len, conn_info->resp_ie_len));
- }
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
+#ifdef REVERSE_AIFSN
+ DHD_REVERSE_AIFSN(cfg->pub, ndev);
+#endif /* REVERSE_AIFSN */
return err;
}
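The reworked wl_get_assoc_ies() above replaces the old single exit label with per-step validation: the reported assoc request/response length must cover at least the fixed dot11 header (plus ETHER_ADDR_LEN for a reassociation request, which carries the current AP address), the IE length is whatever remains, and that remainder must fit the MAX_REQ_LINE-sized cache before the memcpy. A standalone sketch of that length arithmetic (the constants, return code, and the simplified source-buffer layout are assumptions for illustration, not the driver's exact buffers):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define IE_CACHE_MAX   1024u   /* stands in for MAX_REQ_LINE                   */
#define FIXED_HDR_SZ   4u      /* stands in for sizeof(struct dot11_assoc_req) */
#define ETHER_ADDR_SZ  6u
#define ERR_BADLEN     (-1)

/*
 * Derive the IE length from a raw (re)assoc frame body and copy the IEs
 * into a fixed-size cache.  Returns the IE length, or ERR_BADLEN when the
 * frame is shorter than its fixed header or larger than the cache.
 */
static int cache_assoc_ies(const uint8_t *frame, size_t frame_len,
		int is_reassoc, uint8_t cache[IE_CACHE_MAX])
{
	size_t hdr = FIXED_HDR_SZ + (is_reassoc ? ETHER_ADDR_SZ : 0);
	size_t ie_len;

	if (frame_len < hdr)
		return ERR_BADLEN;     /* shorter than the fixed header */

	ie_len = frame_len - hdr;
	if (ie_len > IE_CACHE_MAX)
		return ERR_BADLEN;     /* would overflow the IE cache   */

	memcpy(cache, frame + hdr, ie_len);
	return (int)ie_len;
}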
@@ -14515,7 +16310,8 @@
return 0;
}
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ bool update_ssid)
{
struct cfg80211_bss *bss;
wl_bss_info_t *bi;
@@ -14561,7 +16357,7 @@
err = -EIO;
goto update_bss_info_out;
}
- err = wl_inform_single_bss(cfg, bi, roam);
+ err = wl_inform_single_bss(cfg, bi, update_ssid);
if (unlikely(err))
goto update_bss_info_out;
@@ -14640,15 +16436,25 @@
#endif /* BCM4359_CHIP */
#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || defined(WL_FILS_ROAM_OFFLD) || \
+ defined(CFG80211_ROAM_API_GE_4_12)
struct cfg80211_roam_info roam_info;
#endif /* (CONFIG_ARCH_MSM && CFG80211_ROAMED_API_UNIFIED) || LINUX_VERSION >= 4.12.0 */
+#if defined(WL_FILS_ROAM_OFFLD)
+ struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#endif // endif
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ dhd_if_t *ifp = NULL;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
#ifdef WLFBT
uint32 data_len = 0;
if (data)
data_len = ntoh32(e->datalen);
#endif /* WLFBT */
+ BCM_REFERENCE(dhdp);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
#ifdef BCM4359_CHIP
@@ -14658,7 +16464,7 @@
* Also clear timer roam_timeout.
* Only used on BCM4359 devices.
*/
- memset(&ci, 0, sizeof(ci));
+ bzero(&ci, sizeof(ci));
if ((wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci,
sizeof(ci))) < 0) {
WL_ERR(("Failed to get current channel !"));
@@ -14677,12 +16483,14 @@
#endif /* BCM4359 CHIP */
if ((err = wl_get_assoc_ies(cfg, ndev)) != BCME_OK) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
WL_ERR(("Fetching Assoc IEs failed, Skipping roamed event to"
" upper layer\n"));
/* To make sure disconnect, and fw sync, explicitly send disassoc
* for BSSID 00:00:00:00:00:00 issue
*/
- memset(&scbval, 0, sizeof(scb_val_t));
+ bzero(&scbval, sizeof(scb_val_t));
scbval.val = WLAN_REASON_DEAUTH_LEAVING;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
@@ -14720,11 +16528,19 @@
memcpy(cfg->fbt_key, data, FBT_KEYLEN);
}
#endif /* WLFBT */
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
+ WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+ DHD_STATLOG_CTRL(dhdp, ST(REASSOC_INFORM),
+ dhd_net2idx(dhdp->info, ndev), 0);
WL_ERR(("Report roam event to upper layer. " MACDBG " (ch:%d)\n",
MAC2STRDBG((const u8*)(&e->addr)), *channel));
#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || defined(WL_FILS_ROAM_OFFLD) || \
+ defined(CFG80211_ROAM_API_GE_4_12)
memset(&roam_info, 0, sizeof(struct cfg80211_roam_info));
roam_info.channel = notify_channel;
roam_info.bssid = curbssid;
@@ -14732,7 +16548,17 @@
roam_info.req_ie_len = conn_info->req_ie_len;
roam_info.resp_ie = conn_info->resp_ie;
roam_info.resp_ie_len = conn_info->resp_ie_len;
-
+#if defined(WL_FILS_ROAM_OFFLD)
+ if ((sec->auth_type == DOT11_FILS_SKEY_PFS) || (sec->auth_type == DOT11_FILS_SKEY)) {
+ roam_info.fils.kek = fils_info->fils_kek;
+ roam_info.fils.kek_len = fils_info->fils_kek_len;
+ roam_info.fils.update_erp_next_seq_num = true;
+ roam_info.fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ roam_info.fils.pmk = fils_info->fils_pmk;
+ roam_info.fils.pmk_len = fils_info->fils_pmk_len;
+ roam_info.fils.pmkid = fils_info->fils_pmkid;
+ }
+#endif // endif
cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
#else
cfg80211_roamed(ndev,
@@ -14750,6 +16576,23 @@
#if defined(DHD_ENABLE_BIGDATA_LOGGING)
cfg->roam_count++;
#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#ifdef WL_BAM
+ if (wl_adps_bad_ap_check(cfg, &e->addr)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+#endif /* WL_BAM */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp = dhd_get_ifp(dhdp, e->ifidx);
+ if (ifp) {
+ ifp->post_roam_evt = TRUE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
return err;
@@ -14809,19 +16652,84 @@
return ret;
}
-static void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+
+_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+#ifdef WL_FILS
+static s32
+wl_get_fils_connect_params(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- struct cfg80211_scan_info info;
+ const bcm_xtlv_t* pxtlv_out;
+ struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
+ int err = BCME_OK;
+ bcm_iov_buf_t *iov_buf_in = NULL;
+ bcm_iov_buf_t iov_buf_out = {0};
+ u16 len;
+ u16 type;
+ const u8 *data;
+ iov_buf_in = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (!iov_buf_in) {
+ WL_ERR(("buf memory alloc failed\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf_out.version = WL_FILS_IOV_VERSION;
+ iov_buf_out.id = WL_FILS_CMD_GET_CONNECT_PARAMS;
+ err = wldev_iovar_getbuf(ndev, "fils", (uint8*)&iov_buf_out, sizeof(bcm_iov_buf_t),
+ iov_buf_in, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Get FILS Params Error (%d)\n", err));
+ goto exit;
+ }
+ pxtlv_out = (bcm_xtlv_t*)((bcm_iov_buf_t*)iov_buf_in)->data;
+ len = iov_buf_in->len;
+ do {
+ if (!bcm_valid_xtlv(pxtlv_out, iov_buf_in->len, BCM_XTLV_OPTION_ALIGN32)) {
+ WL_ERR(("%s: XTLV is not valid\n", __func__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ bcm_xtlv_unpack_xtlv(pxtlv_out, &type, &len, &data, BCM_XTLV_OPTION_ALIGN32);
+ switch (type) {
+ case WL_FILS_XTLV_ERP_NEXT_SEQ_NUM:
+ fils_info->fils_erp_next_seq_num = *(const u16 *)data;
+ break;
+ case WL_FILS_XTLV_KEK:
+ if (memcpy_s(fils_info->fils_kek,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ fils_info->fils_kek_len = len;
+ break;
+ case WL_FILS_XTLV_PMK:
+ if (memcpy_s(fils_info->fils_pmk,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ fils_info->fils_pmk_len = len;
+ break;
+ case WL_FILS_XTLV_PMKID:
+ if (memcpy_s(fils_info->fils_pmkid,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ default:
+ WL_ERR(("%s: wrong XTLV code\n", __func__));
+ break;
- memset(&info, 0, sizeof(struct cfg80211_scan_info));
- info.aborted = aborted;
- cfg80211_scan_done(cfg->scan_request, &info);
-#else
- cfg80211_scan_done(cfg->scan_request, aborted);
-#endif // endif
+ }
+ } while ((pxtlv_out = bcm_next_xtlv(pxtlv_out, (int *)&iov_buf_in->len,
+ BCM_XTLV_OPTION_ALIGN32)) && iov_buf_in->len);
+exit:
+ if (iov_buf_in) {
+ MFREE(cfg->osh, iov_buf_in, WLC_IOCTL_SMLEN);
+ }
+ return err;
}
-
+#endif /* WL_FILS */
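wl_get_fils_connect_params() above issues a single "fils" iovar and then walks the returned buffer as a chain of XTLVs, validating each record before unpacking it and copying key material with a bounded memcpy_s. The same walk in generic form, with a hand-rolled type/length/value layout standing in for the bcm_xtlv helpers (all names, the 2-byte type/length encoding, and the absence of alignment padding are assumptions for illustration, not the Broadcom wire format):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define TLV_HDR_SZ   4u          /* 2-byte type + 2-byte length (assumed layout) */
#define MAX_KEY_LEN  64u         /* stands in for WL_MAX_FILS_KEY_LEN            */

enum { TLV_ERP_SEQ = 1, TLV_KEK = 2, TLV_PMK = 3 };

struct fils_keys {
	uint16_t erp_seq;
	uint8_t  kek[MAX_KEY_LEN];
	size_t   kek_len;
	uint8_t  pmk[MAX_KEY_LEN];
	size_t   pmk_len;
};

/* Walk a type/length/value chain, rejecting truncated or oversized records. */
static int parse_fils_tlvs(const uint8_t *buf, size_t buf_len, struct fils_keys *out)
{
	size_t off = 0;

	while (off + TLV_HDR_SZ <= buf_len) {
		uint16_t type, len;

		memcpy(&type, buf + off, sizeof(type));
		memcpy(&len,  buf + off + 2, sizeof(len));
		if (off + TLV_HDR_SZ + len > buf_len)
			return -1;                      /* record runs past the buffer */

		const uint8_t *val = buf + off + TLV_HDR_SZ;
		switch (type) {
		case TLV_ERP_SEQ:
			if (len >= sizeof(out->erp_seq))
				memcpy(&out->erp_seq, val, sizeof(out->erp_seq));
			break;
		case TLV_KEK:
			if (len > MAX_KEY_LEN)
				return -1;              /* mirrors the memcpy_s bound check */
			memcpy(out->kek, val, len);
			out->kek_len = len;
			break;
		case TLV_PMK:
			if (len > MAX_KEY_LEN)
				return -1;
			memcpy(out->pmk, val, len);
			out->pmk_len = len;
			break;
		default:
			break;                          /* unknown type: skip */
		}
		off += TLV_HDR_SZ + len;
	}
	return 0;
}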
static s32
wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed)
@@ -14829,11 +16737,15 @@
struct wl_connect_info *conn_info = wl_to_conn(cfg);
struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
s32 err = 0;
+#ifdef WL_FILS
+ struct cfg80211_connect_resp_params resp_params = {0};
+ struct wl_fils_info *fils_info = NULL;
+ struct wlc_ssid *ssid = NULL;
+ struct wiphy *wiphy = NULL;
+
+#endif /* WL_FILS */
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
u32 event_type = ntoh32(e->event_type);
-#ifdef WL_IRQSET
- int delta_time = 0;
-#endif /* WL_IRQSET */
struct cfg80211_bss *bss = NULL;
dhd_pub_t *dhdp;
dhdp = (dhd_pub_t *)(cfg->pub);
@@ -14847,8 +16759,8 @@
#ifdef ESCAN_RESULT_PATCH
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
- WL_INFORM_MEM((" Connected event of connected "
- "device e=%d s=%d, ignore it\n",
+ WL_INFORM_MEM((" Connected event of connected device "
+ "e=%d s=%d, ignore it\n",
ntoh32(e->event_type), ntoh32(e->status)));
return err;
}
@@ -14860,18 +16772,22 @@
}
#else
if (cfg->scan_request) {
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#endif /* ESCAN_RESULT_PATCH */
if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
wl_cfg80211_scan_abort(cfg);
- wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed) {
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- wl_update_bss_info(cfg, ndev, false);
+ /*
+ * CFG layer relies on cached IEs (from probe/beacon) to fetch matching bss.
+ * For cases where no match is available, the cache needs to be
+ * updated based on the bss info from fw.
+ */
+ wl_update_bss_info(cfg, ndev, true);
wl_update_pmklist(ndev, cfg->pmk_list, err);
wl_set_drv_status(cfg, CONNECTED, ndev);
#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
@@ -14896,16 +16812,14 @@
dhd_set_cpucore(dhdp, TRUE);
}
#endif /* CUSTOM_SET_CPUCORE */
-#ifdef WL_IRQSET
- dhd_irq_set_affinity(dhdp);
- delta_time = IRQ_SET_DURATION - local_clock() / USEC_PER_SEC;
- if (delta_time > 0) {
- schedule_delayed_work(&cfg->irq_set_work,
- msecs_to_jiffies((const unsigned int)delta_time));
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
+ WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
}
-#endif /* WL_IRQSET */
- memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+ bzero(&cfg->last_roamed_addr, ETHER_ADDR_LEN);
}
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed && (wl_cfg80211_verify_bss(cfg, ndev, &bss) != true)) {
/* If bss entry is not available in the cfg80211 bss cache
@@ -14916,36 +16830,69 @@
completed = false;
sec->auth_assoc_res_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
- CFG80211_CONNECT_BSS(ndev,
- curbssid,
- bss,
- conn_info->req_ie,
- conn_info->req_ie_len,
- conn_info->resp_ie,
- conn_info->resp_ie_len,
- completed ? WLAN_STATUS_SUCCESS :
- (sec->auth_assoc_res_status) ?
- sec->auth_assoc_res_status :
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
+#ifdef WL_FILS
+ if ((sec->auth_type == DOT11_FILS_SKEY_PFS) || (sec->auth_type == DOT11_FILS_SKEY)) {
+ wl_get_fils_connect_params(cfg, ndev);
+ fils_info = wl_to_fils_info(cfg);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ wiphy = bcmcfg_to_wiphy(cfg);
+ resp_params.status = completed ? WLAN_STATUS_SUCCESS :
+ (sec->auth_assoc_res_status) ?
+ sec->auth_assoc_res_status :
+ WLAN_STATUS_UNSPECIFIED_FAILURE;
+ resp_params.bssid = curbssid;
+ resp_params.bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len);
+ resp_params.req_ie = conn_info->req_ie;
+ resp_params.req_ie_len = conn_info->req_ie_len;
+ resp_params.resp_ie = conn_info->resp_ie;
+ resp_params.resp_ie_len = conn_info->resp_ie_len;
+#ifdef WL_FILS_ROAM_OFFLD
+ resp_params.fils.kek = fils_info->fils_kek;
+ resp_params.fils.kek_len = fils_info->fils_kek_len;
+ resp_params.fils.update_erp_next_seq_num = true;
+ resp_params.fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ resp_params.fils.pmk = fils_info->fils_pmk;
+ resp_params.fils.pmk_len = fils_info->fils_pmk_len;
+ resp_params.fils.pmkid = fils_info->fils_pmkid;
#else
- cfg80211_connect_result(ndev,
- curbssid,
- conn_info->req_ie,
- conn_info->req_ie_len,
- conn_info->resp_ie,
- conn_info->resp_ie_len,
- completed ? WLAN_STATUS_SUCCESS :
- (sec->auth_assoc_res_status) ?
- sec->auth_assoc_res_status :
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) */
- if (completed)
+ resp_params.fils_kek = fils_info->fils_kek;
+ resp_params.fils_kek_len = fils_info->fils_kek_len;
+ resp_params.update_erp_next_seq_num = true;
+ resp_params.fils_erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ resp_params.pmk = fils_info->fils_pmk;
+ resp_params.pmk_len = fils_info->fils_pmk_len;
+ resp_params.pmkid = fils_info->fils_pmkid;
+#endif /* WL_FILS_ROAM_OFFLD */
+ cfg80211_connect_done(ndev, &resp_params, GFP_KERNEL);
+ }
+ else
+#endif /* WL_FILS */
+ {
+ CFG80211_CONNECT_RESULT(ndev,
+ curbssid,
+ bss,
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS :
+ (sec->auth_assoc_res_status) ?
+ sec->auth_assoc_res_status :
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+ if (completed) {
WL_INFORM_MEM(("[%s] Report connect result - "
"connection succeeded\n", ndev->name));
- else
+#ifdef WL_BAM
+ if (wl_adps_bad_ap_check(cfg, &e->addr)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+#endif /* WL_BAM */
+ } else
WL_ERR(("[%s] Report connect result - connection failed\n", ndev->name));
} else {
WL_INFORM_MEM(("[%s] Ignore event:%d. drv status"
@@ -15007,209 +16954,6 @@
}
#endif /* BT_WIFI_HANDOVER */
-#ifdef PNO_SUPPORT
-static s32
-wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- struct net_device *ndev = NULL;
-#ifdef GSCAN_SUPPORT
- void *ptr;
- int send_evt_bytes = 0;
- u32 event = be32_to_cpu(e->event_type);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
-#endif /* GSCAN_SUPPORT */
-
- WL_INFORM_MEM((">>> PNO Event\n"));
-
- if (!data) {
- WL_ERR(("Data received is NULL!\n"));
- return 0;
- }
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef GSCAN_SUPPORT
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- }
- if (!dhd_dev_is_legacy_pno_enabled(ndev))
- return 0;
-#endif /* GSCAN_SUPPORT */
-
-#ifndef WL_SCHED_SCAN
- mutex_lock(&cfg->usr_sync);
- /* TODO: Use cfg80211_sched_scan_results(wiphy); */
- CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
- mutex_unlock(&cfg->usr_sync);
-#else
- /* If cfg80211 scheduled scan is supported, report the pno results via sched
- * scan results
- */
- wl_notify_sched_scan_results(cfg, ndev, e, data);
-#endif /* WL_SCHED_SCAN */
- return 0;
-}
-#endif /* PNO_SUPPORT */
-
-#ifdef GSCAN_SUPPORT
-static s32
-wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- void *ptr = NULL;
- int send_evt_bytes = 0;
- int event_type;
- struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- u32 len = ntoh32(e->datalen);
- u32 buf_len = 0;
-
- switch (event) {
- case WLC_E_PFN_BEST_BATCHING:
- err = dhd_dev_retrieve_batch_scan(ndev);
- if (err < 0) {
- WL_ERR(("Batch retrieval already in progress %d\n", err));
- } else {
- event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
- if (data && len) {
- event_type = *((int *)data);
- }
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_GSCAN_BATCH_SCAN_EVENT,
- &event_type, sizeof(int));
- }
- break;
- case WLC_E_PFN_SCAN_COMPLETE:
- event_type = WIFI_SCAN_COMPLETE;
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_COMPLETE_EVENT,
- &event_type, sizeof(int));
- break;
- case WLC_E_PFN_BSSID_NET_FOUND:
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_FOUND, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_BSSID_NET_LOST:
- /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE
- * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
- */
- if (len) {
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_LOST, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
- MFREE(cfg->osh, ptr, buf_len);
- } else {
- err = -ENOMEM;
- }
- } else {
- err = -EINVAL;
- }
- break;
- case WLC_E_PFN_GSCAN_FULL_RESULT:
- ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_SSID_EXT:
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- default:
- WL_ERR(("Unknown event %d\n", event));
- break;
- }
- return err;
-}
-#endif /* GSCAN_SUPPORT */
-
-static s32
-wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- struct channel_info channel_inform;
- struct wl_scan_results *bss_list;
- struct net_device *ndev = NULL;
- u32 len = WL_SCAN_BUF_MAX;
- s32 err = 0;
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
- WL_DBG(("scan is not ready \n"));
- return err;
- }
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->usr_sync);
- wl_clr_drv_status(cfg, SCANNING, ndev);
- memset(&channel_inform, 0, sizeof(channel_inform));
- err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
- sizeof(channel_inform));
- if (unlikely(err)) {
- WL_ERR(("scan busy (%d)\n", err));
- goto scan_done_out;
- }
- channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
- if (unlikely(channel_inform.scan_channel)) {
-
- WL_DBG(("channel_inform.scan_channel (%d)\n",
- channel_inform.scan_channel));
- }
- cfg->bss_list = cfg->scan_results;
- bss_list = cfg->bss_list;
- memset(bss_list, 0, len);
- bss_list->buflen = htod32(len);
- err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
- if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
- WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
- err = -EINVAL;
- goto scan_done_out;
- }
- bss_list->buflen = dtoh32(bss_list->buflen);
- bss_list->version = dtoh32(bss_list->version);
- bss_list->count = dtoh32(bss_list->count);
-
- err = wl_inform_bss(cfg);
-
-scan_done_out:
- del_timer_sync(&cfg->scan_timeout);
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- if (cfg->scan_request) {
- wl_notify_scan_done(cfg, false);
- cfg->scan_request = NULL;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- WL_DBG(("cfg80211_scan_done\n"));
- mutex_unlock(&cfg->usr_sync);
- return err;
-}
-
static s32
wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
const struct ether_addr *da, const struct ether_addr *sa,
@@ -15446,14 +17190,14 @@
u8 bsscfgidx;
u32 mgmt_frame_len;
u16 channel;
-#if defined(TDLS_MSG_ONLY_WFD)
+#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#endif /* BCMDONGLEHOST && TDLS_MSG_ONLY_WFD */
+#endif /* TDLS_MSG_ONLY_WFD && WLTDLS */
if (ntoh32(e->datalen) < sizeof(wl_event_rx_frame_data_t)) {
WL_ERR(("wrong datalen:%d\n", ntoh32(e->datalen)));
return -EINVAL;
}
- mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+ mgmt_frame_len = ntoh32(e->datalen) - (uint32)sizeof(wl_event_rx_frame_data_t);
event = ntoh32(e->event_type);
bsscfgidx = e->bsscfgidx;
rxframe = (wl_event_rx_frame_data_t *)data;
@@ -15462,17 +17206,14 @@
return -EINVAL;
}
channel = (ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK);
- memset(&bssid, 0, ETHER_ADDR_LEN);
+ bzero(&bssid, ETHER_ADDR_LEN);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if ((ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) &&
(event == WLC_E_PROBREQ_MSG)) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
struct net_info *iter, *next;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev && iter->wdev &&
iter->wdev->iftype == NL80211_IFTYPE_AP) {
ndev = iter->ndev;
@@ -15480,10 +17221,6 @@
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
}
if (channel <= CH_MAX_2G_CHANNEL)
@@ -15560,7 +17297,7 @@
#ifdef TDLS_MSG_ONLY_WFD
if (!dhdp->tdls_mode) {
WL_DBG((" TDLS Frame filtered \n"));
- return 0;
+ goto exit;
}
#else
if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
@@ -15585,7 +17322,6 @@
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
- cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
#ifdef WBTEXT
@@ -15655,9 +17391,8 @@
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
if (cfg->next_af_subtype == act_frm->subtype) {
- WL_DBG(("Abort wait for next frame, Recieved frame (%d) "
- "Next action frame(%d)\n",
- act_frm->subtype, cfg->next_af_subtype));
+ WL_DBG(("We got a right next frame!(%d)\n",
+ act_frm->subtype));
wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
@@ -15717,6 +17452,47 @@
return 0;
} else
return 0;
+#ifdef WL_SAE
+ } else if (event == WLC_E_EXT_AUTH_FRAME_RX) {
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ u8 *frame;
+
+ WL_DBG(("EVENT: auth frame rx received\n"));
+ if (e->datalen < sizeof(*rxframe)) {
+ WL_ERR(("EXT_AUTH_RX: event data too small. Ignoring event\n"));
+ return -EINVAL;
+ }
+
+ bzero(&da, sizeof(da));
+ if ((err = wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx,
+ NULL)) != BCME_OK) {
+ WL_ERR(("EXT_AUTH_RX: cur_etheraddr iovar failed, error %d\n", err));
+ goto exit;
+ }
+ memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
+
+ err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err < 0) {
+ if (cfgdev_to_wdev(cfgdev)->iftype == NL80211_IFTYPE_STATION) {
+ memcpy(bssid.octet, da.octet, ETHER_ADDR_LEN);
+ } else {
+ WL_ERR(("EXT_AUTH_RX: WLC_GET_BSSID failed, error %d\n", err));
+ }
+ }
+
+ frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+ frame += DOT11_MGMT_HDR_LEN;
+ mgmt_frame_len -= DOT11_MGMT_HDR_LEN;
+ err = wl_frame_get_mgmt(cfg, FC_AUTH, &da, &e->addr, &bssid,
+ &mgmt_frame, &mgmt_frame_len, frame);
+ if (err < 0) {
+ WL_ERR(("EXT_AUTH_RX: mgmt frame to cfg80211, len %d channel %d freq %d\n",
+ mgmt_frame_len, channel, freq));
+ goto exit;
+ }
+ isfree = true;
+#endif /* WL_SAE */
} else {
mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
@@ -15783,365 +17559,6 @@
return err;
}
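The WLC_E_EXT_AUTH_FRAME_RX branch added above (under WL_SAE) takes the event payload, skips the wl_event_rx_frame_data_t prefix and the 802.11 management header, and then has wl_frame_get_mgmt() rebuild a complete auth frame (FC_AUTH, the local address as DA, the event source as SA, plus the BSSID) for delivery to cfg80211. A small sketch of the pointer/length bookkeeping involved, with generic header sizes standing in for the driver's structures (assumed values, illustrative only):

#include <stddef.h>
#include <stdint.h>

#define RX_FRAME_DATA_SZ  16u   /* stands in for sizeof(wl_event_rx_frame_data_t) */
#define MGMT_HDR_SZ       24u   /* stands in for DOT11_MGMT_HDR_LEN                */

/*
 * Given the raw event payload, return a pointer to the auth frame body and
 * its length, or NULL when the payload is too short for either header.
 */
static const uint8_t *auth_frame_body(const uint8_t *event_data,
		size_t data_len, size_t *body_len)
{
	if (data_len < RX_FRAME_DATA_SZ + MGMT_HDR_SZ)
		return NULL;

	*body_len = data_len - RX_FRAME_DATA_SZ - MGMT_HDR_SZ;
	return event_data + RX_FRAME_DATA_SZ + MGMT_HDR_SZ;
}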
-#ifdef WL_SCHED_SCAN
-/* If target scan is not reliable, set the below define to "1" to do a
- * full escan
- */
-#define FULL_ESCAN_ON_PFN_NET_FOUND 0
-static s32
-wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
- wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- int err = 0;
- struct cfg80211_scan_request *request = NULL;
- struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
- struct ieee80211_channel *channel = NULL;
- int channel_req = 0;
- int band = 0;
- wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
- wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
- int n_pfn_results = 0;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
-
- WL_DBG(("Enter\n"));
-
- /* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
- * in size and SSID offset, allowing v1 to be used below except for the results
- * fields themselves (status, count, offset to netinfo).
- */
- STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
- STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
- OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
-
- /* Extract the version-specific items */
- if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
- n_pfn_results = pfn_result_v1->count;
- pnetinfo = pfn_result_v1->netinfo;
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- memset(&ssid, 0x00, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo = &pnetinfo[i];
- if (!netinfo) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- WL_PNO((">>> SSID:%s Channel:%d \n",
- netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
- /* PFN result doesn't have all the info which are required by
- * the supplicant. (For e.g IEs) Do a target Escan so that
- * sched scan results are reported via wl_inform_single_bss in
- * the required format. Escan does require the scan request in
- * the form of cfg80211_scan_request. For timebeing, create
- * cfg80211_scan_request one out of the received PNO event.
- */
-
- ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
- netinfo->pfnsubnet.SSID_len);
- memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID,
- ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = netinfo->pfnsubnet.SSID_len;
- memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- memcpy(tlv_data->value, &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
- memcpy(tlv_data->value, &netinfo->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_notify_escan_complete(cfg, ndev, true, true);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
-
- } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- n_pfn_results = pfn_result_v2->count;
- pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
-
- if (e->event_type == WLC_E_PFN_NET_LOST) {
- WL_PNO(("Do Nothing %d\n", e->event_type));
- return 0;
- }
-
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- memset(&ssid, 0x00, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo_v2 = &pnetinfo_v2[i];
- if (!netinfo_v2) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- WL_PNO((">>> SSID:%s Channel:%d \n",
- netinfo_v2->pfnsubnet.u.SSID,
- netinfo_v2->pfnsubnet.channel));
- /* PFN result doesn't have all the info which are required by the
- * supplicant. (For e.g IEs) Do a target Escan so that sched scan
- * results are reported via wl_inform_single_bss in the required
- * format. Escan does require the scan request in the form of
- * cfg80211_scan_request. For timebeing, create
- * cfg80211_scan_request one out of the received PNO event.
- */
- ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
- netinfo_v2->pfnsubnet.SSID_len);
- memcpy(ssid[i].ssid, netinfo_v2->pfnsubnet.u.SSID,
- ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo_v2->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = netinfo_v2->pfnsubnet.SSID_len;
- memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- memcpy(tlv_data->value, &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
- memcpy(tlv_data->value, &netinfo_v2->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_notify_escan_complete(cfg, ndev, true, true);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
-
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
- } else {
- WL_ERR(("Unsupported version %d, expected %d or %d\n", pfn_result_v1->version,
- PFN_SCANRESULT_VERSION_V1, PFN_SCANRESULT_VERSION_V2));
- return 0;
- }
-out_err:
- if (request) {
- MFREE(cfg->osh, request,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- }
- if (channel) {
- MFREE(cfg->osh, channel,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- }
-
- if (event_data) {
- if (event_data->tlvs) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- }
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return err;
-}
-#endif /* WL_SCHED_SCAN */
-
static void wl_init_conf(struct wl_conf *conf)
{
WL_DBG(("Enter \n"));
@@ -16157,14 +17574,19 @@
unsigned long flags;
struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- memset(profile, 0, sizeof(struct wl_profile));
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ if (!profile) {
+ WL_ERR(("profile null\n"));
+ return;
+ }
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ bzero(profile, sizeof(struct wl_profile));
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
}
static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
{
- memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+ bzero(cfg->evt_handler, sizeof(cfg->evt_handler));
cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
@@ -16186,6 +17608,8 @@
cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_AUTH_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC_RESP_IE] = wl_notify_connect_status;
#ifdef PNO_SUPPORT
cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
#endif /* PNO_SUPPORT */
@@ -16228,6 +17652,39 @@
cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
cfg->evt_handler[WLC_E_ROAM_START] = wl_notify_roam_start_status;
+#ifdef WL_BAM
+ cfg->evt_handler[WLC_E_ADPS] = wl_adps_event_handler;
+#endif /* WL_BAM */
+ cfg->evt_handler[WLC_E_PSK_SUP] = wl_cfg80211_sup_event_handler;
+#ifdef WL_BCNRECV
+ cfg->evt_handler[WLC_E_BCNRECV_ABORTED] = wl_bcnrecv_aborted_event_handler;
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ cfg->evt_handler[WLC_E_MBO] = wl_mbo_event_handler;
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ cfg->evt_handler[WLC_E_ADDTS_IND] = wl_cfg80211_cac_event_handler;
+ cfg->evt_handler[WLC_E_DELTS_IND] = wl_cfg80211_cac_event_handler;
+#endif /* WL_CAC_TS */
+#if defined(WL_MBO) || defined(WL_OCE)
+ cfg->evt_handler[WLC_E_PRUNE] = wl_bssid_prune_event_handler;
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+ cfg->evt_handler[WLC_E_PROXD] = wl_cfg80211_rtt_event_handler;
+#endif // endif
+#ifdef WL_CHAN_UTIL
+ cfg->evt_handler[WLC_E_BSS_LOAD] = wl_cfg80211_bssload_report_event_handler;
+#endif /* WL_CHAN_UTIL */
+ cfg->evt_handler[WLC_E_IND_DOS_STATUS] = wl_notify_dos_status;
+#ifdef WL_SAE
+ cfg->evt_handler[WLC_E_EXT_AUTH_REQ] = wl_notify_extauth_req_event;
+ cfg->evt_handler[WLC_E_EXT_AUTH_FRAME_RX] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_MGMT_FRAME_TXSTATUS] = wl_notify_mgmt_frame_tx_complete;
+ cfg->evt_handler[WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE] = wl_notify_mgmt_frame_tx_complete;
+#endif /* WL_SAE */
+#ifdef ENABLE_HOGSQS
+ cfg->evt_handler[WLC_E_LDF_HOGGER] = wl_cfg80211_hogsqs_notify;
+#endif /* ENABLE_HOGSQS */
}
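wl_init_event_handler() above is a plain dispatch table: each firmware event code (WLC_E_*) indexes an array of handler callbacks, and the new feature blocks (WL_BAM, WL_BCNRECV, WL_SAE, ENABLE_HOGSQS, ...) simply register extra entries under their config guards. A minimal version of that table pattern (hypothetical event codes, handler types, and feature macro; illustrative only):

#include <stdio.h>

enum wl_event { EV_SCAN_COMPLETE, EV_AUTH, EV_ROAM, EV_MAX };

typedef int (*event_handler_t)(void *ctx, const void *event_data);

static int on_scan_complete(void *ctx, const void *d) { (void)ctx; (void)d; return 0; }
static int on_connect(void *ctx, const void *d)       { (void)ctx; (void)d; return 0; }

static event_handler_t evt_handler[EV_MAX];

/* mirrors wl_init_event_handler(): zero the table, then register per-feature entries */
static void init_event_handlers(void)
{
	for (int i = 0; i < EV_MAX; i++)
		evt_handler[i] = NULL;

	evt_handler[EV_SCAN_COMPLETE] = on_scan_complete;
	evt_handler[EV_AUTH] = on_connect;
#ifdef ENABLE_ROAM_FEATURE              /* feature-guarded entry, as in the patch */
	evt_handler[EV_ROAM] = on_connect;
#endif
}

/* dispatch: unknown or unregistered events are simply dropped */
static int dispatch_event(enum wl_event ev, void *ctx, const void *data)
{
	if (ev >= EV_MAX || !evt_handler[ev])
		return -1;
	return evt_handler[ev](ctx, data);
}

int main(void)
{
	init_event_handlers();
	printf("dispatch result: %d\n", dispatch_event(EV_AUTH, NULL, NULL));
	return 0;
}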
@@ -16373,30 +17830,20 @@
static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
{
MFREE(cfg->osh, cfg->scan_results, WL_SCAN_BUF_MAX);
- cfg->scan_results = NULL;
MFREE(cfg->osh, cfg->conf, sizeof(*cfg->conf));
- cfg->conf = NULL;
MFREE(cfg->osh, cfg->scan_req_int, sizeof(*cfg->scan_req_int));
- cfg->scan_req_int = NULL;
MFREE(cfg->osh, cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
- cfg->ioctl_buf = NULL;
MFREE(cfg->osh, cfg->escan_ioctl_buf, WLC_IOCTL_MAXLEN);
- cfg->escan_ioctl_buf = NULL;
MFREE(cfg->osh, cfg->extra_buf, WL_EXTRA_BUF_MAX);
- cfg->extra_buf = NULL;
MFREE(cfg->osh, cfg->pmk_list, sizeof(*cfg->pmk_list));
- cfg->pmk_list = NULL;
#if defined(STATIC_WL_PRIV_STRUCT)
MFREE(cfg->osh, cfg->conn_info, sizeof(*cfg->conn_info));
- cfg->conn_info = NULL;
MFREE(cfg->osh, cfg->ie, sizeof(*cfg->ie));
- cfg->ie = NULL;
wl_deinit_escan_result_buf(cfg);
#endif /* STATIC_WL_PRIV_STRUCT */
if (cfg->afx_hdl) {
cancel_work_sync(&cfg->afx_hdl->work);
MFREE(cfg->osh, cfg->afx_hdl, sizeof(*cfg->afx_hdl));
- cfg->afx_hdl = NULL;
}
}
@@ -16451,92 +17898,6 @@
}
}
-static void wl_scan_timeout(unsigned long data)
-{
- wl_event_msg_t msg;
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
- struct wl_scan_results *bss_list;
- wl_bss_info_t *bi = NULL;
- s32 i;
- u32 channel;
- u64 cur_time = OSL_SYSUPTIME_US();
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- uint32 prev_memdump_mode = dhdp->memdump_enabled;
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
-
- if (!(cfg->scan_request)) {
- WL_ERR(("timer expired but no scan request\n"));
- return;
- }
-
- WL_ERR(("***SCAN event timeout. WQ state:0x%x enq_time:%llu current_time:%llu\n",
- work_busy(&cfg->event_work), cfg->scan_enq_time, cur_time));
- if (cfg->scan_enq_time) {
- WL_ERR(("Elapsed time: %llu\n", (cur_time - cfg->scan_enq_time)));
- }
- WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
- mutex_is_locked(&cfg->if_sync),
- mutex_is_locked(&cfg->usr_sync),
- mutex_is_locked(&cfg->pm_sync),
- mutex_is_locked(&cfg->scan_complete),
- spin_is_locked(&cfg->cfgdrv_lock),
- spin_is_locked(&cfg->eq_lock)));
- dhd_bus_intr_count_dump(dhdp);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
- /* Print WQ states. Enable only for in-built drivers as the symbol is not exported */
- show_workqueue_state();
-#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
-
- bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!bss_list) {
- WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
- } else {
- WL_ERR(("Dump scan buffer:\n"
- "scanned AP count (%d)\n", bss_list->count));
-
- bi = next_bss(bss_list, bi);
- for_each_bss(bss_list, bi, i) {
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
- WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
- }
- }
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
- if (cfg->scan_request->dev)
- wdev = cfg->scan_request->dev->ieee80211_ptr;
-#else
- wdev = cfg->scan_request->wdev;
-#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
- if (!wdev) {
- WL_ERR(("No wireless_dev present\n"));
- return;
- }
- ndev = wdev_to_wlc_ndev(wdev, cfg);
-
- bzero(&msg, sizeof(wl_event_msg_t));
- WL_ERR(("timer expired\n"));
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled) {
- dhdp->memdump_enabled = DUMP_MEMFILE;
- dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
- dhd_bus_mem_dump(dhdp);
- dhdp->memdump_enabled = prev_memdump_mode;
- }
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- msg.event_type = hton32(WLC_E_ESCAN_RESULT);
- msg.status = hton32(WLC_E_STATUS_TIMEOUT);
- msg.reason = 0xFFFFFFFF;
- wl_cfg80211_event(ndev, &msg, NULL);
-#ifdef CUSTOMER_HW4_DEBUG
- if (!wl_scan_timeout_dbg_enabled)
- wl_scan_timeout_dbg_set();
-#endif /* CUSTOMER_HW4_DEBUG */
-}
-
#ifdef DHD_LOSSLESS_ROAMING
static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
{
@@ -16563,6 +17924,124 @@
#endif /* DHD_LOSSLESS_ROAMING */
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#define CP_CHAN_INFO_RAT_MODE_LTE 3
+#define CP_CHAN_INFO_RAT_MODE_NR5G 7
+int g_mhs_chan_for_cpcoex = 0;
+
+struct __packed cam_cp_noti_info {
+ u8 rat;
+ u32 band;
+ u32 channel;
+};
+
+int
+wl_cfg80211_send_msg_to_ril(void)
+{
+ int id, buf = 1;
+
+ id = IPC_SYSTEM_CP_CHANNEL_INFO;
+ dev_ril_bridge_send_msg(id, sizeof(int), &buf);
+ WL_ERR(("[BeyondX] send message to ril.\n"));
+
+ OSL_SLEEP(500);
+ return 0;
+}
+
+int
+wl_cfg80211_ril_bridge_notifier_call(struct notifier_block *nb,
+ unsigned long size, void *buf)
+{
+ struct dev_ril_bridge_msg *msg;
+ struct cam_cp_noti_info *cp_noti_info;
+ static int mhs_channel_for_4g, mhs_channel_for_5g;
+ static int recv_msg_4g, recv_msg_5g;
+
+ WL_ERR(("[BeyondX] receive message from ril.\n"));
+ msg = (struct dev_ril_bridge_msg *)buf;
+
+ if (msg->dev_id == IPC_SYSTEM_CP_CHANNEL_INFO &&
+ msg->data_len <= sizeof(struct cam_cp_noti_info)) {
+ u8 rat;
+ u32 band;
+ u32 channel;
+
+ cp_noti_info = (struct cam_cp_noti_info *)msg->data;
+ rat = cp_noti_info->rat;
+ band = cp_noti_info->band;
+ channel = cp_noti_info->channel;
+
+ /* LTE/5G Band/Freq information => Mobile Hotspot channel mapping.
+ * LTE/B40: 38650~39649 => Ch.11
+ * LTE/B41: 39650~41589 => Ch.1
+ * 5G/N41: 499200~537999 => Ch.1
+ */
+ if (rat == CP_CHAN_INFO_RAT_MODE_LTE) {
+ recv_msg_4g = 1;
+ if (channel >= 38650 && channel <= 39649) {
+ mhs_channel_for_4g = 11;
+ } else if (channel >= 39650 && channel <= 41589) {
+ mhs_channel_for_4g = 1;
+ }
+ }
+ if (rat == CP_CHAN_INFO_RAT_MODE_NR5G) {
+ recv_msg_5g = 1;
+ if (channel >= 499200 && channel <= 537999) {
+ mhs_channel_for_5g = 1;
+ }
+ }
+
+ WL_DBG(("[BeyondX] rat: %u, band: %u, channel: %u, mhs_channel_for_4g: %u, "
+ "mhs_channel_for_5g: %u\n", rat, band, channel,
+ mhs_channel_for_4g, mhs_channel_for_5g));
+
+ if (recv_msg_4g && recv_msg_5g) {
+ if (mhs_channel_for_4g && mhs_channel_for_5g) {
+ /* if 4G/B40 + 5G/N41, select channel 6 for MHS */
+ if (mhs_channel_for_4g == 11 && mhs_channel_for_5g == 1) {
+ g_mhs_chan_for_cpcoex = 6;
+ /* if 4G(except for B40) + 5G/N41, select channel 1 for MHS */
+ } else {
+ g_mhs_chan_for_cpcoex = 1;
+ }
+ } else {
+ g_mhs_chan_for_cpcoex = mhs_channel_for_4g ? mhs_channel_for_4g :
+ mhs_channel_for_5g ? mhs_channel_for_5g : 0;
+ }
+ mhs_channel_for_4g = mhs_channel_for_5g = 0;
+ recv_msg_4g = recv_msg_5g = 0;
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block wl_cfg80211_ril_bridge_notifier = {
+ .notifier_call = wl_cfg80211_ril_bridge_notifier_call,
+};
+
+static bool wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
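The comment block in wl_cfg80211_ril_bridge_notifier_call() documents the cellular-to-Wi-Fi coexistence mapping: an LTE B40 report (channel 38650-39649) maps the mobile hotspot to channel 11, LTE B41 (39650-41589) to channel 1, and 5G NR n41 (499200-537999) to channel 1; once both a 4G and a 5G report have arrived, the B40 + n41 combination selects channel 6, any other overlap selects channel 1, and otherwise whichever single mapping exists wins. The same decision logic as pure functions (a standalone sketch with hypothetical names, not part of the patch):

/* Map a reported LTE channel to a mobile-hotspot Wi-Fi channel. */
static int mhs_channel_from_lte(unsigned int chan)
{
	if (chan >= 38650 && chan <= 39649)
		return 11;              /* LTE B40 */
	if (chan >= 39650 && chan <= 41589)
		return 1;               /* LTE B41 */
	return 0;                       /* no constraint */
}

/* Map a reported 5G NR channel to a mobile-hotspot Wi-Fi channel. */
static int mhs_channel_from_nr(unsigned int chan)
{
	if (chan >= 499200 && chan <= 537999)
		return 1;               /* NR n41 */
	return 0;
}

/* Combine the 4G and 5G results once both reports have been received. */
static int mhs_coex_channel(int chan_4g, int chan_5g)
{
	if (chan_4g && chan_5g)
		return (chan_4g == 11 && chan_5g == 1) ? 6 : 1;
	return chan_4g ? chan_4g : (chan_5g ? chan_5g : 0);
}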
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+static void wl_del_csa_timeout(struct bcm_cfg80211 *cfg)
+{
+ if (cfg) {
+ if (timer_pending(&cfg->csa_timeout)) {
+ del_timer_sync(&cfg->csa_timeout);
+ }
+ }
+}
+
+static void wl_csa_timeout(unsigned long data)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+ if (cfg->in_csa)
+ cfg->in_csa = FALSE;
+}
+#endif /* !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA) */
+
static s32
wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
unsigned long state, void *ptr)
@@ -16648,7 +18127,7 @@
* wdev_cleanup_work call WARN_ON and make the scan done forcibly.
*/
if (wl_get_drv_status(cfg, SCANNING, dev))
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
break;
}
return NOTIFY_DONE;
@@ -16664,779 +18143,6 @@
*/
static bool wl_cfg80211_netdev_notifier_registered = FALSE;
-static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
-{
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
-
- if (!cfg->scan_request)
- return;
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
- if (cfg->scan_request->dev)
- wdev = cfg->scan_request->dev->ieee80211_ptr;
-#else
- wdev = cfg->scan_request->wdev;
-#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
-
- if (!wdev) {
- WL_ERR(("No wireless_dev present\n"));
- return;
- }
-
- ndev = wdev_to_wlc_ndev(wdev, cfg);
- wl_notify_escan_complete(cfg, ndev, true, true);
- WL_INFORM_MEM(("Scan aborted! \n"));
-}
-
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
-{
- wl_scan_params_t *params = NULL;
- s32 params_size = 0;
- s32 err = BCME_OK;
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- if (!in_atomic()) {
- /* Our scan params only need space for 1 channel and 0 ssids */
- params = wl_cfg80211_scan_alloc_params(cfg, -1, 0, ¶ms_size);
- if (params == NULL) {
- WL_ERR(("scan params allocation failed \n"));
- err = -ENOMEM;
- } else {
- /* Do a scan abort to stop the driver's scan engine */
- err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
- if (err < 0) {
- /* scan abort can fail if there is no outstanding scan */
- WL_DBG(("scan abort failed \n"));
- }
- MFREE(cfg->osh, params, params_size);
- }
- }
-#ifdef WLTDLS
- if (cfg->tdls_mgmt_frame) {
- MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame = NULL;
- cfg->tdls_mgmt_frame_len = 0;
- }
-#endif /* WLTDLS */
-}
-
-static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- s32 err = BCME_OK;
- unsigned long flags;
- struct net_device *dev;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
- WL_DBG(("Enter \n"));
- BCM_REFERENCE(dhdp);
-
- mutex_lock(&cfg->scan_complete);
- if (!ndev) {
- WL_ERR(("ndev is null\n"));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->escan_info.ndev != ndev) {
- WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
- cfg->escan_info.ndev, ndev));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->scan_request) {
- dev = bcmcfg_to_prmry_ndev(cfg);
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->scan_request->dev != cfg->p2p_net)
- dev = cfg->scan_request->dev;
-#elif defined(WL_CFG80211_P2P_DEV_IF)
- if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
- dev = cfg->scan_request->wdev->netdev;
-#endif /* WL_ENABLE_P2P_IF */
- }
- else {
- WL_DBG(("cfg->scan_request is NULL. Internal scan scenario."
- "doing scan_abort for ndev %p primary %p",
- ndev, bcmcfg_to_prmry_ndev(cfg)));
- dev = ndev;
- }
- if (fw_abort && !in_atomic())
- wl_cfg80211_scan_abort(cfg);
- if (timer_pending(&cfg->scan_timeout))
- del_timer_sync(&cfg->scan_timeout);
- cfg->scan_enq_time = 0;
-#if defined(ESCAN_RESULT_PATCH)
- if (likely(cfg->scan_request)) {
- cfg->bss_list = wl_escan_get_buf(cfg, aborted);
- wl_inform_bss(cfg);
- }
-#endif /* ESCAN_RESULT_PATCH */
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
-#ifdef WL_SCHED_SCAN
- if (cfg->sched_scan_req && !cfg->scan_request) {
- if (!aborted) {
- WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy,
- cfg->sched_scan_req->reqid);
-#else
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
- }
-
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
- cfg->sched_scan_running = FALSE;
- }
-#endif /* WL_SCHED_SCAN */
- if (likely(cfg->scan_request)) {
- WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
- wl_notify_scan_done(cfg, aborted);
- cfg->scan_request = NULL;
-#if defined(OEM_ANDROID)
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
-#endif // endif
- }
- if (p2p_is_on(cfg))
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, dev);
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
-out:
- mutex_unlock(&cfg->scan_complete);
- return err;
-}
-
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
-#ifndef WL_DRV_AVOID_SCANCACHE
-static void
-wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
-{
- int idx;
- for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
- int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
- if (bss->RSSI < candidate[idx].RSSI) {
- if (len)
- memcpy(&candidate[idx + 1], &candidate[idx],
- sizeof(removal_element_t) * len);
- candidate[idx].RSSI = bss->RSSI;
- candidate[idx].length = bss->length;
- memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
- return;
- }
- }
-}
-
-static void
-wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
- wl_bss_info_t *bi)
-{
- int idx1, idx2;
- int total_delete_len = 0;
- for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
- wl_bss_info_t *bss = NULL;
- if (candidate[idx1].RSSI >= bi->RSSI)
- continue;
- for (idx2 = 0; idx2 < list->count; idx2++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
- list->bss_info;
- if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- candidate[idx1].RSSI == bss->RSSI &&
- candidate[idx1].length == dtoh32(bss->length)) {
- u32 delete_len = dtoh32(bss->length);
- WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
- MAC2STRDBG(bss->BSSID.octet)));
- if (idx2 < list->count -1) {
- memmove((u8 *)bss, (u8 *)bss + delete_len,
- list->buflen - cur_len - delete_len);
- }
- list->buflen -= delete_len;
- list->count--;
- total_delete_len += delete_len;
- /* if delete_len is greater than or equal to result length */
- if (total_delete_len >= bi->length) {
- return;
- }
- break;
- }
- cur_len += dtoh32(bss->length);
- }
- }
-}
-#endif /* WL_DRV_AVOID_SCANCACHE */
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
-#ifdef WL_DRV_AVOID_SCANCACHE
-static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
- u32 bi_length)
-{
- u32 ret;
- u8 *p2p_dev_addr = NULL;
-
- ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
- if (!ret) {
- return ret;
- }
- if (status == WLC_E_STATUS_PARTIAL) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
-
- if ((channel > MAXCHANNEL) || (channel <= 0)) {
- channel = WL_INVALID;
- } else {
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
- }
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- }
- } else {
- WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- }
-
- return ret;
-}
-
-static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
- struct net_device *ndev, const wl_event_msg_t *e, s32 status)
-{
- s32 err = BCME_OK;
- wl_bss_info_t *bi;
- u32 bi_length;
- bool aborted = false;
- bool fw_abort = false;
- bool notify_escan_complete = false;
-
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0) {
- goto exit;
- }
-
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
-
- if (!(status == WLC_E_STATUS_TIMEOUT) || !(status == WLC_E_STATUS_PARTIAL)) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- }
-
- if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- notify_escan_complete = true;
- }
-
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
- WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
- goto exit;
- }
-
- bi = escan_result->bss_info;
- bi_length = dtoh32(bi->length);
- if ((!bi) ||
- (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE))) {
- WL_ERR(("Invalid escan bss info (NULL pointer)"
- "or invalid bss_info length\n"));
- goto exit;
- }
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
- }
-
- if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
- goto exit;
- } else {
- if (scan_req_match(cfg)) {
- /* p2p scan && allow only probe response */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- }
-#ifdef ROAM_CHANNEL_CACHE
- add_roam_cache(cfg, bi);
-#endif /* ROAM_CHANNEL_CACHE */
- err = wl_inform_single_bss(cfg, bi, false);
-#ifdef ROAM_CHANNEL_CACHE
- /* print_roam_cache(); */
- update_roam_cache(cfg, ioctl_version);
-#endif /* ROAM_CHANNEL_CACHE */
-
- /*
- * !Broadcast && number of ssid = 1 && number of channels =1
- * means specific scan to association
- */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- aborted = false;
- fw_abort = true;
- }
- /* Directly exit from function here and
- * avoid sending notify completion to cfg80211
- */
- goto exit;
- }
- } else if (status == WLC_E_STATUS_SUCCESS) {
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- WL_INFORM_MEM(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
-
- /* Update escan complete status */
- aborted = false;
- fw_abort = false;
-
-#ifdef CUSTOMER_HW4_DEBUG
- if (wl_scan_timeout_dbg_enabled)
- wl_scan_timeout_dbg_clear();
-#endif /* CUSTOMER_HW4_DEBUG */
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
-#ifdef BCMCCX
- (status == WLC_E_STATUS_CCXFASTRM) ||
-#endif /* BCMCCX */
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Handle all cases of scan abort */
-
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
-
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
-
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- /* Update escan complete status */
- aborted = true;
- fw_abort = true;
- }
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
-
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
- }
-
- /* Notify escan complete status */
- if (notify_escan_complete) {
- wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
- }
-
-exit:
- return err;
-
-}
-#endif /* WL_DRV_AVOID_SCANCACHE */
-static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = BCME_OK;
- s32 status = ntoh32(e->status);
- wl_escan_result_t *escan_result;
- struct net_device *ndev = NULL;
-#ifndef WL_DRV_AVOID_SCANCACHE
- wl_bss_info_t *bi;
- u32 bi_length;
- const wifi_p2p_ie_t * p2p_ie;
- const u8 *p2p_dev_addr = NULL;
- wl_scan_results_t *list;
- wl_bss_info_t *bss = NULL;
- u32 i;
-#endif /* WL_DRV_AVOID_SCANCACHE */
-
- WL_DBG((" enter event type : %d, status : %d \n",
- ntoh32(e->event_type), ntoh32(e->status)));
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->usr_sync);
- /* P2P SCAN is coming from primary interface */
- if (wl_get_p2p_status(cfg, SCANNING)) {
- if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
- ndev = cfg->afx_hdl->dev;
- else
- ndev = cfg->escan_info.ndev;
-
- }
- if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
- WL_ERR_RLMT(("escan is not ready. ndev:%p drv_scan_status 0x%x"
- " e_type %d e_states %d\n",
- ndev, wl_get_drv_status(cfg, SCANNING, ndev),
- ntoh32(e->event_type), ntoh32(e->status)));
- goto exit;
- }
- escan_result = (wl_escan_result_t *)data;
-
-#ifndef WL_DRV_AVOID_SCANCACHE
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if (!escan_result) {
- WL_ERR(("Invalid escan result (NULL pointer)\n"));
- goto exit;
- }
- if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
- (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
- WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
- goto exit;
- }
- if (dtoh16(escan_result->bss_count) != 1) {
- WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
- goto exit;
- }
- bi = escan_result->bss_info;
- if (!bi) {
- WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
- goto exit;
- }
- bi_length = dtoh32(bi->length);
- if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
- WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
- goto exit;
- }
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0)
- goto exit;
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
- }
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
-
- if ((channel > MAXCHANNEL) || (channel <= 0))
- channel = WL_INVALID;
- else
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
-
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- goto exit;
- }
-
- } else {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
- int remove_lower_rssi = FALSE;
-
- bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- list = wl_escan_get_buf(cfg, FALSE);
- if (scan_req_match(cfg)) {
-#ifdef WL_HOST_BAND_MGMT
- s32 channel_band = 0;
- chanspec_t chspec;
-#endif /* WL_HOST_BAND_MGMT */
- /* p2p scan && allow only probe response */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
- bi->ie_length)) == NULL) {
- WL_ERR(("Couldn't find P2PIE in probe"
- " response/beacon\n"));
- goto exit;
- }
-#ifdef WL_HOST_BAND_MGMT
- chspec = wl_chspec_driver_to_host(bi->chanspec);
- channel_band = CHSPEC2WLC_BAND(chspec);
-
- if ((cfg->curr_band == WLC_BAND_5G) &&
- (channel_band == WLC_BAND_2G)) {
- /* Avoid sending the GO results in band conflict */
- if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
- P2P_SEID_GROUP_ID) != NULL)
- goto exit;
- }
-#endif /* WL_HOST_BAND_MGMT */
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- if (bi_length > ESCAN_BUF_SIZE - list->buflen)
- remove_lower_rssi = TRUE;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- for (i = 0; i < list->count; i++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
- : list->bss_info;
- if (!bss) {
- WL_ERR(("bss is NULL\n"));
- goto exit;
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- WL_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
- bss->SSID, MAC2STRDBG(bss->BSSID.octet),
- i, bss->RSSI, list->count));
-
- if (remove_lower_rssi)
- wl_cfg80211_find_removal_candidate(bss, candidate);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
- == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
- bi->SSID_len == bss->SSID_len &&
- !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
-
- /* do not allow beacon data to update
- *the data recd from a probe response
- */
- if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
-
- WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
- " flags 0x%x, new: RSSI %d flags 0x%x\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
- bss->RSSI, bss->flags, bi->RSSI, bi->flags));
-
- if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
- /* preserve max RSSI if the measurements are
- * both on-channel or both off-channel
- */
- WL_SCAN(("%s("MACDBG"), same onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = MAX(bss->RSSI, bi->RSSI);
- } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
- /* preserve the on-channel rssi measurement
- * if the new measurement is off channel
- */
- WL_SCAN(("%s("MACDBG"), prev onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = bss->RSSI;
- bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
- }
- if (dtoh32(bss->length) != bi_length) {
- u32 prev_len = dtoh32(bss->length);
-
- WL_SCAN(("bss info replacement"
- " is occured(bcast:%d->probresp%d)\n",
- bss->ie_length, bi->ie_length));
- WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- prev_len, bi_length));
-
- if (list->buflen - prev_len + bi_length
- > ESCAN_BUF_SIZE) {
- WL_ERR(("Buffer is too small: keep the"
- " previous result of this AP\n"));
- /* Only update RSSI */
- bss->RSSI = bi->RSSI;
- bss->flags |= (bi->flags
- & WL_BSS_FLAGS_RSSI_ONCHANNEL);
- goto exit;
- }
-
- if (i < list->count - 1) {
- /* memory copy required by this case only */
- memmove((u8 *)bss + bi_length,
- (u8 *)bss + prev_len,
- list->buflen - cur_len - prev_len);
- }
- list->buflen -= prev_len;
- list->buflen += bi_length;
- }
- list->version = dtoh32(bi->version);
- memcpy((u8 *)bss, (u8 *)bi, bi_length);
- goto exit;
- }
- cur_len += dtoh32(bss->length);
- }
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
- WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
- goto exit;
- }
-#else
- WL_ERR(("Buffer is too small: ignoring\n"));
- goto exit;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
- }
-
- memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
- list->version = dtoh32(bi->version);
- list->buflen += bi_length;
- list->count++;
-
- /*
- * !Broadcast && number of ssid = 1 && number of channels =1
- * means specific scan to association
- */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
- goto exit;
- }
- }
- }
- else if (status == WLC_E_STATUS_SUCCESS) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
- escan_result->sync_id);
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_INFORM_MEM(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
- cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
- cfg->bss_list->count));
- }
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, false, false);
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
-#ifdef CUSTOMER_HW4_DEBUG
- if (wl_scan_timeout_dbg_enabled)
- wl_scan_timeout_dbg_clear();
-#endif /* CUSTOMER_HW4_DEBUG */
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
-#ifdef BCMCCX
- (status == WLC_E_STATUS_CCXFASTRM) ||
-#endif /* BCMCCX */
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Dump FW preserve buffer content */
- if (status == WLC_E_STATUS_ABORT) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- /* Handle all cases of scan abort */
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- wl_clr_p2p_status(cfg, SCANNING);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
- cfg->bss_list->count));
- }
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
- /* If sync_id is not matching, then the abort might have
- * come for the old scan req or for the in-driver initiated
- * scan. So do abort for scan_req for which sync_id is
- * matching.
- */
- WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
- "Ignore the scan abort event.\n",
- escan_result->sync_id, cfg->escan_info.cur_sync_id));
- goto exit;
- } else {
- /* sync id is matching, abort the scan */
- WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
- cfg->escan_info.cur_sync_id));
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
- }
-#else
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
- } else {
- /* If there is no pending host initiated scan, do nothing */
- WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
- }
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
- "scanned AP count=%d\n",
- cfg->bss_list->count));
- }
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
- }
- wl_escan_increment_sync_id(cfg, 2);
- }
-#else /* WL_DRV_AVOID_SCANCACHE */
- err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
-#endif /* WL_DRV_AVOID_SCANCACHE */
-exit:
- mutex_unlock(&cfg->usr_sync);
- return err;
-}
-
static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
{
u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
@@ -17453,12 +18159,9 @@
/* Mark it as to be reverted */
cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev && iter->wdev &&
iter->wdev->iftype == NL80211_IFTYPE_STATION) {
if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
@@ -17470,19 +18173,12 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
}
else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev && iter->wdev &&
iter->wdev->iftype == NL80211_IFTYPE_STATION) {
if (iter->roam_off != WL_INVALID) {
@@ -17496,10 +18192,6 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
}
return;
@@ -17517,12 +18209,9 @@
if (connected_cnt <= 1) {
return;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev could be null */
if (iter->ndev) {
chanspec = 0;
@@ -17545,10 +18234,6 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel"));
return;
}
@@ -17569,12 +18254,9 @@
return 0;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev could be null */
if (iter->ndev) {
chanspec = 0;
@@ -17594,10 +18276,6 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
WL_DBG(("RSDB mode is %s\n", rsdb_mode ? "enabled" : "disabled"));
return rsdb_mode;
@@ -17616,7 +18294,7 @@
rtt_status_info_t *rtt_status;
#endif /* RTT_SUPPORT */
if (dhd->busstate == DHD_BUS_DOWN) {
- WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__));
+ WL_ERR(("busstate is DHD_BUS_DOWN!\n"));
return 0;
}
WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
@@ -17659,7 +18337,8 @@
}
#endif /* DISABLE_FRAMEBURST_VSDB */
#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
/* Enable frameburst for
* STA/SoftAP concurrent mode
*/
@@ -17711,19 +18390,6 @@
}
return err;
}
-static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
-{
- int err = 0;
-
- cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_init_sync_id(cfg);
-
- /* Init scan_timeout timer */
- init_timer_compat(&cfg->scan_timeout, wl_scan_timeout, cfg);
-
- return err;
-}
#ifdef DHD_LOSSLESS_ROAMING
static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
@@ -17737,6 +18403,15 @@
}
#endif /* DHD_LOSSLESS_ROAMING */
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+static s32 wl_init_csa_timeout(struct bcm_cfg80211 *cfg)
+{
+ /* Init csa timer */
+ init_timer_compat(&cfg->csa_timeout, wl_csa_timeout, cfg);
+ return 0;
+}
+#endif /* !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA) */
+
static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
{
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
@@ -17774,11 +18449,14 @@
wl_init_event_handler(cfg);
mutex_init(&cfg->usr_sync);
mutex_init(&cfg->event_sync);
- mutex_init(&cfg->scan_complete);
mutex_init(&cfg->if_sync);
+ mutex_init(&cfg->scan_sync);
#ifdef WLTDLS
mutex_init(&cfg->tdls_sync);
#endif /* WLTDLS */
+#ifdef WL_BCNRECV
+ mutex_init(&cfg->bcn_sync);
+#endif /* WL_BCNRECV */
#ifdef WL_WPS_SYNC
wl_init_wps_reauth_sm(cfg);
#endif /* WL_WPS_SYNC */
@@ -17791,6 +18469,12 @@
return err;
}
#endif /* DHD_LOSSLESS_ROAMING */
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ err = wl_init_csa_timeout(cfg);
+ if (err) {
+ return err;
+ }
+#endif // endif
wl_init_conf(cfg->conf);
wl_init_prof(cfg, ndev);
wl_link_down(cfg);
@@ -17799,6 +18483,9 @@
cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
init_waitqueue_head(&cfg->ndp_if_change_event);
#endif /* WL_NAN */
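+ /* Initialize the host PMKID cache as an empty v3-format list */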
+ cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+ cfg->pmk_list->pmkids.count = 0;
+ cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
return err;
}
@@ -17812,6 +18499,9 @@
#ifdef DHD_LOSSLESS_ROAMING
del_timer_sync(&cfg->roam_timeout);
#endif // endif
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ del_timer_sync(&cfg->csa_timeout);
+#endif // endif
wl_deinit_priv_mem(cfg);
if (wl_cfg80211_netdev_notifier_registered) {
wl_cfg80211_netdev_notifier_registered = FALSE;
@@ -17871,7 +18561,7 @@
struct bcm_cfg80211 * cfg;
s32 err = 0;
s32 ret = 0;
- WL_INFORM_MEM(("In\n"));
+ WL_TRACE(("In\n"));
if (unlikely(!ndev)) {
WL_ERR(("ndev is invaild\n"));
return -ENODEV;
@@ -17933,6 +18623,33 @@
return wiphy_priv(wdev->wiphy);
}
+s32
+wl_cfg80211_net_attach(struct net_device *primary_ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(primary_ndev);
+
+#ifdef WL_STATIC_IF
+ char iname[STATIC_INAME_STRING_LEN];
+ int i = 0;
+#endif /* WL_STATIC_IF */
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return BCME_ERROR;
+ }
+#ifdef WL_STATIC_IF
+ /* Register dummy n/w iface. FW init will happen only from dev_open */
+ for (i = 0; i < DHD_NUM_STATIC_IFACES; i++) {
+ snprintf(iname, sizeof(iname), WL_STATIC_IFNAME_PREFIX, i+1);
+ if (wl_cfg80211_register_static_if(cfg, NL80211_IFTYPE_STATION,
+ iname, (DHD_MAX_IFS + i)) == NULL) {
+ WL_ERR(("static i/f registration failed!\n"));
+ return BCME_ERROR;
+ }
+ }
+#endif /* WL_STATIC_IF */
+ return BCME_OK;
+}
+
s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
{
struct wireless_dev *wdev;
@@ -18025,28 +18742,22 @@
#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
-#ifdef WL_IRQSET
- INIT_DELAYED_WORK(&cfg->irq_set_work, wl_irq_set_work_handler);
-#endif /* WL_IRQSET */
mutex_init(&cfg->pm_sync);
#ifdef WL_NAN
mutex_init(&cfg->nancfg.nan_sync);
init_waitqueue_head(&cfg->nancfg.nan_event_wait);
#endif /* WL_NAN */
- wl_cfg80211_set_bcmcfg(cfg);
+ cfg->rssi_sum_report = FALSE;
+#ifdef WL_BAM
+ wl_bad_ap_mngr_init(cfg);
+#endif /* WL_BAM */
#ifdef BIGDATA_SOFTAP
wl_attach_ap_stainfo(cfg);
#endif /* BIGDATA_SOFTAP */
- cfg->rssi_sum_report = FALSE;
-#ifdef WL_STATIC_IF
- /* Register dummy n/w iface. FW init will happen only from dev_open */
- if (wl_cfg80211_register_static_if(cfg, WL_IF_TYPE_AP,
- WL_STATIC_IFNAME_PREFIX) == NULL) {
- WL_ERR(("static i/f registration failed!\n"));
- err = -ENODEV;
- goto cfg80211_attach_out;
- }
-#endif /* WL_STATIC_IF */
+#ifdef ENABLE_HOGSQS
+ INIT_DELAYED_WORK(&cfg->hogsqs_eventwork,
+ wl_cfg80211_hogsqs_event_handler);
+#endif // endif
return err;
@@ -18063,9 +18774,10 @@
return;
}
wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-#ifdef WL_IRQSET
- cancel_delayed_work_sync(&cfg->irq_set_work);
-#endif /* WL_IRQSET */
+
+#ifdef ENABLE_HOGSQS
+ cancel_delayed_work_sync(&cfg->hogsqs_eventwork);
+#endif // endif
#if defined(OEM_ANDROID) && defined(COEX_DHCP)
wl_cfg80211_btcoex_deinit();
@@ -18097,6 +18809,11 @@
#ifdef WL_STATIC_IF
wl_cfg80211_unregister_static_if(cfg);
#endif /* WL_STATIC_IF */
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ if (timer_pending(&cfg->csa_timeout)) {
+ del_timer_sync(&cfg->csa_timeout);
+ }
+#endif // endif
#if defined(WL_CFG80211_P2P_DEV_IF)
if (cfg->p2p_wdev)
wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
@@ -18104,6 +18821,9 @@
#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
wl_cfg80211_detach_p2p(cfg);
#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#ifdef WL_BAM
+ wl_bad_ap_mngr_deinit(cfg);
+#endif /* WL_BAM */
#ifdef BIGDATA_SOFTAP
wl_detach_ap_stainfo(cfg);
#endif /* BIGDATA_SOFTAP */
@@ -18120,6 +18840,31 @@
WL_DBG(("Exit\n"));
}
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+void wl_cfg80211_register_dev_ril_bridge_event_notifier()
+{
+ WL_DBG(("Enter\n"));
+ if (!wl_cfg80211_ril_bridge_notifier_registered) {
+ s32 err = 0;
+ wl_cfg80211_ril_bridge_notifier_registered = TRUE;
+ err = register_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+ if (err) {
+ wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+ WL_ERR(("Failed to register ril_notifier! %d\n", err));
+ }
+ }
+}
+
+void wl_cfg80211_unregister_dev_ril_bridge_event_notifier()
+{
+ WL_DBG(("Enter\n"));
+ if (wl_cfg80211_ril_bridge_notifier_registered) {
+ wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+ unregister_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+ }
+}
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
static void wl_print_event_data(struct bcm_cfg80211 *cfg,
uint32 event_type, const wl_event_msg_t *e)
{
@@ -18162,47 +18907,99 @@
WL_DBG(("Enter \n"));
BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
+ cfg->wl_evt_hdlr_entry_time = OSL_LOCALTIME_NS();
DHD_EVENT_WAKE_LOCK(cfg->pub);
while ((e = wl_deq_event(cfg))) {
+ s32 status = ntoh32(e->emsg.status);
+ u32 event_type = ntoh32(e->emsg.event_type);
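+ /* Escan completion events are timestamped at each stage below to help track scan handling latency */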
+ bool scan_cmplt_evt = (event_type == WLC_E_ESCAN_RESULT) &&
+ ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT));
- /* Print only critical events to avoid too many prints */
- wl_print_event_data(cfg, e->etype, &e->emsg);
+ cfg->wl_evt_deq_time = OSL_LOCALTIME_NS();
+ if (scan_cmplt_evt) {
+ cfg->scan_deq_time = OSL_LOCALTIME_NS();
+ }
+ /* Print only critical events to avoid too many prints */
+ wl_print_event_data(cfg, e->etype, &e->emsg);
- if (e->emsg.ifidx > WL_MAX_IFS) {
- WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
- goto fail;
- }
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
+ }
- /* Make sure iface operations, don't creat race conditions */
- mutex_lock(&cfg->if_sync);
- if (!(wdev = wl_get_wdev_by_fw_idx(cfg,
- e->emsg.bsscfgidx, e->emsg.ifidx))) {
- /* For WLC_E_IF would be handled by wl_host_event */
- if (e->etype != WLC_E_IF)
- WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
- " Ignoring event.\n", e->emsg.bsscfgidx));
- } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- if (dhd->busstate == DHD_BUS_DOWN) {
- WL_ERR((": BUS is DOWN.\n"));
- } else
- {
- WL_DBG(("event_type %d event_sub %d\n",
- ntoh32(e->emsg.event_type),
- ntoh32(e->emsg.reason)));
- cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
- &e->emsg, e->edata);
+ /* Make sure iface operations don't create race conditions */
+ mutex_lock(&cfg->if_sync);
+ if (!(wdev = wl_get_wdev_by_fw_idx(cfg,
+ e->emsg.bsscfgidx, e->emsg.ifidx))) {
+ /* For WLC_E_IF would be handled by wl_host_event */
+ if (e->etype != WLC_E_IF)
+ WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+ " Ignoring event.\n", e->emsg.bsscfgidx));
+ } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR((": BUS is DOWN.\n"));
+ } else
+ {
+ WL_DBG(("event_type %d event_sub %d\n",
+ ntoh32(e->emsg.event_type),
+ ntoh32(e->emsg.reason)));
+ cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
+ &e->emsg, e->edata);
+ if (scan_cmplt_evt) {
+ cfg->scan_hdlr_cmplt_time = OSL_LOCALTIME_NS();
}
- } else {
- WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
}
- mutex_unlock(&cfg->if_sync);
+ } else {
+ WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+ mutex_unlock(&cfg->if_sync);
fail:
- wl_put_event(cfg, e);
+ wl_put_event(cfg, e);
+ if (scan_cmplt_evt) {
+ cfg->scan_cmplt_time = OSL_LOCALTIME_NS();
+ }
+ cfg->wl_evt_hdlr_exit_time = OSL_LOCALTIME_NS();
}
DHD_EVENT_WAKE_UNLOCK(cfg->pub);
}
+/*
+ * Generic API to handle critical events that need neither cfg
+ * enqueuing nor sleepable API calls.
+ */
+s32
+wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+ const wl_event_msg_t * e)
+{
+ s32 ret = BCME_ERROR;
+ u32 event_type = ntoh32(e->event_type);
+
+ if (event_type >= WLC_E_LAST) {
+ return BCME_ERROR;
+ }
+
+ switch (event_type) {
+ case WLC_E_NAN_CRITICAL: {
+#ifdef WL_NAN
+ if (ntoh32(e->reason) == WL_NAN_EVENT_STOP) {
+ /* Wakeup nan stop event waiting thread */
+ WL_INFORM_MEM((">> Critical Nan Stop Event Received\n"));
+ OSL_SMP_WMB();
+ cfg->nancfg.nan_event_recvd = true;
+ OSL_SMP_WMB();
+ wake_up(&cfg->nancfg.nan_event_wait);
+ ret = BCME_OK;
+ }
+#endif /* WL_NAN */
+ break;
+ }
+ default:
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
void
wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
{
@@ -18211,7 +19008,7 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
struct net_info *netinfo;
- WL_DBG(("event_type (%d): reason (%d): %s\n", event_type, ntoh32(e->reason),
+ WL_TRACE(("event_type (%d): reason (%d): %s\n", event_type, ntoh32(e->reason),
bcmevent_get_name(event_type)));
if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) {
WL_ERR(("Stale event ignored\n"));
@@ -18230,11 +19027,6 @@
}
#endif /* OEM_ANDROID */
- if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
- WL_ERR(("during IF change, ignore event %d\n", event_type));
- return;
- }
-
if (event_type == WLC_E_IF) {
/* Don't process WLC_E_IF events in wl_cfg80211 layer */
return;
@@ -18246,7 +19038,12 @@
* created via cfg80211 interface. so the event is not of interest
* to the cfg80211 layer.
*/
- WL_DBG(("ignore event %d, not interested\n", event_type));
+ WL_TRACE(("ignore event %d, not interested\n", event_type));
+ return;
+ }
+
+ /* Handle wl_cfg80211_critical_events */
+ if (wl_cfg80211_handle_critical_events(cfg, e) == BCME_OK) {
return;
}
@@ -18257,15 +19054,6 @@
WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
}
- /* Mark timeout value for thread sched */
- if ((event_type == WLC_E_ESCAN_RESULT) &&
- ((status == WLC_E_STATUS_SUCCESS) ||
- (status == WLC_E_STATUS_ABORT))) {
- cfg->scan_enq_time = OSL_SYSUPTIME_US();
- WL_INFORM_MEM(("Enqueing escan completion (%d). WQ state:0x%x \n",
- status, work_busy(&cfg->event_work)));
- }
-
if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
#ifdef OEM_ANDROID
queue_work(cfg->event_workq, &cfg->event_work);
@@ -18273,6 +19061,14 @@
schedule_work(&cfg->event_work);
#endif /* OEM_ANDROID */
}
+ /* Mark timeout value for thread sched */
+ if ((event_type == WLC_E_ESCAN_RESULT) &&
+ ((status == WLC_E_STATUS_SUCCESS) ||
+ (status == WLC_E_STATUS_ABORT))) {
+ cfg->scan_enq_time = OSL_LOCALTIME_NS();
+ WL_INFORM_MEM(("Enqueing escan completion (%d). WQ state:0x%x \n",
+ status, work_busy(&cfg->event_work)));
+ }
}
static void wl_init_eq(struct bcm_cfg80211 *cfg)
@@ -18331,7 +19127,7 @@
data_len = 0;
if (data)
data_len = ntoh32(msg->datalen);
- evtq_size = sizeof(struct wl_event_q) + data_len;
+ evtq_size = (uint32)(sizeof(struct wl_event_q) + data_len);
e = (struct wl_event_q *)MALLOCZ(cfg->osh, evtq_size);
if (unlikely(!e)) {
WL_ERR(("event alloc failed\n"));
@@ -18491,7 +19287,275 @@
return err;
}
-static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+static void wl_get_bwcap(struct bcm_cfg80211 *cfg, u32 bw_cap[])
+{
+ u32 band, mimo_bwcap;
+ int err;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
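+ /* The "bw_cap" iovar is queried per band: the band ID is passed in via the int buffer and the bandwidth capability bitmap is returned in place */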
+ band = WLC_BAND_2G;
+ err = wldev_iovar_getint(dev, "bw_cap", &band);
+ if (likely(!err)) {
+ bw_cap[NL80211_BAND_2GHZ] = band;
+ band = WLC_BAND_5G;
+ err = wldev_iovar_getint(dev, "bw_cap", &band);
+ if (likely(!err)) {
+ bw_cap[NL80211_BAND_5GHZ] = band;
+ return;
+ }
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ WARN_ON(1);
+ return;
+ }
+
+ WL_ERR(("fallback to mimo_bw_cap info\n"));
+ mimo_bwcap = 0;
+ err = wldev_iovar_getint(dev, "mimo_bw_cap", &mimo_bwcap);
+ if (unlikely(err))
+ /* assume 20MHz if firmware does not give a clue */
+ mimo_bwcap = WLC_N_BW_20ALL;
+
+ switch (mimo_bwcap) {
+ case WLC_N_BW_40ALL:
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20IN2G_40IN5G:
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20ALL:
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ break;
+ default:
+ WL_ERR(("invalid mimo_bw_cap value\n"));
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ }
+}
+
+static void wl_update_ht_cap(struct ieee80211_supported_band *band,
+ u32 bwcap, u32 nchain)
+{
+ band->ht_cap.ht_supported = TRUE;
+ if (bwcap & WLC_BW_40MHZ_BIT) {
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /* An HT STA shall support all EQM rates for each of its nchain spatial streams */
+ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static void wl_update_vht_cap(struct bcm_cfg80211 *cfg, struct ieee80211_supported_band *band,
+ u32 bwcap)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+ u32 j = 0;
+ s32 txstreams = 0;
+ s32 rxstreams = 0;
+ s32 ldpc_cap = 0;
+ s32 stbc_rx = 0;
+ s32 stbc_tx = 0;
+ s32 txbf_bfe_cap = 0;
+ s32 txbf_bfr_cap = 0;
+
+ /* not allowed in 2.4G band */
+ if (band->band == IEEE80211_BAND_2GHZ)
+ return;
+
+ if (bwcap == WLC_N_BW_40ALL || bwcap == WLC_N_BW_20IN2G_40IN5G)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ band->vht_cap.vht_supported = true;
+
+ err = wldev_iovar_getint(dev, "txstreams", &txstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "rxstreams", &rxstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading rxstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading ldpc_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_rx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_tx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfe_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
+ }
+
+ /* Supported */
+ band->vht_cap.vht_supported = TRUE;
+
+ for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) {
+ /* TX stream rates. */
+ if (j <= txstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ band->vht_cap.vht_mcs.tx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ band->vht_cap.vht_mcs.tx_mcs_map);
+ }
+
+ /* RX stream rates. */
+ if (j <= rxstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ band->vht_cap.vht_mcs.rx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ band->vht_cap.vht_mcs.rx_mcs_map);
+ }
+ }
+
+ /* Capabilities */
+ /* 80 MHz is mandatory */
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ if (WL_BW_CAP_160MHZ(bwcap)) {
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160;
+ }
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+
+ if (ldpc_cap)
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_RXLDPC;
+
+ if (stbc_tx)
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_TXSTBC;
+
+ if (stbc_rx)
+ band->vht_cap.cap |=
+ (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT);
+
+ if (txbf_bfe_cap)
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+
+ if (txbf_bfr_cap) {
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+
+ if (txbf_bfe_cap || txbf_bfr_cap) {
+ band->vht_cap.cap |=
+ (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT);
+ band->vht_cap.cap |=
+ ((txstreams - 1) <<
+ VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT);
+ band->vht_cap.cap |=
+ IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
+ }
+
+ /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
+ band->vht_cap.cap |=
+ (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+
+ WL_DBG(("%s 5GHz band vht_enab=%d vht_cap=%08x "
+ "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
+ __FUNCTION__,
+ band->vht_cap.vht_supported,
+ band->vht_cap.cap,
+ band->vht_cap.vht_mcs.rx_mcs_map,
+ band->vht_cap.vht_mcs.tx_mcs_map));
+}
+#endif // endif
+
+/* make up event mask ext message iovar for event larger than 128 */
+s32 wl_add_remove_eventextmsg(struct net_device *ndev, u16 event, bool add)
+{
+ uint8 msglen;
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ char* iov_buf = NULL;
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ if (!ndev)
+ return -ENODEV;
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg)
+ return -ENODEV;
+
+ iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (iov_buf == NULL) {
+ WL_ERR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+ return BCME_NOMEM;
+ }
+
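+ /* One bit per event ID up to WLC_E_LAST, rounded up to whole bytes, plus the fixed eventmsgs_ext_t header */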
+ msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+ eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+ if (eventmask_msg == NULL) {
+ WL_ERR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+ return BCME_NOMEM;
+ }
+ bzero(eventmask_msg, msglen);
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+ /* Setup event_msgs */
+ err = wldev_iovar_getbuf(ndev, "event_msgs_ext", (void *)eventmask_msg,
+ msglen, iov_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err == 0) {
+ bcopy(iov_buf, eventmask_msg, msglen);
+
+ if (add) {
+ setbit(eventmask_msg->mask, event);
+ } else {
+ clrbit(eventmask_msg->mask, event);
+ }
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+ err = wldev_iovar_setbuf(ndev, "event_msgs_ext", (void *)eventmask_msg,
+ msglen, iov_buf, WLC_IOCTL_SMLEN, NULL);
+
+ if (err)
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ }
+
+ if (eventmask_msg)
+ kfree(eventmask_msg);
+ if (iov_buf)
+ kfree(iov_buf);
+ return err;
+}
+
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap[])
{
struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
struct ieee80211_channel *band_chan_arr = NULL;
@@ -18505,7 +19569,7 @@
u8 *pbuf = NULL;
bool dfs_radar_disabled = FALSE;
-#define LOCAL_BUF_LEN 1024
+#define LOCAL_BUF_LEN 2048
pbuf = (u8 *)MALLOCZ(cfg->osh, LOCAL_BUF_LEN);
if (pbuf == NULL) {
WL_ERR(("failed to allocate local buf\n"));
@@ -18540,13 +19604,13 @@
array_size = ARRAYSIZE(__wl_2ghz_channels);
n_cnt = &n_2g;
band = IEEE80211_BAND_2GHZ;
- ht40_allowed = (bw_cap == WLC_N_BW_40ALL)? true : false;
+ ht40_allowed = (bw_cap[band] == WLC_N_BW_40ALL)? true : false;
} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
band_chan_arr = __wl_5ghz_a_channels;
array_size = ARRAYSIZE(__wl_5ghz_a_channels);
n_cnt = &n_5g;
band = IEEE80211_BAND_5GHZ;
- ht40_allowed = (bw_cap == WLC_N_BW_20ALL)? false : true;
+ ht40_allowed = (bw_cap[band] == WLC_N_BW_20ALL)? false : true;
} else {
WL_ERR(("Invalid channel Sepc. 0x%x.\n", c));
continue;
@@ -18652,22 +19716,19 @@
s32 err = 0;
s32 index = 0;
s32 nmode = 0;
+ u32 rxchain;
+ u32 nchain;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
- u32 j = 0;
s32 vhtmode = 0;
- s32 txstreams = 0;
- s32 rxstreams = 0;
- s32 ldpc_cap = 0;
- s32 stbc_rx = 0;
- s32 stbc_tx = 0;
- s32 txbf_bfe_cap = 0;
- s32 txbf_bfr_cap = 0;
#endif // endif
- s32 bw_cap = 0;
+#ifdef WL_SAE
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WL_SAE */
+ u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
s32 cur_band = -1;
struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
- memset(bandlist, 0, sizeof(bandlist));
+ bzero(bandlist, sizeof(bandlist));
err = wldev_ioctl_get(dev, WLC_GET_BANDLIST, bandlist,
sizeof(bandlist));
if (unlikely(err)) {
@@ -18691,57 +19752,27 @@
if (unlikely(err)) {
WL_ERR(("error reading vhtmode (%d)\n", err));
}
-
- if (vhtmode) {
- err = wldev_iovar_getint(dev, "txstreams", &txstreams);
- if (unlikely(err)) {
- WL_ERR(("error reading txstreams (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "rxstreams", &rxstreams);
- if (unlikely(err)) {
- WL_ERR(("error reading rxstreams (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap);
- if (unlikely(err)) {
- WL_ERR(("error reading ldpc_cap (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx);
- if (unlikely(err)) {
- WL_ERR(("error reading stbc_rx (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx);
- if (unlikely(err)) {
- WL_ERR(("error reading stbc_tx (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap);
- if (unlikely(err)) {
- WL_ERR(("error reading txbf_bfe_cap (%d)\n", err));
- }
-
- err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap);
- if (unlikely(err)) {
- WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
- }
- }
#endif // endif
/* For nmode and vhtmode check bw cap */
if (nmode ||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
- vhtmode ||
+ vhtmode ||
#endif // endif
- 0) {
- err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
- if (unlikely(err)) {
- WL_ERR(("error get mimo_bw_cap (%d)\n", err));
- }
+ 0) {
+ wl_get_bwcap(cfg, bw_cap);
}
+ err = wldev_iovar_getint(dev, "rxchain", &rxchain);
+ if (err) {
+ WL_ERR(("rxchain error (%d)\n", err));
+ nchain = 1;
+ } else {
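+ /* Count the set bits in rxchain (each pass clears the lowest set bit) to get the number of RX chains */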
+ for (nchain = 0; rxchain; nchain++)
+ rxchain = rxchain & (rxchain - 1);
+ }
+ WL_DBG(("nchain=%d\n", nchain));
+
err = wl_construct_reginfo(cfg, bw_cap);
if (err) {
WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
@@ -18755,115 +19786,22 @@
for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
index = -1;
if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) {
- bands[IEEE80211_BAND_5GHZ] =
- &__wl_band_5ghz_a;
index = IEEE80211_BAND_5GHZ;
- if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G))
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
-
+ bands[index] = &__wl_band_5ghz_a;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
- /* VHT capabilities. */
if (vhtmode) {
- /* Supported */
- bands[index]->vht_cap.vht_supported = TRUE;
-
- for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) {
- /* TX stream rates. */
- if (j <= txstreams) {
- VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
- bands[index]->vht_cap.vht_mcs.tx_mcs_map);
- } else {
- VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
- bands[index]->vht_cap.vht_mcs.tx_mcs_map);
- }
-
- /* RX stream rates. */
- if (j <= rxstreams) {
- VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
- bands[index]->vht_cap.vht_mcs.rx_mcs_map);
- } else {
- VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
- bands[index]->vht_cap.vht_mcs.rx_mcs_map);
- }
- }
-
- /* Capabilities */
- /* 80 MHz is mandatory */
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_80;
-
- if (WL_BW_CAP_160MHZ(bw_cap)) {
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160;
- }
-
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
-
- if (ldpc_cap)
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_RXLDPC;
-
- if (stbc_tx)
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_TXSTBC;
-
- if (stbc_rx)
- bands[index]->vht_cap.cap |=
- (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT);
-
- if (txbf_bfe_cap)
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
-
- if (txbf_bfr_cap) {
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
- }
-
- if (txbf_bfe_cap || txbf_bfr_cap) {
- bands[index]->vht_cap.cap |=
- (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT);
- bands[index]->vht_cap.cap |=
- ((txstreams - 1) <<
- VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT);
- bands[index]->vht_cap.cap |=
- IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
- }
-
- /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
- bands[index]->vht_cap.cap |=
- (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
- WL_DBG(("%s band[%d] vht_enab=%d vht_cap=%08x "
- "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
- __FUNCTION__, index,
- bands[index]->vht_cap.vht_supported,
- bands[index]->vht_cap.cap,
- bands[index]->vht_cap.vht_mcs.rx_mcs_map,
- bands[index]->vht_cap.vht_mcs.tx_mcs_map));
+ wl_update_vht_cap(cfg, bands[index], bw_cap[index]);
}
#endif // endif
}
else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
- bands[IEEE80211_BAND_2GHZ] =
- &__wl_band_2ghz;
index = IEEE80211_BAND_2GHZ;
- if (bw_cap == WLC_N_BW_40ALL)
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ bands[index] = &__wl_band_2ghz;
}
if ((index >= 0) && nmode) {
- bands[index]->ht_cap.cap |=
- (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
- bands[index]->ht_cap.ht_supported = TRUE;
- bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial stream */
- bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+ wl_update_ht_cap(bands[index], bw_cap[index], nchain);
}
-
}
wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
@@ -18878,6 +19816,9 @@
if (notify)
wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+#ifdef WL_SAE
+ (void)wl_wiphy_update_sae(wiphy, dhd);
+#endif /* WL_SAE */
return 0;
}
@@ -18899,6 +19840,7 @@
#ifdef WL_HOST_BAND_MGMT
s32 ret = 0;
#endif /* WL_HOST_BAND_MGMT */
+ struct net_info *netinfo = NULL;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
#ifdef WBTEXT
@@ -18907,6 +19849,9 @@
#ifdef WLTDLS
u32 tdls;
#endif /* WLTDLS */
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
WL_DBG(("In\n"));
@@ -18917,7 +19862,23 @@
if (unlikely(err))
return err;
- err = wl_config_infra(cfg, ndev, wdev->iftype);
+ /* Always bring up interface in STA mode.
+ * Did observe , if previous SofAP Bringup/cleanup
+ * is not done properly, iftype is stuck with AP mode.
+ * So during next wlan0 up, forcing the type to STA
+ */
+ netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ if (!netinfo) {
+ WL_ERR(("there is no netinfo\n"));
+ return -ENODEV;
+ }
+ ndev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ netinfo->iftype = WL_IF_TYPE_STA;
+
+ if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
+ }
+ err = wl_config_infra(cfg, ndev, wl_iftype);
if (unlikely(err && err != -EINPROGRESS)) {
WL_ERR(("wl_config_infra failed\n"));
if (err == -1) {
@@ -18946,6 +19907,27 @@
WL_ERR(("wl_init_scan failed\n"));
return err;
}
+ err = wldev_iovar_getbuf(ndev, "wlc_ver", NULL, 0,
+ &cfg->wlc_ver, sizeof(wl_wlc_version_t), NULL);
+ if (likely(!err)) {
+ WL_INFORM(("wl version. Major: %d\n",
+ cfg->wlc_ver.wlc_ver_major));
+ if ((cfg->wlc_ver.wlc_ver_major >= MIN_ESCAN_PARAM_V2_FW_MAJOR) &&
+ (wldev_iovar_getbuf(ndev, "scan_ver", NULL, 0,
+ ioctl_buf, sizeof(ioctl_buf), NULL) == BCME_OK)) {
+ WL_INFORM_MEM(("scan_params v2\n"));
+ /* use scan_params ver2 */
+ cfg->scan_params_v2 = true;
+ }
+ } else {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore on unsupported chips */
+ err = BCME_OK;
+ } else {
+ WL_ERR(("wlc_ver query failed. err: %d\n", err));
+ return err;
+ }
+ }
#ifdef DHD_LOSSLESS_ROAMING
if (timer_pending(&cfg->roam_timeout)) {
del_timer_sync(&cfg->roam_timeout);
@@ -18988,10 +19970,24 @@
cfg->tdls_supported = true;
}
#endif /* WLTDLS */
+#ifdef WL_IFACE_MGMT
+#ifdef CUSTOM_IF_MGMT_POLICY
+ cfg->iface_data.policy = CUSTOM_IF_MGMT_POLICY;
+#else
+ cfg->iface_data.policy = WL_IF_POLICY_DEFAULT;
+#endif /* CUSTOM_IF_MGMT_POLICY */
+#endif /* WL_IFACE_MGMT */
#ifdef WL_NAN
- WL_ERR(("Initializing NAN\n"));
- wl_cfgnan_init(cfg);
-#endif /* WL_NAN */
+#ifdef WL_NANP2P
+ if (FW_SUPPORTED(dhd, nanp2p)) {
+ /* Enable NANP2P concurrent support */
+ cfg->conc_disc = WL_NANP2P_CONC_SUPPORT;
+ WL_INFORM_MEM(("nan + p2p conc discovery is supported\n"));
+ cfg->nan_p2p_supported = true;
+ }
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
+
INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
wl_set_drv_status(cfg, READY, ndev);
return err;
@@ -19000,7 +19996,6 @@
static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
- unsigned long flags;
struct net_info *iter, *next;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
@@ -19026,9 +20021,6 @@
/* Delete pm_enable_work */
wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-#ifdef WL_IRQSET
- cancel_delayed_work_sync(&cfg->irq_set_work);
-#endif /* WL_IRQSET */
if (cfg->p2p_supported) {
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
@@ -19047,6 +20039,12 @@
#endif /* PROP_TXSTATUS_VSDB */
}
+#ifdef WL_NAN
+ mutex_lock(&cfg->if_sync);
+ wl_cfgnan_disable(cfg, NAN_BUS_IS_DOWN);
+ mutex_unlock(&cfg->if_sync);
+#endif /* WL_NAN */
+
if (!dhd_download_fw_on_driverload) {
/* For built-in drivers/other drivers that do reset on
* "ifconfig <primary_iface> down", cleanup any left
@@ -19057,11 +20055,6 @@
/* Clear used mac addr mask */
cfg->vif_macaddr_mask = 0;
-#ifdef WL_NAN
- wl_cfgnan_stop_handler(ndev, cfg, dhd->busstate, false);
- wl_cfgnan_deinit(cfg, dhd->busstate);
-#endif /* WL_NAN */
-
if (dhd->up)
{
/* If primary BSS is operational (for e.g SoftAP), bring it down */
@@ -19074,43 +20067,57 @@
wl_cfg80211_clear_security(cfg);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) /* p2p discovery iface is null */
wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
#ifdef P2P_LISTEN_OFFLOADING
wl_cfg80211_p2plo_deinit(cfg);
#endif /* P2P_LISTEN_OFFLOADING */
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- if (cfg->scan_request) {
- wl_notify_scan_done(cfg, true);
- cfg->scan_request = NULL;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ /* cancel and notify scan complete, if scan request is pending */
+ wl_cfg80211_cancel_scan(cfg);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev ptr could be null */
if (iter->ndev == NULL)
continue;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
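+ /* Log the per-interface connection state bits (connecting:connected:disconnecting:nested) before they are cleared below */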
+ WL_INFORM_MEM(("wl_cfg80211_down. connection state bit status: [%u:%u:%u:%u]"
+ " for %s\n",
+ wl_get_drv_status(cfg, CONNECTING, iter->ndev),
+ wl_get_drv_status(cfg, CONNECTED, iter->ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, iter->ndev),
+ wl_get_drv_status(cfg, NESTED_CONNECT, iter->ndev),
+ iter->ndev->name));
+
+ if ((iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+ iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
+ wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+
CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
}
+
+ if ((iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+
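+ /* A connect attempt is still pending; report it to cfg80211 as failed so the upper-layer state stays consistent across the down */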
+ u8 *latest_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct cfg80211_bss *bss = CFG80211_GET_BSS(wiphy, NULL, latest_bssid,
+ wdev->ssid, wdev->ssid_len);
+
+ BCM_REFERENCE(bss);
+
+ CFG80211_CONNECT_RESULT(ndev,
+ latest_bssid, bss, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
wl_clr_drv_status(cfg, READY, iter->ndev);
wl_clr_drv_status(cfg, SCANNING, iter->ndev);
@@ -19123,10 +20130,6 @@
wl_clr_drv_status(cfg, NESTED_CONNECT, iter->ndev);
wl_clr_drv_status(cfg, CFG80211_CONNECT, iter->ndev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
NL80211_IFTYPE_STATION;
#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
@@ -19170,6 +20173,7 @@
#ifdef WL11U
/* Clear interworking element. */
if (cfg->wl11u) {
+ wl_clear_iwdata(cfg);
cfg->wl11u = FALSE;
}
#endif /* WL11U */
@@ -19192,6 +20196,63 @@
return err;
}
+#ifdef WL_STATIC_IF
+/* Gets the index at which ndev is stored in the static_ndev array, looked up by name.
+ * Returns an error if no match is found; the caller must check the
+ * validity of the return value.
+ */
+int
+get_iface_num(const char *name, struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+ for (i = 0; i < DHD_NUM_STATIC_IFACES; i++) {
+ if (strcmp(name, cfg->static_ndev[i]->name) == 0) {
+ return i;
+ }
+ }
+ return BCME_ERROR;
+}
+
+/* Checks whether an interface is static by scanning the static_ndev array */
+bool
+is_static_iface(struct bcm_cfg80211 *cfg, struct net_device *net)
+{
+ int i = 0;
+ for (i = 0; i < DHD_NUM_STATIC_IFACES; i++) {
+ if ((cfg && (cfg->static_ndev[i] == net))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Checks whether an interface is static when the ndev is not available,
+ * using the net device name that is passed in.
+ */
+bool
+is_static_iface_name(const char *name, struct bcm_cfg80211 *cfg)
+{
+ int inum = 0;
+ inum = get_iface_num(name, cfg);
+ if (inum >= 0) {
+ return true;
+ }
+ return false;
+}
+
+/* Returns the static_ndev_state of the virtual interface */
+int
+static_if_ndev_get_state(struct bcm_cfg80211 *cfg, struct net_device *net)
+{
+ int i = 0;
+ for (i = 0; i < DHD_NUM_STATIC_IFACES; i++) {
+ if ((cfg && (cfg->static_ndev[i] == net))) {
+ return cfg->static_ndev_state[i];
+ }
+ }
+ return NDEV_STATE_NONE;
+}
+#endif /* WL_STATIC_IF */
s32 wl_cfg80211_up(struct net_device *net)
{
struct bcm_cfg80211 *cfg;
@@ -19263,18 +20324,19 @@
WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
}
#endif /* DISABLE_PM_BCNRX */
+#ifdef WL_CHAN_UTIL
+ interr = wl_cfg80211_start_bssload_report(net);
+ if (unlikely(interr)) {
+ WL_ERR(("%s: Failed to start bssload_report eventing, err=%d\n",
+ __FUNCTION__, interr));
+ }
+#endif /* WL_CHAN_UTIL */
mutex_unlock(&cfg->usr_sync);
#ifdef WLAIBSS_MCHAN
bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
#endif /* WLAIBSS_MCHAN */
-
-#ifdef DUAL_STA_STATIC_IF
- /* Static Interface support is currently supported only for STA only builds (without P2P) */
- wl_cfg80211_create_iface(cfg->wdev->wiphy, WL_IF_TYPE_STA, NULL, "wlan%d");
-#endif /* DUAL_STA_STATIC_IF */
-
return err;
}
@@ -19296,14 +20358,14 @@
dhd = (dhd_pub_t *)(cfg->pub);
#if defined(DHD_HANG_SEND_UP_TEST)
if (dhd->req_hang_type) {
- WL_ERR(("%s, Clear HANG test request 0x%x\n",
- __FUNCTION__, dhd->req_hang_type));
+ WL_ERR(("wl_cfg80211_hang, Clear HANG test request 0x%x\n",
+ dhd->req_hang_type));
dhd->req_hang_type = 0;
}
#endif /* DHD_HANG_SEND_UP_TEST */
if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
- WL_ERR(("%s, Invalid hang reason 0x%x\n",
- __FUNCTION__, dhd->hang_reason));
+ WL_ERR(("wl_cfg80211_hang, Invalid hang reason 0x%x\n",
+ dhd->hang_reason));
dhd->hang_reason = HANG_REASON_UNKNOWN;
}
#ifdef DHD_USE_EXTENDED_HANG_REASON
@@ -19351,7 +20413,26 @@
return err;
}
-static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+void
+wl_cfg80211_sta_ifdown(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ WL_DBG(("In\n"));
+
+ if (cfg) {
+ /* cancel scan if anything pending */
+ wl_cfg80211_cancel_scan(cfg);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ wl_get_drv_status(cfg, CONNECTED, dev)) {
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+ }
+}
+
+void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
{
unsigned long flags;
void *rptr = NULL;
@@ -19359,7 +20440,7 @@
if (!profile)
return NULL;
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SEC:
rptr = &profile->sec;
@@ -19376,8 +20457,11 @@
case WL_PROF_CHAN:
rptr = &profile->channel;
break;
+ case WL_PROF_LATEST_BSSID:
+ rptr = profile->latest_bssid;
+ break;
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
if (!rptr)
WL_ERR(("invalid item (%d)\n", item));
return rptr;
@@ -19394,11 +20478,11 @@
if (!profile)
return WL_INVALID;
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SSID:
ssid = (const wlc_ssid_t *) data;
- memset(profile->ssid.SSID, 0,
+ bzero(profile->ssid.SSID,
sizeof(profile->ssid.SSID));
profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
@@ -19407,7 +20491,7 @@
if (data)
memcpy(profile->bssid, data, ETHER_ADDR_LEN);
else
- memset(profile->bssid, 0, ETHER_ADDR_LEN);
+ bzero(profile->bssid, ETHER_ADDR_LEN);
break;
case WL_PROF_SEC:
memcpy(&profile->sec, data, sizeof(profile->sec));
@@ -19424,11 +20508,20 @@
case WL_PROF_CHAN:
profile->channel = *(const u32*)data;
break;
+ case WL_PROF_LATEST_BSSID:
+ if (data) {
+ memcpy_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ data, ETHER_ADDR_LEN);
+ } else {
+ memset_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ 0, ETHER_ADDR_LEN);
+ }
+ break;
default:
err = -EOPNOTSUPP;
break;
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
if (err == -EOPNOTSUPP)
WL_ERR(("unsupported item (%d)\n", item));
@@ -19462,6 +20555,7 @@
struct wl_ie *ie = wl_to_ie(cfg);
ie->offset = 0;
+ bzero(ie->buf, sizeof(ie->buf));
}
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
@@ -19482,23 +20576,16 @@
}
static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool roam)
+ bool update_ssid)
{
u8 *ssidie;
int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
int32 remaining_ie_buf_len, available_buffer_len, unused_buf_len;
/* cfg80211_find_ie defined in kernel returning const u8 */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
/* ERROR out if
* 1. No ssid IE is FOUND or
@@ -19514,16 +20601,22 @@
remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
unused_buf_len = WL_EXTRA_BUF_MAX - (4 + bi->length + *ie_size);
if (ssidie[1] > available_buffer_len) {
- WL_ERR_MEM(("%s: skip wl_update_hidden_ap_ie : overflow\n", __FUNCTION__));
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: skip wl_update_hidden_ap_ie : overflow\n"));
return;
}
if (ssidie[1] != ssid_len) {
if (ssidie[1]) {
- WL_INFORM_MEM(("%s: Wrong SSID len: %d != %d\n",
- __FUNCTION__, ssidie[1], bi->SSID_len));
+ WL_ERR_RLMT(("wl_update_hidden_ap_ie: Wrong SSID len: %d != %d\n",
+ ssidie[1], bi->SSID_len));
}
- if ((roam && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
+ /*
+ * The bss info in firmware gets updated from both beacons and probe responses.
+ * For a hidden network, the bss_info updated from a beacon carries no SSID,
+ * which can keep cfg80211_get_bss from finding a match, so include the SSID
+ * element here.
+ */
+ if ((update_ssid && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
WL_INFORM_MEM(("Changing the SSID Info.\n"));
memmove(ssidie + ssid_len + 2,
(ssidie + 2) + ssidie[1],
@@ -19532,8 +20625,8 @@
*ie_size = *ie_size + ssid_len - ssidie[1];
ssidie[1] = ssid_len;
} else if (ssid_len < ssidie[1]) {
- WL_ERR_MEM(("%s: Invalid SSID len: %d < %d\n",
- __FUNCTION__, bi->SSID_len, ssidie[1]));
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: Invalid SSID len: %d < %d\n",
+ bi->SSID_len, ssidie[1]));
}
return;
}
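
For reference, the SSID expansion that wl_update_hidden_ap_ie() performs can be sketched in isolation: find the SSID element, shift the tail of the IE stream with memmove() to open a gap, and write the real SSID so cfg80211_get_bss() can match the hidden entry. This is a simplified stand-alone model, not the driver code; the driver's WL_EXTRA_BUF_MAX bookkeeping is reduced here to a single capacity check.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Expand the SSID IE (element ID 0) inside ie_stream in place.
 * buf_cap is the total capacity of the buffer holding ie_stream.
 * Returns the new ie_size, or 0 if there is not enough room.
 */
static size_t sketch_expand_ssid_ie(uint8_t *ie_stream, size_t ie_size, size_t buf_cap,
                const uint8_t *ssid, uint8_t ssid_len)
{
        uint8_t *ssidie = NULL;
        size_t off = 0;

        /* locate the SSID element (tiny stand-in for cfg80211_find_ie()) */
        while (off + 2 <= ie_size) {
                if (ie_stream[off] == 0) {      /* WLAN_EID_SSID */
                        ssidie = &ie_stream[off];
                        break;
                }
                off += 2 + ie_stream[off + 1];
        }
        if (!ssidie || ssidie[1] >= ssid_len)
                return ie_size;                 /* nothing to grow */
        if (ie_size + (ssid_len - ssidie[1]) > buf_cap)
                return 0;                       /* no room left in the buffer */

        /* shift the tail of the IE stream to open a gap, then write the SSID */
        memmove(ssidie + 2 + ssid_len, ssidie + 2 + ssidie[1],
                ie_size - (size_t)(ssidie + 2 + ssidie[1] - ie_stream));
        memcpy(ssidie + 2, ssid, ssid_len);
        ie_size += ssid_len - ssidie[1];
        ssidie[1] = ssid_len;
        return ie_size;
}
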
@@ -19597,13 +20690,13 @@
{
unsigned long flags;
- spin_lock_irqsave(&cfg->eq_lock, flags);
+ WL_CFG_EQ_LOCK(&cfg->eq_lock, flags);
return flags;
}
static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
{
- spin_unlock_irqrestore(&cfg->eq_lock, flags);
+ WL_CFG_EQ_UNLOCK(&cfg->eq_lock, flags);
}
static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
@@ -19708,7 +20801,7 @@
}
#ifdef WLTDLS
-static s32
+s32
wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data) {
@@ -19751,7 +20844,6 @@
case WLC_E_TDLS_PEER_DISCONNECTED :
if (cfg->tdls_mgmt_frame) {
MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame = NULL;
cfg->tdls_mgmt_frame_len = 0;
cfg->tdls_mgmt_freq = 0;
}
@@ -19795,10 +20887,10 @@
#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
{
s32 ret = 0;
-#ifdef WLTDLS
+#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
struct bcm_cfg80211 *cfg;
tdls_wfd_ie_iovar_t info;
- memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ bzero(&info, sizeof(info));
cfg = wl_get_cfg(dev);
#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
@@ -19813,23 +20905,31 @@
* using tdls_wfd_ie iovar
*/
case WLAN_TDLS_SET_PROBE_WFD_IE:
- WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_PROBE_WFD_IE\n"));
info.mode = TDLS_WFD_PROBE_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
memcpy(&info.data, buf, len);
info.length = len;
break;
case WLAN_TDLS_SET_SETUP_WFD_IE:
- WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_SETUP_WFD_IE\n"));
info.mode = TDLS_WFD_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
memcpy(&info.data, buf, len);
info.length = len;
break;
case WLAN_TDLS_SET_WFD_ENABLED:
- WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_ENABLED\n"));
dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
goto out;
case WLAN_TDLS_SET_WFD_DISABLED:
- WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_DISABLED\n"));
dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
goto out;
default:
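
The two len > sizeof(info.data) guards added above implement a simple pattern: reject a user-supplied IE that would overflow the fixed iovar payload before memcpy(). A stand-alone sketch, with a placeholder data[] size since the real capacity comes from the firmware ABI:

#include <string.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_wfd_ie_iovar {
        uint32_t mode;
        uint32_t length;
        uint8_t  data[512];     /* placeholder capacity, not the firmware's value */
};

static int sketch_fill_wfd_ie(struct sketch_wfd_ie_iovar *info,
                const uint8_t *buf, size_t len)
{
        if (len > sizeof(info->data))
                return -EINVAL; /* caller's IE would overflow the iovar payload */
        memcpy(info->data, buf, len);
        info->length = (uint32_t)len;
        return 0;
}
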
@@ -19844,7 +20944,7 @@
}
out:
-#endif /* WLTDLS */
+#endif /* TDLS_MSG_ONLY_WFD && WLTDLS */
return ret;
}
@@ -19865,7 +20965,7 @@
dhd_pub_t *dhdp;
bool tdls_auto_mode = false;
dhdp = (dhd_pub_t *)(cfg->pub);
- memset(&info, 0, sizeof(tdls_iovar_t));
+ bzero(&info, sizeof(tdls_iovar_t));
if (peer) {
memcpy(&info.ea, peer, ETHER_ADDR_LEN);
} else {
@@ -19878,7 +20978,7 @@
*/
if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
info.mode = TDLS_MANUAL_EP_WFD_TPQ;
- WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_oper: TDLS TUNNELED PRBOBE REQUEST\n"));
} else {
info.mode = TDLS_MANUAL_EP_DISCOVERY;
}
@@ -20018,11 +21118,8 @@
ret = BCME_OK;
goto done;
}
- ret = wl_notify_escan_complete(cfg, ndev, true, true);
- if (ret < 0) {
- WL_ERR(("set scan abort failed, error = %d\n", ret));
- goto done;
- }
+
+ wl_cfg80211_cancel_scan(cfg);
done:
return ret;
@@ -20038,6 +21135,16 @@
if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
valid = true;
}
+#ifdef IGUANA_LEGACY_CHIPS
+ /* channel 36 to 48 */
+ else if ((chanspec >= 0xd024) && (chanspec <= 0xd030)) {
+ valid = true;
+ }
+ /* channel 149 to 161 */
+ else if ((chanspec >= 0xd095) && (chanspec <= 0xd0a5)) {
+ valid = true;
+ }
+#else
/* channel 36 to 48 */
else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) {
valid = true;
@@ -20046,6 +21153,7 @@
else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) {
valid = true;
}
+#endif /* IGUANA_LEGACY_CHIPS */
else {
valid = false;
WL_INFORM_MEM(("invalid P2P chanspec, chanspec = %s\n",
@@ -20191,7 +21299,7 @@
struct bcm_cfg80211 *cfg = NULL;
struct net_device *ndev = NULL;
- memset(cmd, 0, total_len);
+ bzero(cmd, total_len);
cfg = wl_get_cfg(dev);
buf = (u8 *)MALLOC(cfg->osh, CHANSPEC_BUF_SIZE);
@@ -20346,24 +21454,24 @@
wl_debuglevel_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
- char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+ char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)], sublog[SUBLOGLEVELZ];
char *params, *token, *colon;
uint i, tokens, log_on = 0;
size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
- memset(tbuf, 0, sizeof(tbuf));
- memset(sublog, 0, sizeof(sublog));
+ bzero(tbuf, sizeof(tbuf));
+ bzero(sublog, sizeof(sublog));
if (copy_from_user(&tbuf, userbuf, minsize)) {
return -EFAULT;
}
- tbuf[minsize + 1] = '\0';
+ tbuf[minsize] = '\0';
params = &tbuf[0];
colon = strchr(params, '\n');
if (colon != NULL)
*colon = '\0';
while ((token = strsep(¶ms, " ")) != NULL) {
- memset(sublog, 0, sizeof(sublog));
+ bzero(sublog, sizeof(sublog));
if (token == NULL || !*token)
break;
if (*token == '\0')
@@ -20372,7 +21480,7 @@
if (colon != NULL) {
*colon = ' ';
}
- tokens = sscanf(token, "%s %u", sublog, &log_on);
+ tokens = sscanf(token, "%"S(SUBLOGLEVEL)"s %u", sublog, &log_on);
if (colon != NULL)
*colon = ':';
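
Two fixes land in this debugfs write path: the copied buffer is now terminated at index minsize (the old minsize + 1 wrote one byte past the copied data), and the %s conversion is bounded by stringifying SUBLOGLEVEL so sscanf() cannot overrun sublog[]. A user-space sketch of both, with illustrative SUBLOGLEVEL values rather than the driver's:

#include <stdio.h>
#include <string.h>

#define SUBLOGLEVEL  20                 /* max token characters accepted */
#define SUBLOGLEVELZ (SUBLOGLEVEL + 1)  /* + terminating NUL */

#define _S(x) #x
#define S(x) _S(x)                      /* S(SUBLOGLEVEL) -> "20" */

int main(void)
{
        char tbuf[64];
        char sublog[SUBLOGLEVELZ];
        unsigned int log_on = 0;
        size_t minsize;

        /* stand-in for copy_from_user(): copy at most sizeof(tbuf) - 1 bytes */
        const char *userbuf = "scan 1";
        minsize = strlen(userbuf) < sizeof(tbuf) - 1 ? strlen(userbuf) : sizeof(tbuf) - 1;
        memcpy(tbuf, userbuf, minsize);
        tbuf[minsize] = '\0';           /* terminate exactly after the copied bytes */

        /* expands to "%20s %u", so sublog[] (21 bytes) can never overflow */
        if (sscanf(tbuf, "%" S(SUBLOGLEVEL) "s %u", sublog, &log_on) == 2)
                printf("module=%s level=%u\n", sublog, log_on);
        return 0;
}
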
@@ -20402,9 +21510,9 @@
size_t count, loff_t *ppos)
{
char *param;
- char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+ char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)];
uint i;
- memset(tbuf, 0, sizeof(tbuf));
+ bzero(tbuf, sizeof(tbuf));
param = &tbuf[0];
for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
@@ -20493,16 +21601,14 @@
0, NULL) == BCME_OK) {
memcpy(mac->octet, ioctl_buf, ETHER_ADDR_LEN);
} else {
- memset(mac->octet, 0, ETHER_ADDR_LEN);
+ bzero(mac->octet, ETHER_ADDR_LEN);
}
}
-static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, s32 mode, u32 dev_role)
{
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (((dev_role == NL80211_IFTYPE_AP) &&
- !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
- ((dev_role == NL80211_IFTYPE_P2P_GO) &&
- !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+ if (((dev_role == NL80211_IFTYPE_AP) || (dev_role == NL80211_IFTYPE_P2P_GO)) &&
+ (mode != WL_MODE_AP))
{
WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
return false;
@@ -20546,120 +21652,6 @@
}
#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
-#ifdef WL11U
-static bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
-{
- bcm_tlv_t *ie;
-
-/* unfortunately it's too much work to dispose the const cast - bcm_parse_tlvs
- * is used everywhere and changing its prototype to take const qualifier needs
- * a massive change to all its callers...
- */
-
- if ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_INTERWORKING_ID))) {
- return ie;
- }
- return NULL;
-}
-
-static s32
-wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
-{
- ie_setbuf_t ie_setbuf;
-
- WL_DBG(("clear interworking IE\n"));
-
- memset(&ie_setbuf, 0, sizeof(ie_setbuf_t));
-
- ie_setbuf.ie_buffer.iecount = htod32(1);
- ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
-
- return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
-}
-
-static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len)
-{
- s32 err = BCME_OK;
- s32 buf_len;
- ie_setbuf_t *ie_setbuf;
- ie_getbuf_t ie_getbufp;
- char getbuf[WLC_IOCTL_SMLEN];
-
- if (ie_id != DOT11_MNG_INTERWORKING_ID) {
- WL_ERR(("unsupported (id=%d)\n", ie_id));
- return BCME_UNSUPPORTED;
- }
-
- /* access network options (1 octet) is the mandatory field */
- if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
- WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
- return BCME_BADARG;
- }
-
- /* Validate the pktflag parameter */
- if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
- VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
- VNDR_IE_CUSTOM_FLAG))) {
- WL_ERR(("invalid packet flag 0x%x\n", pktflag));
- return BCME_BADARG;
- }
-
- buf_len = sizeof(ie_setbuf_t) + data_len - 1;
-
- ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
- if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
- sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
- == BCME_OK) {
- if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
- WL_DBG(("skip to set interworking IE\n"));
- return BCME_OK;
- }
- }
-
- /* if already set with previous values, delete it first */
- if (cfg->wl11u) {
- if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
- return err;
- }
- }
-
- ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
- if (!ie_setbuf) {
- WL_ERR(("Error allocating buffer for IE\n"));
- return -ENOMEM;
- }
- strncpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
- ie_setbuf->cmd[sizeof(ie_setbuf->cmd) - 1] = '\0';
-
- /* Buffer contains only 1 IE */
- ie_setbuf->ie_buffer.iecount = htod32(1);
- /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
- ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
-
- /* Now, add the IE to the buffer */
- ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
- memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
-
- if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
- == BCME_OK) {
- WL_DBG(("set interworking IE\n"));
- cfg->wl11u = TRUE;
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
- }
-
- MFREE(cfg->osh, ie_setbuf, buf_len);
- return err;
-}
-#endif /* WL11U */
-
#ifdef WL_HOST_BAND_MGMT
s32
wl_cfg80211_set_band(struct net_device *ndev, int band)
@@ -20698,6 +21690,10 @@
return -EINVAL;
}
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
if (ret < 0) {
WL_ERR(("WLC_DISASSOC error %d\n", ret));
@@ -20808,603 +21804,6 @@
return err;
}
-#ifdef WBTEXT
-s32
-wl_cfg80211_wbtext_set_default(struct net_device *ndev)
-{
- char commandp[WLC_IOCTL_SMLEN];
- s32 ret = BCME_OK;
- char *data;
-
- WL_DBG(("set wbtext to default\n"));
-
- /* set roam profile */
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_A);
- data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_B);
- data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- /* set RSSI weight */
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_A);
- data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set weight config %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_B);
- data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set weight config %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- /* set CU weight */
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_A);
- data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set weight config %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_B);
- data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set weight config %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- /* set RSSI table */
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_A);
- data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_B);
- data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- /* set CU table */
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_A);
- data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set CU table %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- memset(commandp, 0, sizeof(commandp));
- snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
- CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_B);
- data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
- ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
- if (ret != BCME_OK) {
- WL_ERR(("%s: Failed to set CU table %s error = %d\n",
- __FUNCTION__, data, ret));
- return ret;
- }
-
- return ret;
-}
-
-s32
-wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
-{
- uint i = 0;
- long int rssi_lower, roam_trigger;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- wl_roam_prof_band_v2_t *rp;
- int err = -EINVAL, bytes_written = 0;
- size_t len = strlen(data);
- int rp_len = 0;
- u8 ioctl_buf[WLC_IOCTL_MEDLEN];
-
- data[len] = '\0';
- rp = (wl_roam_prof_band_v2_t *)MALLOCZ(cfg->osh, sizeof(*rp)
- * WL_MAX_ROAM_PROF_BRACKETS);
- if (unlikely(!rp)) {
- WL_ERR(("%s: failed to allocate memory\n", __func__));
- err = -ENOMEM;
- goto exit;
- }
- rp->ver = WL_MAX_ROAM_PROF_VER;
- if (*data && (!strncmp(data, "b", 1))) {
- rp->band = WLC_BAND_2G;
- } else if (*data && (!strncmp(data, "a", 1))) {
- rp->band = WLC_BAND_5G;
- } else {
- err = snprintf(command, total_len, "Missing band\n");
- goto exit;
- }
- data++;
- rp->len = 0;
- /* Getting roam profile from fw */
- if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("Getting roam_profile failed with err=%d \n", err));
- goto exit;
- }
- memcpy(rp, ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
- /* roam_prof version get */
- if (rp->ver != WL_MAX_ROAM_PROF_VER) {
- WL_ERR(("bad version (=%d) in return data\n", rp->ver));
- err = -EINVAL;
- goto exit;
- }
- if ((rp->len % sizeof(wl_roam_prof_v2_t)) != 0) {
- WL_ERR(("bad length (=%d) in return data\n", rp->len));
- err = -EINVAL;
- goto exit;
- }
-
- if (!*data) {
- for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
- /* printing contents of roam profile data from fw and exits
- * if code hits any of one of the below condtion. If remaining
- * length of buffer is less than roam profile size or
- * if there is no valid entry.
- */
- if (((i * sizeof(wl_roam_prof_v2_t)) > rp->len) ||
- (rp->roam_prof[i].fullscan_period == 0)) {
- break;
- }
- bytes_written += snprintf(command+bytes_written,
- total_len - bytes_written,
- "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
- rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower,
- rp->roam_prof[i].channel_usage,
- rp->roam_prof[i].cu_avg_calc_dur);
- }
- err = bytes_written;
- goto exit;
- } else {
- for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
- /* reading contents of roam profile data from fw and exits
- * if code hits any of one of the below condtion, If remaining
- * length of buffer is less than roam profile size or if there
- * is no valid entry.
- */
- if (((i * sizeof(wl_roam_prof_v2_t)) > rp->len) ||
- (rp->roam_prof[i].fullscan_period == 0)) {
- break;
- }
- }
- /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
- if (i != 2) {
- WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
- err = -EINVAL;
- goto exit;
- }
- /* setting roam profile to fw */
- data++;
- for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
- roam_trigger = simple_strtol(data, &data, 10);
- if (roam_trigger >= 0) {
- WL_ERR(("roam trigger[%d] value must be negative\n", i));
- err = -EINVAL;
- goto exit;
- }
- rp->roam_prof[i].roam_trigger = roam_trigger;
- data++;
- rssi_lower = simple_strtol(data, &data, 10);
- if (rssi_lower >= 0) {
- WL_ERR(("rssi lower[%d] value must be negative\n", i));
- err = -EINVAL;
- goto exit;
- }
- rp->roam_prof[i].rssi_lower = rssi_lower;
- data++;
- rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
- data++;
- rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10);
-
- rp_len += sizeof(wl_roam_prof_v2_t);
-
- if (*data == '\0') {
- break;
- }
- data++;
- }
- if (i != 1) {
- WL_ERR(("Only two roam_prof rows supported.\n"));
- err = -EINVAL;
- goto exit;
- }
- rp->len = rp_len;
- if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
- sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
- &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("seting roam_profile failed with err %d\n", err));
- }
- }
-exit:
- if (rp) {
- MFREE(cfg->osh, rp, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
- }
- return err;
-}
-
-#define BUFSZ 5
-#define BUFSZN BUFSZ + 1
-
-#define _S(x) #x
-#define S(x) _S(x)
-
-int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
- char *command, int total_len)
-{
- int bytes_written = 0, err = -EINVAL, argc = 0;
- char rssi[BUFSZN], band[BUFSZN], weight[BUFSZN];
- char *endptr = NULL;
- wnm_bss_select_weight_cfg_t *bwcfg;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
-
- bwcfg = (wnm_bss_select_weight_cfg_t *)MALLOCZ(cfg->osh, sizeof(*bwcfg));
- if (unlikely(!bwcfg)) {
- WL_ERR(("%s: failed to allocate memory\n", __func__));
- err = -ENOMEM;
- goto exit;
- }
- bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
- bwcfg->type = 0;
- bwcfg->weight = 0;
-
- argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band, weight);
-
- if (!strcasecmp(rssi, "rssi"))
- bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
- else if (!strcasecmp(rssi, "cu"))
- bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
- else {
- /* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu> <band> <weight> */
- WL_ERR(("%s: Command usage error\n", __func__));
- goto exit;
- }
-
- if (!strcasecmp(band, "a"))
- bwcfg->band = WLC_BAND_5G;
- else if (!strcasecmp(band, "b"))
- bwcfg->band = WLC_BAND_2G;
- else if (!strcasecmp(band, "all"))
- bwcfg->band = WLC_BAND_ALL;
- else {
- WL_ERR(("%s: Command usage error\n", __func__));
- goto exit;
- }
-
- if (argc == 2) {
- /* If there is no data after band, getting wnm_bss_select_weight from fw */
- if (bwcfg->band == WLC_BAND_ALL) {
- WL_ERR(("band option \"all\" is for set only, not get\n"));
- goto exit;
- }
- if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
- sizeof(*bwcfg),
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
- goto exit;
- }
- memcpy(bwcfg, ioctl_buf, sizeof(*bwcfg));
- bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
- (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU",
- (bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
- err = bytes_written;
- goto exit;
- } else {
- /* if weight is non integer returns command usage error */
- bwcfg->weight = simple_strtol(weight, &endptr, 0);
- if (*endptr != '\0') {
- WL_ERR(("%s: Command usage error", __func__));
- goto exit;
- }
- /* setting weight for iovar wnm_bss_select_weight to fw */
- if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
- sizeof(*bwcfg),
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err));
- }
- }
-exit:
- if (bwcfg) {
- MFREE(cfg->osh, bwcfg, sizeof(*bwcfg));
- }
- return err;
-}
-
-/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */
-#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
-
-int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
- char *command, int total_len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- int bytes_written = 0, err = -EINVAL;
- char rssi[BUFSZN], band[BUFSZN];
- int btcfg_len = 0, i = 0, parsed_len = 0;
- wnm_bss_select_factor_cfg_t *btcfg;
- size_t slen = strlen(data);
- char *start_addr = NULL;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
-
- data[slen] = '\0';
- btcfg = (wnm_bss_select_factor_cfg_t *)MALLOCZ(cfg->osh,
- (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
- if (unlikely(!btcfg)) {
- WL_ERR(("%s: failed to allocate memory\n", __func__));
- err = -ENOMEM;
- goto exit;
- }
-
- btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
- btcfg->band = WLC_BAND_AUTO;
- btcfg->type = 0;
- btcfg->count = 0;
-
- sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band);
-
- if (!strcasecmp(rssi, "rssi")) {
- btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
- }
- else if (!strcasecmp(rssi, "cu")) {
- btcfg->type = WNM_BSS_SELECT_TYPE_CU;
- }
- else {
- WL_ERR(("%s: Command usage error\n", __func__));
- goto exit;
- }
-
- if (!strcasecmp(band, "a")) {
- btcfg->band = WLC_BAND_5G;
- }
- else if (!strcasecmp(band, "b")) {
- btcfg->band = WLC_BAND_2G;
- }
- else if (!strcasecmp(band, "all")) {
- btcfg->band = WLC_BAND_ALL;
- }
- else {
- WL_ERR(("%s: Command usage, Wrong band\n", __func__));
- goto exit;
- }
-
- if ((slen - 1) == (strlen(rssi) + strlen(band))) {
- /* Getting factor table using iovar 'wnm_bss_select_table' from fw */
- if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
- sizeof(*btcfg),
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err));
- goto exit;
- }
- memcpy(btcfg, ioctl_buf, sizeof(*btcfg));
- memcpy(btcfg, ioctl_buf, (btcfg->count+1) * sizeof(*btcfg));
-
- bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
- "No of entries in table: %d\n", btcfg->count);
- bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
- "%s factor table\n",
- (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
- bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
- "low\thigh\tfactor\n");
- for (i = 0; i <= btcfg->count-1; i++) {
- bytes_written += snprintf(command + bytes_written,
- total_len - bytes_written, "%d\t%d\t%d\n", btcfg->params[i].low,
- btcfg->params[i].high, btcfg->params[i].factor);
- }
- err = bytes_written;
- goto exit;
- } else {
- memset(btcfg->params, 0, sizeof(wnm_bss_select_factor_params_t)
- * WL_FACTOR_TABLE_MAX_LIMIT);
- data += (strlen(rssi) + strlen(band) + 2);
- start_addr = data;
- slen = slen - (strlen(rssi) + strlen(band) + 2);
- for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
- if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
- btcfg->params[i].low = simple_strtol(data, &data, 10);
- data++;
- btcfg->params[i].high = simple_strtol(data, &data, 10);
- data++;
- btcfg->params[i].factor = simple_strtol(data, &data, 10);
- btcfg->count++;
- if (*data == '\0') {
- break;
- }
- data++;
- parsed_len = data - start_addr;
- } else {
- WL_ERR(("%s:Command usage:less no of args\n", __func__));
- goto exit;
- }
- }
- btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
- if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
- cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err));
- goto exit;
- }
- }
-exit:
- if (btcfg) {
- MFREE(cfg->osh, btcfg,
- (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
- }
- return err;
-}
-
-s32
-wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
-{
- uint i = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
- char delta[BUFSZN], band[BUFSZN], *endptr = NULL;
- wl_roam_prof_band_v2_t *rp;
- u8 ioctl_buf[WLC_IOCTL_MEDLEN];
-
- rp = (wl_roam_prof_band_v2_t *)MALLOCZ(cfg->osh, sizeof(*rp)
- * WL_MAX_ROAM_PROF_BRACKETS);
- if (unlikely(!rp)) {
- WL_ERR(("%s: failed to allocate memory\n", __func__));
- err = -ENOMEM;
- goto exit;
- }
-
- argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", band, delta);
- if (!strcasecmp(band, "a"))
- rp->band = WLC_BAND_5G;
- else if (!strcasecmp(band, "b"))
- rp->band = WLC_BAND_2G;
- else {
- WL_ERR(("%s: Missing band\n", __func__));
- goto exit;
- }
- /* Getting roam profile from fw */
- if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("Getting roam_profile failed with err=%d \n", err));
- goto exit;
- }
- memcpy(rp, ioctl_buf, sizeof(wl_roam_prof_band_v2_t));
- if (rp->ver != WL_MAX_ROAM_PROF_VER) {
- WL_ERR(("bad version (=%d) in return data\n", rp->ver));
- err = -EINVAL;
- goto exit;
- }
- if ((rp->len % sizeof(wl_roam_prof_v2_t)) != 0) {
- WL_ERR(("bad length (=%d) in return data\n", rp->len));
- err = -EINVAL;
- goto exit;
- }
-
- if (argc == 2) {
- /* if delta is non integer returns command usage error */
- val = simple_strtol(delta, &endptr, 0);
- if (*endptr != '\0') {
- WL_ERR(("%s: Command usage error", __func__));
- goto exit;
- }
- for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
- /*
- * Checking contents of roam profile data from fw and exits
- * if code hits below condtion. If remaining length of buffer is
- * less than roam profile size or if there is no valid entry.
- */
- if (((i * sizeof(wl_roam_prof_v2_t)) > rp->len) ||
- (rp->roam_prof[i].fullscan_period == 0)) {
- break;
- }
- if (rp->roam_prof[i].channel_usage != 0) {
- rp->roam_prof[i].roam_delta = val;
- }
- len += sizeof(wl_roam_prof_v2_t);
- }
- }
- else {
- if (rp->roam_prof[i].channel_usage != 0) {
- bytes_written = snprintf(command, total_len,
- "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? "2G" : "5G",
- rp->roam_prof[0].roam_delta);
- }
- err = bytes_written;
- goto exit;
- }
- rp->len = len;
- if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
- sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
- &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("seting roam_profile failed with err %d\n", err));
- }
-exit :
- if (rp) {
- MFREE(cfg->osh, rp, sizeof(*rp)
- * WL_MAX_ROAM_PROF_BRACKETS);
- }
- return err;
-}
-#endif /* WBTEXT */
-
-int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
-{
- struct net_device *ndev = NULL;
- unsigned long flags;
- int clear_flag = 0;
- int ret = 0;
-
- WL_TRACE(("Enter\n"));
-
- if (!cfg || !cfgdev)
- return -EINVAL;
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) {
-#else
- if (cfg->scan_request && cfg->scan_request->dev == cfgdev) {
-#endif // endif
- wl_notify_scan_done(cfg, true);
- cfg->scan_request = NULL;
- clear_flag = 1;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
- if (clear_flag)
- wl_clr_drv_status(cfg, SCANNING, ndev);
-
- return ret;
-}
-
bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
@@ -21441,6 +21840,9 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
if (!cfg) {
+ /* When the interface is created using the wl utility,
+ * ndev->ieee80211_ptr will be NULL.
+ */
return NULL;
}
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
@@ -21459,12 +21861,9 @@
s32 pm = PM_FAST;
BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
WL_DBG(("Enter \n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev could be null */
if (iter->ndev) {
if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
@@ -21485,15 +21884,24 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
#if defined(OEM_ANDROID)
DHD_PM_WAKE_UNLOCK(cfg->pub);
#endif /* BCMDONGLEHOST && OEM_ANDROID */
}
+#ifdef ENABLE_HOGSQS
+static void wl_cfg80211_hogsqs_event_handler(struct work_struct *work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211,
+ hogsqs_eventwork.work);
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wl_add_remove_eventextmsg(ndev, WLC_E_LDF_HOGGER, true);
+}
+#endif // endif
+
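
The hogsqs handler added above follows the usual delayed-work idiom: the work item is embedded in the private structure, recovered with container_of() inside the handler, and used to re-arm the event that was masked. A hedged module-style sketch of that idiom; the sketch_* names are illustrative and the hold-off interval is an assumption:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct sketch_cfg {
        struct delayed_work hogsqs_eventwork;
        int event_id;
};

static void sketch_hogsqs_handler(struct work_struct *work)
{
        struct sketch_cfg *cfg =
                container_of(work, struct sketch_cfg, hogsqs_eventwork.work);

        /* re-enable the event that was masked when the hold-off started */
        pr_debug("re-enabling event %d\n", cfg->event_id);
}

static struct sketch_cfg sketch_instance = { .event_id = 1 };

static int __init sketch_init(void)
{
        INIT_DELAYED_WORK(&sketch_instance.hogsqs_eventwork, sketch_hogsqs_handler);
        /* hold off for roughly 2 seconds before re-enabling (assumed interval) */
        schedule_delayed_work(&sketch_instance.hogsqs_eventwork, msecs_to_jiffies(2000));
        return 0;
}

static void __exit sketch_exit(void)
{
        cancel_delayed_work_sync(&sketch_instance.hogsqs_eventwork);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
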
u8
wl_get_action_category(void *frame, u32 frame_len)
{
@@ -21531,15 +21939,15 @@
int bytes_written = -1;
if (total_len < FBT_KEYLEN) {
- WL_ERR(("%s: Insufficient buffer \n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_get_fbt_key: Insufficient buffer \n"));
goto end;
}
if (cfg) {
memcpy(key, cfg->fbt_key, FBT_KEYLEN);
bytes_written = FBT_KEYLEN;
} else {
- memset(key, 0, FBT_KEYLEN);
- WL_ERR(("%s: Failed to copy KCK and KEK \n", __FUNCTION__));
+ bzero(key, FBT_KEYLEN);
+ WL_ERR(("wl_cfg80211_get_fbt_key: Failed to copy KCK and KEK \n"));
}
prhex("KCK, KEK", (uchar *)key, FBT_KEYLEN);
end:
@@ -21572,10 +21980,10 @@
const bcm_tlv_t *ie;
struct parsed_vndr_ie_info *parsed_info;
u32 count = 0;
- s32 remained_len;
+ u32 remained_len;
- remained_len = (s32)len;
- memset(vndr_ies, 0, sizeof(*vndr_ies));
+ remained_len = len;
+ bzero(vndr_ies, sizeof(*vndr_ies));
WL_DBG(("---> len %d\n", len));
ie = (const bcm_tlv_t *) parse;
@@ -21584,20 +21992,34 @@
while (ie) {
if (count >= MAX_VNDR_IE_NUMBER)
break;
- if (ie->id == DOT11_MNG_VS_ID) {
+ if (ie->id == DOT11_MNG_VS_ID || (ie->id == DOT11_MNG_ID_EXT_ID)) {
vndrie = (const vndr_ie_t *) ie;
- /* len should be bigger than OUI length + one data length at least */
- if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
- WL_ERR(("%s: invalid vndr ie. length is too small %d\n",
- __FUNCTION__, vndrie->len));
- goto end;
- }
- /* if wpa or wme ie, do not add ie */
- if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
- ((vndrie->data[0] == WPA_OUI_TYPE) ||
- (vndrie->data[0] == WME_OUI_TYPE))) {
- CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
- goto end;
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ /* len should be bigger than sizeof ID extn field at least */
+ if (vndrie->len < MIN_VENDOR_EXTN_IE_LEN) {
+ WL_ERR(("%s: invalid vndr extn ie."
+ " length %d\n",
+ __FUNCTION__, vndrie->len));
+ goto end;
+ }
+ } else {
+ /* len should be bigger than OUI length +
+ * one data length at least
+ */
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("wl_cfg80211_parse_vndr_ies:"
+ " invalid vndr ie. length is too small %d\n",
+ vndrie->len));
+ goto end;
+ }
+
+ /* if wpa or wme ie, do not add ie */
+ if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+ ((vndrie->data[0] == WPA_OUI_TYPE) ||
+ (vndrie->data[0] == WME_OUI_TYPE))) {
+ CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+ goto end;
+ }
}
parsed_info = &vndr_ies->ie_info[count++];
@@ -21607,10 +22029,14 @@
parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
vndr_ies->count = count;
-
- WL_DBG(("\t ** OUI "MACOUIDBG", type 0x%02x len:%d\n",
- MACOUI2STRDBG(parsed_info->vndrie.oui),
- parsed_info->vndrie.data[0], parsed_info->ie_len));
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("\t ** Vendor Extension ie id: 0x%02x, len:%d\n",
+ ie->id, parsed_info->ie_len));
+ } else {
+ WL_DBG(("\t ** OUI "MACOUIDBG", type 0x%02x len:%d\n",
+ MACOUI2STRDBG(parsed_info->vndrie.oui),
+ parsed_info->vndrie.data[0], parsed_info->ie_len));
+ }
}
end:
ie = bcm_next_tlv(ie, &remained_len);
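
The parser above now accepts two element classes: vendor-specific elements (ID 221) with the existing OUI/WPA/WME filtering, and Element ID Extension elements (ID 255) with only a minimum-length check. A stand-alone sketch of that walk; MIN_EXT_IE_LEN and the sample extension ID are illustrative, not the driver's constants:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define EID_VENDOR_SPECIFIC 221 /* DOT11_MNG_VS_ID */
#define EID_EXTENSION       255 /* DOT11_MNG_ID_EXT_ID */
#define OUI_LEN             3
#define MIN_EXT_IE_LEN      1   /* at least the Element ID Extension octet */

static void sketch_walk_ies(const uint8_t *ies, size_t len)
{
        size_t off = 0;

        while (off + 2 <= len && off + 2 + ies[off + 1] <= len) {
                uint8_t id = ies[off];
                uint8_t elen = ies[off + 1];

                if (id == EID_VENDOR_SPECIFIC && elen >= OUI_LEN + 1) {
                        printf("vendor IE OUI %02x-%02x-%02x type 0x%02x len %u\n",
                               ies[off + 2], ies[off + 3], ies[off + 4],
                               ies[off + 5], elen);
                } else if (id == EID_EXTENSION && elen >= MIN_EXT_IE_LEN) {
                        printf("extension IE ext-id %u len %u\n", ies[off + 2], elen);
                }
                off += 2 + elen;
        }
}

int main(void)
{
        /* one vendor-specific IE and one extension IE, values purely illustrative */
        const uint8_t ies[] = {
                221, 4, 0x00, 0x50, 0xf2, 0x02,
                255, 2, 35, 0x01,
        };
        sketch_walk_ies(ies, sizeof(ies));
        return 0;
}
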
@@ -21642,21 +22068,16 @@
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return TRUE;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return FALSE;
}
@@ -21667,7 +22088,7 @@
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- oui_entry = (wl_vndr_oui_entry_t *)MALLOC(cfg->osh, sizeof(*oui_entry));
+ oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
if (oui_entry == NULL) {
WL_ERR(("alloc failed\n"));
return FALSE;
@@ -21676,9 +22097,9 @@
memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
INIT_LIST_HEAD(&oui_entry->list);
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return TRUE;
}
@@ -21689,22 +22110,17 @@
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
while (!list_empty(&cfg->vndr_oui_list)) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
+ GCC_DIAGNOSTIC_POP();
if (oui_entry) {
list_del(&oui_entry->list);
- MFREE(cfg->osh, oui_entry, sizeof(*oui_entry));
+ kfree(oui_entry);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
}
static int
@@ -21747,24 +22163,19 @@
}
if (vndr_oui) {
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (remained_buf_len < VNDR_OUI_STR_LEN) {
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return BCME_ERROR;
}
pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
remained_buf_len -= VNDR_OUI_STR_LEN;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
}
return vndr_oui_num;
@@ -21842,18 +22253,11 @@
struct net_info *iter, *next;
WL_DBG(("clear management vendor IEs \n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
wl_cfg80211_clear_per_bss_ies(cfg, iter->wdev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
return 0;
}
@@ -21896,7 +22300,7 @@
}
/* Clear the global buffer */
- memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+ bzero(g_mgmt_ie_buf, sizeof(g_mgmt_ie_buf));
curr_ie_buf = g_mgmt_ie_buf;
ies = &netinfo->bss.ies;
@@ -21929,6 +22333,11 @@
mgmt_ie_len = &ies->assoc_req_ie_len;
mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
break;
+ case VNDR_IE_DISASSOC_FLAG :
+ mgmt_ie_buf = ies->disassoc_ie;
+ mgmt_ie_len = &ies->disassoc_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->disassoc_ie);
+ break;
default:
mgmt_ie_buf = NULL;
mgmt_ie_len = NULL;
@@ -21984,10 +22393,31 @@
for (i = 0; i < old_vndr_ies.count; i++) {
struct parsed_vndr_ie_info *vndrie_info =
&old_vndr_ies.ie_info[i];
+#if defined(WL_MBO) || defined(WL_OCE)
+ {
+ if ((vndrie_info->vndrie.id == 0xDD) &&
+ (!memcmp(vndrie_info->vndrie.oui, WFA_OUI, WFA_OUI_LEN)) &&
+ (vndrie_info->vndrie.data[0] == WFA_OUI_TYPE_MBO_OCE)) {
+ WL_DBG(("skipping ID : %d, Len: %d, OUI:"MACOUIDBG
+ ", type: %0x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui),
+ vndrie_info->vndrie.data[0]));
+ continue;
+ }
+ }
+#endif /* WL_MBO || WL_OCE */
- WL_DBG(("DELETED ID : %d, Len: %d , OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("DELETED VENDOR EXTN ID : %d, TYPE: %d Len: %d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG(("DELETED ID : %d, Len: %d , OUI:"MACOUIDBG"\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ }
del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
@@ -22012,11 +22442,31 @@
for (i = 0; i < new_vndr_ies.count; i++) {
struct parsed_vndr_ie_info *vndrie_info =
&new_vndr_ies.ie_info[i];
-
- WL_DBG(("ADDED ID : %d, Len: %d(%d), OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- vndrie_info->ie_len - 2,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+#if defined(WL_MBO) || defined(WL_OCE)
+ {
+ if ((vndrie_info->vndrie.id == 0xDD) &&
+ (!memcmp(vndrie_info->vndrie.oui, WFA_OUI, WFA_OUI_LEN)) &&
+ (vndrie_info->vndrie.data[0] == WFA_OUI_TYPE_MBO_OCE)) {
+ WL_DBG(("skipping ID : %d, Len: %d, OUI:"MACOUIDBG
+ ",type :%0x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui),
+ vndrie_info->vndrie.data[0]));
+ continue;
+ }
+ }
+#endif /* WL_MBO || WL_OCE */
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("ADDED VENDOR EXTN ID : %d, TYPE = %d, Len: %d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG(("ADDED ID : %d, Len: %d(%d), OUI:"MACOUIDBG"\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->ie_len - 2,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ }
del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
@@ -22082,15 +22532,16 @@
/* if acl == NULL, macmode is still disabled.. */
if (macmode == MACLIST_MODE_DISABLED) {
if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list"
+ " failed error=%d\n", ret));
return ret;
}
macnum = acl->n_acl_entries;
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- WL_ERR(("%s : invalid number of MAC address entries %d\n",
- __FUNCTION__, macnum));
+ WL_ERR(("wl_cfg80211_set_mac_acl: invalid number of MAC address entries %d\n",
+ macnum));
return -1;
}
@@ -22098,7 +22549,7 @@
list = (struct maclist *)MALLOC(cfg->osh, sizeof(int) +
sizeof(struct ether_addr) * macnum);
if (!list) {
- WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_set_mac_acl: failed to allocate memory\n"));
return -1;
}
@@ -22109,7 +22560,7 @@
}
/* set the list */
if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list failed error=%d\n", ret));
MFREE(cfg->osh, list, sizeof(int) +
sizeof(struct ether_addr) * macnum);
@@ -22146,11 +22597,12 @@
{
if (CHSPEC_SB_UPPER(chanspec)) {
channel += CH_10MHZ_APART;
+ chan_type = NL80211_CHAN_HT40MINUS;
} else {
channel -= CH_10MHZ_APART;
+ chan_type = NL80211_CHAN_HT40PLUS;
}
}
- chan_type = NL80211_CHAN_HT40PLUS;
break;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
@@ -22170,9 +22622,9 @@
channel += (CH_10MHZ_APART + CH_20MHZ_APART);
}
- if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+ if (sb == WL_CHANSPEC_CTL_SB_LU || sb == WL_CHANSPEC_CTL_SB_UU)
chan_type = NL80211_CHAN_HT40MINUS;
- else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+ else if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_UL)
chan_type = NL80211_CHAN_HT40PLUS;
}
break;
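
The chan_type fix above encodes one rule: HT40+/HT40- is determined by where the control (primary) 20 MHz sits, since the secondary 20 MHz lies on the opposite side for a 40 MHz channel and on a fixed side of each 40 MHz pair within 80 MHz. A small sketch of that mapping, with illustrative enums in place of the chanspec bit fields:

#include <stdio.h>

enum sketch_ctl_sb { SB_LL, SB_LU, SB_UL, SB_UU };      /* lowest..highest 20 MHz */

enum sketch_chan_type { HT40PLUS, HT40MINUS };

/* 80 MHz: the secondary 20 MHz sits above the control for the lower half of
 * each 40 MHz pair (LL, UL) and below it for the upper half (LU, UU).
 */
static enum sketch_chan_type sketch_ht40_type_80mhz(enum sketch_ctl_sb sb)
{
        return (sb == SB_LU || sb == SB_UU) ? HT40MINUS : HT40PLUS;
}

/* 40 MHz: control above the centre means the secondary is below it. */
static enum sketch_chan_type sketch_ht40_type_40mhz(int control_is_upper)
{
        return control_is_upper ? HT40MINUS : HT40PLUS;
}

int main(void)
{
        printf("80MHz, control=LU -> %s\n",
               sketch_ht40_type_80mhz(SB_LU) == HT40MINUS ? "HT40-" : "HT40+");
        printf("40MHz, control upper -> %s\n",
               sketch_ht40_type_40mhz(1) == HT40MINUS ? "HT40-" : "HT40+");
        return 0;
}
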
@@ -22221,6 +22673,9 @@
0)))
struct chan_info chaninfo;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ struct bcm_cfg80211 *cfg = NULL;
+#endif // endif
if (!wiphy) {
WL_ERR(("wiphy is null\n"));
@@ -22234,6 +22689,14 @@
return;
}
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0)) */
+
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ cfg = wl_get_cfg(dev);
+ if (cfg->in_csa) {
+ cfg->in_csa = FALSE;
+ wl_del_csa_timeout(cfg);
+ }
+#endif // endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
if (wl_chspec_chandef(chanspec, &chandef, wiphy)) {
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
@@ -22302,10 +22765,12 @@
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
chanspec = *((chanspec_t *)data);
+#ifndef NO_CHANIND_FOR_AP_START
if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
/* For AP/GO role */
wl_ap_channel_ind(cfg, ndev, chanspec);
}
+#endif /* NO_CHANIND_FOR_AP_START */
}
return 0;
@@ -22318,6 +22783,7 @@
int error = 0;
u32 chanspec = 0;
struct net_device *ndev = NULL;
+ struct ether_addr bssid;
WL_DBG(("Enter\n"));
if (unlikely(e->status)) {
@@ -22327,6 +22793,16 @@
if (likely(cfgdev)) {
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ /* Get association state if not AP and then query chanspec */
+ if (!((wl_get_mode_by_netdev(cfg, ndev)) == WL_MODE_AP)) {
+ error = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (error) {
+ WL_ERR(("CSA on %s. Not associated. error=%d\n",
+ ndev->name, error));
+ return BCME_ERROR;
+ }
+ }
+
error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
if (unlikely(error)) {
WL_ERR(("Get chanspec error: %d \n", error));
@@ -22338,6 +22814,11 @@
/* For AP/GO role */
wl_ap_channel_ind(cfg, ndev, chanspec);
} else {
+ /* STA/GC roles */
+ if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ WL_ERR(("CSA on %s. Not associated.\n", ndev->name));
+ return BCME_ERROR;
+ }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
@@ -22423,33 +22904,99 @@
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
#endif /* GTK_OFFLOAD_SUPPORT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 bssidx;
+
+ pmk.key_len = conf->pmk_len;
+ if (pmk.key_len > sizeof(pmk.key)) {
+ ret = -EINVAL;
+ return ret;
+ }
+ pmk.flags = 0;
+ ret = memcpy_s(&pmk.key, sizeof(pmk.key), conf->pmk, conf->pmk_len);
+ if (ret) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf_bsscfg(dev, "okc_info_pmk", pmk.key, pmk.key_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (ret) {
+ WL_ERR(("wl_cfg80211_set_pmk error:%d", ret));
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa)
+{
+ int err = BCME_OK;
+ struct cfg80211_pmksa pmksa;
+
+ /* build up cfg80211_pmksa structure to use existing wl_cfg80211_update_pmksa API */
+ bzero(&pmksa, sizeof(pmksa));
+ pmksa.bssid = aa;
+
+ err = wl_cfg80211_update_pmksa(wiphy, dev, &pmksa, FALSE);
+
+ if (err) {
+ WL_ERR(("wl_cfg80211_update_pmksa err:%d\n", err));
+ err = -EINVAL;
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+
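
wl_cfg80211_set_pmk() above follows a best-effort-then-mandatory flow: reject a PMK larger than the firmware structure, push okc_info_pmk opportunistically (it may be unsupported), and only fail if the WSEC PMK ioctl fails. A stand-alone sketch of that control flow with stand-in firmware calls; all sketch_* names are illustrative:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stddef.h>

#define SKETCH_PMK_MAX 64

struct sketch_pmk {
        uint32_t key_len;
        uint8_t  key[SKETCH_PMK_MAX];
};

/* stand-ins for the firmware calls; return 0 on success */
static int sketch_set_okc_pmk(const struct sketch_pmk *pmk) { (void)pmk; return -95; }
static int sketch_set_wsec_pmk(const struct sketch_pmk *pmk) { (void)pmk; return 0; }

static int sketch_set_pmk(const uint8_t *pmk_data, size_t pmk_len, int is_1x_akm)
{
        struct sketch_pmk pmk = { 0 };
        int ret;

        if (pmk_len > sizeof(pmk.key))
                return -EINVAL;                 /* larger than the fw structure allows */
        memcpy(pmk.key, pmk_data, pmk_len);
        pmk.key_len = (uint32_t)pmk_len;

        if (is_1x_akm) {
                ret = sketch_set_okc_pmk(&pmk); /* optional: OKC may be unsupported */
                if (ret)
                        fprintf(stderr, "okc pmk failed (%d), ignored\n", ret);
        }

        ret = sketch_set_wsec_pmk(&pmk);        /* mandatory */
        return ret ? -EINVAL : 0;
}

int main(void)
{
        return sketch_set_pmk((const uint8_t *)"0123456789abcdef0123456789abcdef", 32, 1);
}
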
#if defined(WL_SUPPORT_AUTO_CHANNEL)
int
wl_cfg80211_set_spect(struct net_device *dev, int spect)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
int wlc_down = 1;
int wlc_up = 1;
int err = BCME_OK;
- if (!wl_get_drv_status_all(cfg, CONNECTED)) {
- err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
- if (err) {
- WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
- return err;
- }
+ err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
+ if (err) {
+ WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
+ return err;
+ }
- err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
- if (err) {
- WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
- return err;
- }
+ err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
+ if (err) {
+ WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
+ return err;
+ }
- err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
- if (err) {
- WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
- return err;
- }
+ err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
+ if (err) {
+ WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
+ return err;
}
return err;
}
@@ -22481,314 +23028,8 @@
return id;
}
-#if defined(SUPPORT_RANDOM_MAC_SCAN)
-int
-wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- int ret;
-
- if (cfg->random_mac_enabled == enable) {
- WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
- return BCME_OK;
- }
-
- if (enable) {
- ret = wl_cfg80211_random_mac_enable(dev);
- } else {
- ret = wl_cfg80211_random_mac_disable(dev);
- }
-
- if (!ret) {
- cfg->random_mac_enabled = enable;
- }
-
- return ret;
-}
-
-int
-wl_cfg80211_random_mac_enable(struct net_device *dev)
-{
- u8 random_mac[ETH_ALEN] = {0, };
- u8 rand_bytes[3] = {0, };
- s32 err = BCME_ERROR;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#if !defined(LEGACY_RANDOM_MAC)
- uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
- wl_scanmac_t *sm = NULL;
- int len = 0;
- wl_scanmac_enable_t *sm_enable = NULL;
- wl_scanmac_config_t *sm_config = NULL;
-#endif /* !LEGACY_RANDOM_MAC */
-
- if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
- wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
- WL_ERR(("fail to Set random mac, current state is wrong\n"));
- return err;
- }
-
- memcpy(random_mac, bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN);
- get_random_bytes(&rand_bytes, sizeof(rand_bytes));
-
- if (rand_bytes[2] == 0x0 || rand_bytes[2] == 0xff) {
- rand_bytes[2] = 0xf0;
- }
-
-#if defined(LEGACY_RANDOM_MAC)
- memcpy(&random_mac[3], rand_bytes, sizeof(rand_bytes));
-
- err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
- random_mac, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err != BCME_OK) {
- WL_ERR(("failed to set random generate MAC address\n"));
- } else {
- WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
- MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
- MAC2STRDBG((const u8 *)&random_mac)));
- WL_ERR(("random MAC enable done"));
- }
-#else
- /* Enable scan mac */
- sm = (wl_scanmac_t *)buffer;
- sm_enable = (wl_scanmac_enable_t *)sm->data;
- sm->len = sizeof(*sm_enable);
- sm_enable->enable = 1;
- len = OFFSETOF(wl_scanmac_t, data) + sm->len;
- sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
-
- err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
- sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- /* For older chip which which does not have scanmac support can still use
- * cur_etheraddr to set the randmac. rand_mask and rand_mac comes from upper
- * cfg80211 layer. If rand_mask and rand_mac is not passed then fallback
- * to default cur_etheraddr and default mask.
- */
- if (err == BCME_UNSUPPORTED) {
- /* In case of host based legacy randomization, random address is
- * generated by mixing 3 bytes of cur_etheraddr and 3 bytes of
- * random bytes generated.In that case rand_mask is nothing but
- * random bytes.
- */
- memcpy(&random_mac[3], rand_bytes, sizeof(rand_bytes));
- err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
- random_mac, ETH_ALEN, cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
- if (err != BCME_OK) {
- WL_ERR(("failed to set random generate MAC address\n"));
- } else {
- WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
- MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
- MAC2STRDBG((const u8 *)&random_mac)));
- WL_ERR(("random MAC enable done using legacy randmac"));
- }
- } else if (err == BCME_OK) {
- /* Configure scanmac */
- memset(buffer, 0x0, sizeof(buffer));
- sm_config = (wl_scanmac_config_t *)sm->data;
- sm->len = sizeof(*sm_config);
- sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
- sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
-
- /* Set randomize mac address recv from upper layer */
- memcpy(&sm_config->mac.octet, random_mac, ETH_ALEN);
-
- /* Set randomize mask recv from upper layer */
-
- /* Currently in samsung case, upper layer does not provide
- * variable randmask and its using fixed 3 byte randomization
- */
- memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN);
- memset(&sm_config->random_mask.octet[3], 0xFF, 3);
-
- WL_DBG(("recv random mac addr " MACDBG " recv rand mask" MACDBG "\n",
- MAC2STRDBG((const u8 *)&sm_config->mac.octet),
- MAC2STRDBG((const u8 *)&sm_config->random_mask)));
-
- len = OFFSETOF(wl_scanmac_t, data) + sm->len;
- err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
- sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err != BCME_OK) {
- WL_ERR(("failed scanmac configuration\n"));
-
- /* Disable scan mac for clean-up */
- wl_cfg80211_random_mac_disable(dev);
- return err;
- }
- WL_DBG(("random MAC enable done using scanmac"));
- } else {
- WL_ERR(("failed to enable scanmac, err=%d\n", err));
- }
-#endif /* LEGACY_RANDOM_MAC */
-
- return err;
-}
-
-int
-wl_cfg80211_random_mac_disable(struct net_device *dev)
-{
- s32 err = BCME_ERROR;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#if !defined(LEGACY_RANDOM_MAC)
- uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
- wl_scanmac_t *sm = NULL;
- int len = 0;
- wl_scanmac_enable_t *sm_enable = NULL;
-#endif /* !LEGACY_RANDOM_MAC */
-
-#if defined(LEGACY_RANDOM_MAC)
- WL_ERR(("set original mac " MACDBG "\n",
- MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr)));
-
- err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
- bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err != BCME_OK) {
- WL_ERR(("failed to set original MAC address\n"));
- } else {
- WL_ERR(("legacy random MAC disable done \n"));
- }
-#else
- sm = (wl_scanmac_t *)buffer;
- sm_enable = (wl_scanmac_enable_t *)sm->data;
- sm->len = sizeof(*sm_enable);
- /* Disable scanmac */
- sm_enable->enable = 0;
- len = OFFSETOF(wl_scanmac_t, data) + sm->len;
-
- sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
-
- err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
- sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err != BCME_OK) {
- WL_ERR(("failed to disable scanmac, err=%d\n", err));
- return err;
- }
- WL_DBG(("random MAC disable done\n"));
-#endif /* LEGACY_RANDOM_MAC */
-
- return err;
-}
-
-/*
- * This is new interface for mac randomization. It takes randmac and randmask
- * as arg and it uses scanmac iovar to offload the mac randomization to firmware.
- */
-int wl_cfg80211_scan_mac_enable(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask)
-{
- int byte_index = 0;
- s32 err = BCME_ERROR;
- uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
- wl_scanmac_t *sm = NULL;
- int len = 0;
- wl_scanmac_enable_t *sm_enable = NULL;
- wl_scanmac_config_t *sm_config = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if ((rand_mac == NULL) || (rand_mask == NULL)) {
- err = BCME_BADARG;
- WL_ERR(("fail to Set random mac, bad argument\n"));
- /* Disable the current scanmac config */
- wl_cfg80211_scan_mac_disable(dev);
- return err;
- }
-
- if (ETHER_ISNULLADDR(rand_mac)) {
- WL_DBG(("fail to Set random mac, Invalid rand mac\n"));
- /* Disable the current scanmac config */
- wl_cfg80211_scan_mac_disable(dev);
- return err;
- }
-
- if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
- wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
- WL_ERR(("fail to Set random mac, current state is wrong\n"));
- return BCME_UNSUPPORTED;
- }
-
- /* Enable scan mac */
- sm = (wl_scanmac_t *)buffer;
- sm_enable = (wl_scanmac_enable_t *)sm->data;
- sm->len = sizeof(*sm_enable);
- sm_enable->enable = 1;
- len = OFFSETOF(wl_scanmac_t, data) + sm->len;
- sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
-
- err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
- sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err == BCME_OK) {
- /* Configure scanmac */
- memset(buffer, 0x0, sizeof(buffer));
- sm_config = (wl_scanmac_config_t *)sm->data;
- sm->len = sizeof(*sm_config);
- sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
- sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
-
- /* Set randomize mac address recv from upper layer */
- memcpy(&sm_config->mac.octet, rand_mac, ETH_ALEN);
-
- /* Set randomize mask recv from upper layer */
-
- /* There is a difference in how to interpret rand_mask between
- * upperlayer and firmware. If the byte is set as FF then for
- * upper layer it means keep that byte and do not randomize whereas
- * for firmware it means randomize those bytes and vice versa. Hence
- * conversion is needed before setting the iovar
- */
- memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN);
- /* Only byte randomization is supported currently. If mask recv is 0x0F
- * for a particular byte then it will be treated as no randomization
- * for that byte.
- */
- while (byte_index < ETH_ALEN) {
- if (rand_mask[byte_index] == 0xFF) {
- sm_config->random_mask.octet[byte_index] = 0x00;
- } else if (rand_mask[byte_index] == 0x00) {
- sm_config->random_mask.octet[byte_index] = 0xFF;
- }
- byte_index++;
- }
-
- WL_DBG(("recv random mac addr " MACDBG "recv rand mask" MACDBG "\n",
- MAC2STRDBG((const u8 *)&sm_config->mac.octet),
- MAC2STRDBG((const u8 *)&sm_config->random_mask)));
-
- len = OFFSETOF(wl_scanmac_t, data) + sm->len;
- err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
- sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-
- if (err != BCME_OK) {
- WL_ERR(("failed scanmac configuration\n"));
-
- /* Disable scan mac for clean-up */
- wl_cfg80211_random_mac_disable(dev);
- return err;
- }
- WL_DBG(("scanmac enable done"));
- } else {
- WL_ERR(("failed to enable scanmac, err=%d\n", err));
- }
-
- return err;
-}
-
-int wl_cfg80211_scan_mac_disable(struct net_device *dev)
-{
- s32 err = BCME_ERROR;
-
- err = wl_cfg80211_random_mac_disable(dev);
-
- return err;
-}
-#endif /* SUPPORT_RANDOM_MAC_SCAN */
-
#ifdef WLTDLS
-static s32
+s32
wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
{
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
@@ -22813,12 +23054,12 @@
/* Protect tdls config session */
mutex_lock(&cfg->tdls_sync);
- if ((state == TDLS_STATE_TEARDOWN)) {
+ if (state == TDLS_STATE_TEARDOWN) {
/* Host initiated TDLS tear down */
err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
goto exit;
} else if ((state == TDLS_STATE_AP_CREATE) ||
- (state == TDLS_STATE_NDI_CREATE)) {
+ (state == TDLS_STATE_NMI_CREATE)) {
/* We don't support tdls while AP/GO/NAN is operational */
update_reqd = true;
enable = false;
@@ -22851,24 +23092,18 @@
* Verify whether its a STA interface before
* we enable back tdls.
*/
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
- if ((iter->ndev) &&
- (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
+ GCC_DIAGNOSTIC_POP();
+ if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
(ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)) {
- WL_DBG(("Non STA iface operational. cfg_iftype:%d "
- "Can't enable tdls.\n",
+ WL_DBG(("Non STA iface operational. cfg_iftype:%d"
+ " Can't enable tdls.\n",
ndev->ieee80211_ptr->iftype));
err = -ENOTSUPP;
goto exit;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
/* No AP/GO found. Enable back tdls */
update_reqd = true;
enable = true;
@@ -22894,7 +23129,7 @@
WL_ERR(("tdls_enable setting failed. err:%d\n", err));
goto exit;
} else {
- WL_INFORM_MEM(("wl tdls_enable %d\n", enable));
+ WL_INFORM_MEM(("tdls_enable %d state:%d\n", enable, state));
/* Update the dhd state variable to be in sync */
dhdp->tdls_enable = enable;
if (state == TDLS_STATE_SETUP) {
@@ -22926,11 +23161,9 @@
struct net_info *iter, *next;
struct net_device *ndev = NULL;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
@@ -22940,9 +23173,6 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ndev;
}
@@ -22953,11 +23183,9 @@
struct net_info *iter, *next;
struct net_device *ndev = NULL;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
ndev = iter->ndev;
@@ -22965,9 +23193,6 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ndev;
}
@@ -23093,15 +23318,17 @@
static int
_wl_update_ap_rps_params(struct net_device *dev)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct bcm_cfg80211 *cfg = NULL;
rpsnoa_iovar_params_t iovar;
u8 smbuf[WLC_IOCTL_SMLEN];
if (!dev)
return BCME_BADARG;
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ cfg = wl_get_cfg(dev);
+
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
@@ -23158,8 +23385,8 @@
goto fail;
}
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
@@ -23197,8 +23424,8 @@
time_since_enable = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_avail_dur;
/* Convert ms to minutes, round down only */
- do_div(sleep, MSEC_PER_MIN);
- do_div(time_since_enable, MSEC_PER_MIN);
+ sleep = DIV_U64_BY_U32(sleep, MSEC_PER_MIN);
+ time_since_enable = DIV_U64_BY_U32(time_since_enable, MSEC_PER_MIN);
bytes_written += snprintf(command + bytes_written, total_len,
"state=%d sleep=%d time_since_enable=%d", state, sleep, time_since_enable);
@@ -23248,8 +23475,8 @@
goto exit;
}
}
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
@@ -23346,71 +23573,24 @@
/* Return the count of network interfaces (skip netless p2p discovery
* interface)
*/
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
iface_count++;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return iface_count;
}
-#ifdef WES_SUPPORT
-#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
-s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
- enum wl_custom_scan_time_type type, int time)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if (cfg == NULL) {
- return FALSE;
- }
-
- switch (type) {
- case WL_CUSTOM_SCAN_CHANNEL_TIME :
- WL_ERR(("Scan Channel Time %d\n", time));
- cfg->custom_scan_channel_time = time;
- break;
- case WL_CUSTOM_SCAN_UNASSOC_TIME :
- WL_ERR(("Scan Unassoc Time %d\n", time));
- cfg->custom_scan_unassoc_time = time;
- break;
- case WL_CUSTOM_SCAN_PASSIVE_TIME :
- WL_ERR(("Scan Passive Time %d\n", time));
- cfg->custom_scan_passive_time = time;
- break;
- case WL_CUSTOM_SCAN_HOME_TIME :
- WL_ERR(("Scan Home Time %d\n", time));
- cfg->custom_scan_home_time = time;
- break;
- case WL_CUSTOM_SCAN_HOME_AWAY_TIME :
- WL_ERR(("Scan Home Away Time %d\n", time));
- cfg->custom_scan_home_away_time = time;
- break;
- default:
- return FALSE;
- }
- return TRUE;
-}
-#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
-#endif /* WES_SUPPORT */
#ifdef WBTEXT
static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
{
wl_wbtext_bssid_t *bssid = NULL;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
/* check duplicate */
list_for_each_entry(bssid, &cfg->wbtext_bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (!memcmp(bssid->ea.octet, ea, ETHER_ADDR_LEN)) {
return FALSE;
}
@@ -23446,7 +23626,9 @@
char eabuf[ETHER_ADDR_STR_LEN];
while (!list_empty(&cfg->wbtext_bssid_list)) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
bssid = list_entry(cfg->wbtext_bssid_list.next, wl_wbtext_bssid_t, list);
+ GCC_DIAGNOSTIC_POP();
if (bssid) {
WL_DBG(("clear wbtext bssid : %s\n", bcm_ether_ntoa(&bssid->ea, eabuf)));
list_del(&bssid->list);
@@ -23480,6 +23662,7 @@
if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
DOT11_MNG_RRM_CAP_ID)) != NULL) {
if (isset(cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+ WL_DBG(("sending neighbor report\n"));
req_sent = wl_cfg80211_wbtext_send_nbr_req(cfg, dev, profile);
}
}
@@ -23490,6 +23673,7 @@
DOT11_MNG_EXT_CAP_ID)) != NULL) {
if (cap_ie->len >= DOT11_EXTCAP_LEN_BSSTRANS &&
isset(cap_ie->data, DOT11_EXT_CAP_BSSTRANS_MGMT)) {
+ WL_DBG(("sending btm query\n"));
wl_cfg80211_wbtext_send_btm_query(cfg, dev, profile);
}
}
@@ -23562,7 +23746,7 @@
WL_DBG(("Enter\n"));
- memset(&btq, 0, sizeof(wl_bsstrans_query_t));
+ bzero(&btq, sizeof(wl_bsstrans_query_t));
btq.version = WL_BSSTRANS_QUERY_VERSION_1;
error = wldev_iovar_setbuf(dev, "wnm_bsstrans_query", &btq,
@@ -23571,7 +23755,8 @@
ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
(struct ether_addr *)&profile->bssid);
} else {
- WL_ERR(("%s: failed to set BTM query, error=%d\n", __FUNCTION__, error));
+ WL_ERR(("wl_cfg80211_wbtext_send_btm_query: failed to set BTM query,"
+ " error=%d\n", error));
}
return ret;
}
@@ -23610,11 +23795,12 @@
}
static int
-wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len)
+wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, uint body_len)
{
dot11_rm_action_t *rm_rep;
bcm_tlv_t *tlvs;
- int tlv_len, i, error;
+ uint tlv_len;
+ int i, error;
dot11_neighbor_rep_ie_t *nbr_rep_ie;
chanspec_t ch;
wl_roam_channel_list_t channel_list;
@@ -23715,7 +23901,7 @@
int err = BCME_OK;
int iftype = 0;
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
+ bzero(iobuf, WLC_IOCTL_SMLEN);
/* Check the interface type */
ifdev = wl_get_netdev_by_name(cfg, ifname);
@@ -23770,8 +23956,8 @@
char iobuf[WLC_IOCTL_SMLEN];
int err = BCME_OK;
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
- memset(get_param, 0, sizeof(*get_param));
+ bzero(iobuf, WLC_IOCTL_SMLEN);
+ bzero(get_param, sizeof(*get_param));
err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
sizeof(iobuf), NULL);
if (err) {
@@ -23800,14 +23986,14 @@
return err;
}
#endif /* SUPPORT_RSSI_SUM_REPORT */
-
-#ifdef DHD_LOG_DUMP
/* Function to flush the FW preserve buffer content
* The buffer content is sent to host in form of events.
*/
void
wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask)
{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
int i;
int err = 0;
u8 buf[WLC_IOCTL_SMLEN] = {0};
@@ -23817,7 +24003,7 @@
memset(&set_param, 0, sizeof(set_param));
set_param.size = WLC_IOCTL_SMLEN;
- for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++)
+ for (i = 0; i < dhd->event_log_max_sets; i++)
{
if ((0x01u << i) & logset_mask) {
set_param.set = i;
@@ -23830,169 +24016,51 @@
}
}
}
-#endif /* DHD_LOG_DUMP */
+#ifdef USE_WFA_CERT_CONF
+extern int g_frameburst;
+#endif /* USE_WFA_CERT_CONF */
-#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
-static void
-wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason, char *string, int hang_info_cnt)
+int
+wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct wiphy *wiphy;
- char *hang_info;
- int len = 0;
- int bytes_written;
- uint32 dumy_data = 0;
- int reason_hang_info = 0;
- int cnt = 0;
- dhd_pub_t *dhd;
- int hang_reason_mismatch = FALSE;
+ int ret = BCME_OK;
+ int val = enable ? 1 : 0;
- if (!cfg || !cfg->wdev) {
- WL_ERR(("cfg=%p wdev=%p\n", cfg, (cfg ? cfg->wdev : NULL)));
- return;
+#ifdef USE_WFA_CERT_CONF
+ if (!g_frameburst) {
+ WL_DBG(("Skip setting frameburst\n"));
+ return 0;
}
+#endif /* USE_WFA_CERT_CONF */
- wiphy = cfg->wdev->wiphy;
-
- if (!wiphy) {
- WL_ERR(("wiphy is NULL\n"));
- return;
- }
-
- hang_info = MALLOCZ(cfg->osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
- if (hang_info == NULL) {
- WL_ERR(("alloc hang_info failed\n"));
- return;
- }
-
- dhd = (dhd_pub_t *)(cfg->pub);
-
- sscanf(string, "%d", &reason_hang_info);
- bytes_written = 0;
- len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
- if (strlen(string) == 0 || (reason_hang_info != reason)) {
- WL_ERR(("hang reason mismatch: string len %d reason_hang_info %d\n",
- (int)strlen(string), reason_hang_info));
- hang_reason_mismatch = TRUE;
- if (dhd) {
- get_debug_dump_time(dhd->debug_dump_time_hang_str);
- copy_debug_dump_time(dhd->debug_dump_time_str,
- dhd->debug_dump_time_hang_str);
- }
- bytes_written += scnprintf(&hang_info[bytes_written], len,
- "%d %d %s %08x %08x %08x %08x %08x %08x %08x",
- reason, VENDOR_SEND_HANG_EXT_INFO_VER,
- dhd->debug_dump_time_hang_str,
- 0, 0, 0, 0, 0, 0, 0);
- if (dhd) {
- clear_debug_dump_time(dhd->debug_dump_time_hang_str);
- }
+ WL_DBG(("Set frameburst %d\n", val));
+ ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
+ if (ret < 0) {
+ WL_ERR(("Failed set frameburst, ret=%d\n", ret));
} else {
- bytes_written += scnprintf(&hang_info[bytes_written], len, "%s", string);
+ WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
}
- WL_ERR(("hang reason: %d info cnt: %d\n", reason, hang_info_cnt));
-
- if (hang_reason_mismatch == FALSE) {
- cnt = hang_info_cnt;
- } else {
- cnt = HANG_FIELD_MISMATCH_CNT;
- }
-
- while (cnt < HANG_FIELD_CNT_MAX) {
- len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
- bytes_written += scnprintf(&hang_info[bytes_written], len,
- "%c%08x", HANG_RAW_DEL, dumy_data);
- cnt++;
- }
-
- WL_ERR(("hang info cnt: %d len: %d\n", cnt, (int)strlen(hang_info)));
- WL_ERR(("hang info data: %s\n", hang_info));
-
- wl_cfgvendor_send_async_event(wiphy,
- bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_HANGED,
- hang_info, (int)strlen(hang_info));
-
- memset(string, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
-
- if (hang_info) {
- MFREE(cfg->osh, hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
- }
-
-#ifdef DHD_LOG_DUMP
- if (dhd->debug_dump_time_hang_str) {
- dhd_logdump_cookie_save(dhd, dhd->debug_dump_time_hang_str, "HANG");
- }
-#endif /* DHD_LOG_DUMP */
-
- if (dhd) {
- clear_debug_dump_time(dhd->debug_dump_time_str);
- }
+ return ret;
}
-void
-wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 ret)
+s32
+wl_cfg80211_set_transition_mode(struct net_device *ndev, u32 transition_disabled)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhd;
- s32 err = 0;
- char ioctl_buf[WLC_IOCTL_SMLEN];
- memuse_info_t mu;
- int bytes_written = 0;
- int remain_len = 0;
+ int ret = BCME_OK;
+ int val = transition_disabled ? 0 : 1;
- if (!dev || !cfg) {
- WL_ERR(("dev=%p cfg=%p\n", dev, cfg));
- return;
+ WL_DBG(("Set SAE transition mode %d\n", val));
+ ret = wldev_iovar_setint(ndev, "extsae_transition_mode", val);
+ if (ret < 0) {
+ WL_ERR(("Failed set SAE transition mode, ret=%d\n", ret));
+ } else {
+ WL_INFORM(("SAE transition mode is %s\n",
+ transition_disabled ? "disabled" : "enabled"));
}
- dhd = (dhd_pub_t *)(cfg->pub);
-
- if (!dhd || !dhd->hang_info) {
- WL_ERR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
- dhd, (dhd ? dhd->hang_info : NULL)));
- return;
- }
-
- err = wldev_iovar_getbuf_bsscfg(dev, "memuse",
- NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, 0, NULL);
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- return;
- }
-
- memcpy(&mu, ioctl_buf, sizeof(memuse_info_t));
-
- if (mu.len >= sizeof(memuse_info_t)) {
- WL_ERR(("Heap Total: %d(%dK)\n", mu.arena_size, KB(mu.arena_size)));
- WL_ERR(("Free: %d(%dK), LWM: %d(%dK)\n",
- mu.arena_free, KB(mu.arena_free),
- mu.free_lwm, KB(mu.free_lwm)));
- WL_ERR(("In use: %d(%dK), HWM: %d(%dK)\n",
- mu.inuse_size, KB(mu.inuse_size),
- mu.inuse_hwm, KB(mu.inuse_hwm)));
- WL_ERR(("Malloc failure count: %d\n", mu.mf_count));
- }
-
- memset(dhd->hang_info, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
- remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
-
- get_debug_dump_time(dhd->debug_dump_time_hang_str);
- copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
-
- bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len,
- "%d %d %s %d %d %d %d %d %08x %08x",
- reason, VENDOR_SEND_HANG_EXT_INFO_VER,
- dhd->debug_dump_time_hang_str,
- ret, mu.arena_size, mu.arena_free, mu.inuse_size, mu.mf_count, 0, 0);
-
- dhd->hang_info_cnt = HANG_FIELD_IF_FAILURE_CNT;
-
- clear_debug_dump_time(dhd->debug_dump_time_hang_str);
-
- return;
+ return ret;
}
-#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
s32
wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level)
@@ -24010,32 +24078,60 @@
return BCME_OK;
}
-s32
-wl_cfg80211_check_for_nan_support(struct bcm_cfg80211 *cfg)
+const u8 *
+wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
+{
+ const u8 *attrib;
+ u16 attrib_id;
+ u16 attrib_len;
+
+ if (!buf) {
+ WL_ERR(("buf null\n"));
+ return NULL;
+ }
+
+ attrib = buf;
+ while (len >= 4) {
+ /* attribute id */
+ attrib_id = *attrib++ << 8;
+ attrib_id |= *attrib++;
+ len -= 2;
+
+ /* 2-byte big endian (MSB first) */
+ attrib_len = *attrib++ << 8;
+ attrib_len |= *attrib++;
+
+ len -= 2;
+ if (attrib_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return attrib;
+ }
+ if (len > attrib_len) {
+ len -= attrib_len; /* for the remaining subelt fields */
+ WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
+ attrib_id, attrib_len, len));
+
+ /* Go to next subelement */
+ attrib += attrib_len;
+ } else {
+ WL_ERR(("Incorrect Attribue:%4x attrib_len:%d\n",
+ attrib_id, attrib_len));
+ return NULL;
+ }
+ }
+ return NULL;
+}
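For reference, a minimal usage sketch for the wl_find_attribute() helper above (illustrative only, not part of this change; the sample buffer and the 0x1022 Message Type attribute ID are assumptions chosen to match the MSB-first id/len parsing):

static void example_wsc_attr_lookup(void)
{
	/* Two WSC-style TLVs: 0x1022 (Message Type), len 1, value 0x0C,
	 * followed by a second attribute 0x1048, len 2, dummy payload.
	 */
	const u8 wsc_attrs[] = {
		0x10, 0x22, 0x00, 0x01, 0x0C,
		0x10, 0x48, 0x00, 0x02, 0xAA, 0xBB
	};
	const u8 *msg = wl_find_attribute(wsc_attrs, sizeof(wsc_attrs), 0x1022);

	if (msg) {
		/* msg points at the attribute value, i.e. 0x0C here */
		WL_DBG(("WSC Message Type: 0x%02X\n", *msg));
	}
}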
+
+uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg)
{
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (((p2p_is_on(cfg)) && (wl_get_p2p_status(cfg, SCANNING) ||
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1))) ||
- (dhd->op_mode & DHD_FLAG_HOSTAP_MODE))
- {
- WL_ERR(("p2p/softap is enabled, cannot support nan\n"));
- return FALSE;
- }
- return TRUE;
+ WL_INFORM(("dhd->hang_was_sent = %d and busstate = %d\n",
+ dhd->hang_was_sent, dhd->busstate));
+ return ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent);
}
-#ifdef WL_IRQSET
-static void wl_irq_set_work_handler(struct work_struct * work)
-{
- struct bcm_cfg80211 *cfg = NULL;
- BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, irq_set_work.work);
-
- if (cfg) {
- dhd_irq_set_affinity(cfg->pub);
- }
-}
-#endif /* WL_IRQSET */
-
#ifdef WL_WPS_SYNC
static void wl_wps_reauth_timeout(unsigned long data)
{
@@ -24044,7 +24140,7 @@
s32 inst;
unsigned long flags;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
inst = wl_get_wps_inst_match(cfg, ndev);
if (inst >= 0) {
WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d! state:%d\n",
@@ -24060,7 +24156,7 @@
cfg->wps_session[inst].in_use = false;
}
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
}
static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg)
@@ -24129,20 +24225,21 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
unsigned long flags;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
/* Fetch and initialize a wps instance */
inst = wl_get_free_wps_inst(cfg);
if (inst == BCME_ERROR) {
WL_ERR(("[WPS] No free insance\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return BCME_ERROR;
}
cfg->wps_session[inst].in_use = true;
cfg->wps_session[inst].state = WPS_STATE_STARTED;
cfg->wps_session[inst].ndev = ndev;
cfg->wps_session[inst].mode = mode;
- memcpy(cfg->wps_session[inst].peer_mac, mac_addr, ETH_ALEN);
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ /* return check not required since both buffer lengths are the same */
+ (void)memcpy_s(cfg->wps_session[inst].peer_mac, ETH_ALEN, mac_addr, ETH_ALEN);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
WL_INFORM_MEM(("[%s][WPS] session created. Peer: " MACDBG "\n",
ndev->name, MAC2STRDBG(mac_addr)));
@@ -24157,27 +24254,27 @@
unsigned long flags;
u16 cur_state;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
/* Get current instance for the given ndev */
inst = wl_get_wps_inst_match(cfg, ndev);
if (inst == BCME_ERROR) {
WL_DBG(("[WPS] instance match NOT found\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return;
}
cur_state = cfg->wps_session[inst].state;
if (cur_state != WPS_STATE_DONE) {
WL_DBG(("[WPS] wrong state:%d\n", cur_state));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return;
}
/* Mark this as unused */
cfg->wps_session[inst].in_use = false;
cfg->wps_session[inst].state = WPS_STATE_IDLE;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
/* Ensure this API is called from sleepable context. */
if (timer_pending(&cfg->wps_session[inst].timer)) {
@@ -24195,11 +24292,16 @@
u16 cur_state;
s32 inst;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ WL_DBG(("[WPS] instance match NOT found\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
+ }
cur_state = cfg->wps_session[inst].state;
cfg->wps_session[inst].state = WPS_STATE_DONE;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
WL_INFORM_MEM(("[%s][WPS] state:%x\n", ndev->name, cur_state));
if (cur_state > WPS_STATE_IDLE) {
@@ -24215,10 +24317,10 @@
u16 cur_state;
bool wps_done = false;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
if (cur_state == WPS_STATE_REAUTH_WAIT) {
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
wl_clr_drv_status(cfg, CONNECTED, ndev);
wl_clr_drv_status(cfg, DISCONNECTING, ndev);
WL_INFORM_MEM(("[%s][WPS] REAUTH link down\n", ndev->name));
@@ -24234,7 +24336,7 @@
ndev->name, cur_state));
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
@@ -24251,7 +24353,7 @@
s32 ret = BCME_OK;
bool wps_done = false;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
if (!peer_mac) {
@@ -24291,7 +24393,7 @@
ret = BCME_ERROR;
}
exit:
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
}
@@ -24307,7 +24409,7 @@
s32 ret = BCME_OK;
bool wps_done = false;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
if (cur_state == WPS_STATE_REAUTH_WAIT) {
/* WPS session succeeded. del session. */
@@ -24320,7 +24422,7 @@
ndev->name, cur_state));
ret = BCME_ERROR;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
}
@@ -24333,17 +24435,51 @@
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
unsigned long flags;
u16 cur_state;
- bool wps_done = false;
s32 ret = BCME_OK;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
/* For AP case, check whether call came for right peer */
if (!peer_mac ||
memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
WL_ERR(("[WPS] macaddr mismatch\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ /* Mac addr not matching. Ignore. */
+ return BCME_ERROR;
+ }
+
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link up\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ return ret;
+}
+
+static s32
+wl_wps_handle_authorize(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ /* For AP case, check whether call came for right peer */
+ if (!peer_mac ||
+ memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ WL_ERR(("[WPS] macaddr mismatch\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
/* Mac addr not matching. Ignore. */
return BCME_ERROR;
}
@@ -24352,15 +24488,15 @@
/* WPS session succeeded. del session. */
cfg->wps_session[inst].state = WPS_STATE_DONE;
wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] REAUTH link up (WPS DONE)\n", ndev->name));
+ WL_INFORM_MEM(("[%s][WPS] Authorize done (WPS DONE)\n", ndev->name));
ret = BCME_OK;
} else {
- WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
+ WL_INFORM_MEM(("[%s][WPS] unexpected Authorize in state:%d \n",
ndev->name, cur_state));
ret = BCME_ERROR;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
}
@@ -24376,16 +24512,17 @@
u16 mode;
s32 ret = BCME_OK;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
mode = cfg->wps_session[inst].mode;
- if (cur_state == WPS_STATE_STARTED) {
+ if (((mode == WL_MODE_BSS) && (cur_state == WPS_STATE_STARTED)) ||
+ ((mode == WL_MODE_AP) && (cur_state == WPS_STATE_M8_SENT))) {
/* Move to reauth wait */
cfg->wps_session[inst].state = WPS_STATE_REAUTH_WAIT;
/* Use ndev to find the wps instance which fired the timer */
timer_set_private(&cfg->wps_session[inst].timer, ndev);
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
mod_timer(&cfg->wps_session[inst].timer,
jiffies + msecs_to_jiffies(WL_WPS_REAUTH_TIMEOUT));
WL_INFORM_MEM(("[%s][WPS] STATE_REAUTH_WAIT mode:%d Peer: " MACDBG "\n",
@@ -24395,7 +24532,7 @@
/* 802.1x cases */
WL_DBG(("[%s][WPS] EAP-FAIL\n", ndev->name));
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return ret;
}
@@ -24407,7 +24544,7 @@
u16 cur_state;
s32 ret = BCME_OK;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
/* If Disconnect command comes from user space for STA/GC,
* respond with event without waiting for event from fw as
@@ -24420,14 +24557,14 @@
/* Notify link down */
CFG80211_DISCONNECTED(ndev,
WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- true, GFP_KERNEL);
+ true, GFP_ATOMIC);
}
} else {
WL_DBG(("[%s][WPS] Not valid state to report disconnected:%d",
ndev->name, cur_state));
ret = BCME_UNSUPPORTED;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return ret;
}
@@ -24440,7 +24577,7 @@
s32 ret = BCME_OK;
bool wps_done = false;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
/* For GO/AP, ignore disconnect client during reauth state */
if (cur_state == WPS_STATE_REAUTH_WAIT) {
@@ -24451,11 +24588,15 @@
WL_INFORM_MEM(("[%s][WPS] BCAST deauth. WPS stopped.\n", ndev->name));
ret = BCME_OK;
goto exit;
+ } else if (!(memcmp(cfg->wps_session[inst].peer_mac,
+ peer_mac, ETH_ALEN))) {
+ WL_ERR(("[%s][WPS] Drop disconnect client\n", ndev->name));
+ ret = BCME_UNSUPPORTED;
}
}
exit:
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
}
@@ -24470,10 +24611,11 @@
u16 cur_state;
bool wps_done = false;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
cur_state = cfg->wps_session[inst].state;
if (cur_state == WPS_STATE_REAUTH_WAIT) {
cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
wps_done = true;
WL_INFORM_MEM(("[%s][WPS] Connect fail. WPS stopped.\n",
ndev->name));
@@ -24481,7 +24623,7 @@
WL_ERR(("[%s][WPS] Connect fail. state:%d\n",
ndev->name, cur_state));
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
if (wps_done) {
wl_wps_session_del(ndev);
}
@@ -24489,6 +24631,30 @@
}
static s32
+wl_wps_handle_m8_sent(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ if (cur_state == WPS_STATE_STARTED) {
+ /* Move to M8 sent state */
+ cfg->wps_session[inst].state = WPS_STATE_M8_SENT;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_OK;
+ } else {
+ /* 802.1x cases */
+ WL_DBG(("[%s][WPS] Not valid state to send M8\n", ndev->name));
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
+}
+
+static s32
wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac)
{
s32 inst;
@@ -24497,17 +24663,17 @@
s32 ret = BCME_ERROR;
unsigned long flags;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
/* Get current instance for the given ndev */
inst = wl_get_wps_inst_match(cfg, ndev);
if (inst == BCME_ERROR) {
/* No active WPS session. Do Nothing. */
WL_DBG(("[%s][WPS] No matching instance.\n", ndev->name));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return BCME_OK;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_NOTFOUND;
}
mode = cfg->wps_session[inst].mode;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
WL_DBG(("[%s][WPS] state:%d mode:%d Peer: " MACDBG "\n",
ndev->name, state, mode, MAC2STRDBG(peer_mac)));
@@ -24530,6 +24696,19 @@
}
break;
}
+ case WPS_STATE_M8_SENT:
+ {
+ /* Maintain the M8 sent state to verify
+ * that the EAP-FAIL sent is valid
+ */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_m8_sent(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for STA/GC mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
case WPS_STATE_EAP_FAIL:
{
/* Move to REAUTH WAIT following EAP-FAIL TX on GO/AP.
@@ -24594,6 +24773,18 @@
}
break;
}
+ case WPS_STATE_AUTHORIZE:
+ {
+ if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ wl_wps_handle_authorize(ndev, inst, peer_mac);
+ } else {
+ WL_INFORM_MEM(("[WPS] Authorize Request for wrong peer\n"));
+ }
+ }
+ break;
+ }
default:
WL_ERR(("[WPS] Unsupported state:%d mode:%d\n", state, mode));
@@ -24654,7 +24845,29 @@
ndev->name, opcode, eap_len));
if (opcode == EAP_WSC_MSG) {
const u8 *msg;
- msg = wl_find_attribute(exp->data,
+ const u8* parse_buf = exp->data;
+ /* Check if recvd pkt is fragmented */
+ if ((!tx_packet) &&
+ (exp->flags &
+ EAP_EXP_FLAGS_FRAGMENTED_DATA)) {
+ if ((eap_len - EAP_EXP_ATTRIB_DATA_OFFSET)
+ > 2) {
+ parse_buf +=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ eap_len -=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ WL_DBG(("Rcvd EAP"
+ " fragmented pkt\n"));
+ } else {
+ /* If the received packet is fragmented
+ * and does not carry a length
+ * field, drop the packet.
+ */
+ return;
+ }
+ }
+
+ msg = wl_find_attribute(parse_buf,
(eap_len - EAP_EXP_ATTRIB_DATA_OFFSET),
EAP_ATTRIB_MSGTYPE);
if (unlikely(!msg)) {
@@ -24665,6 +24878,12 @@
ndev->name));
wl_wps_session_update(ndev,
WPS_STATE_M8_RECVD, peer_mac);
+ } else if ((*msg == EAP_WSC_MSG_M8) &&
+ tx_packet) {
+ WL_INFORM_MEM(("[%s][WPS] M8 Sent\n",
+ ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_M8_SENT, peer_mac);
} else {
WL_DBG(("[%s][WPS] EAP WSC MSG: 0x%X\n",
ndev->name, *msg));
@@ -24701,122 +24920,686 @@
}
#endif /* WL_WPS_SYNC */
-const u8 *
-wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
+s32
+wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data)
{
- const u8 *attrib;
- u16 attrib_id;
- u16 attrib_len;
+ int err = BCME_OK;
+ u32 status = ntoh32(event->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u32 reason = ntoh32(event->reason);
- if (!buf) {
- WL_ERR(("buf null\n"));
- return NULL;
+ if ((status == WLC_SUP_KEYED || status == WLC_SUP_KEYXCHANGE_WAIT_G1) &&
+ reason == WLC_E_SUP_OTHER) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+ /* NL80211_CMD_PORT_AUTHORIZED supported above >= 4.15 */
+ cfg80211_port_authorized(ndev, (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID),
+ GFP_KERNEL);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#elif ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
+ defined(WL_VENDOR_EXT_SUPPORT))
+ err = wl_cfgvendor_send_async_event(bcmcfg_to_wiphy(cfg), ndev,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED, NULL, 0);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#else
+ /* not supported in kernels <= 3.14.0 */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
+ } else if (status < WLC_SUP_KEYXCHANGE_WAIT_G1 && reason != WLC_E_SUP_OTHER) {
+ /* if any failure seen while 4way HS, should send NL80211_CMD_DISCONNECT */
+ WL_ERR(("4way HS error. status:%d, reason:%d\n", status, reason));
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
}
- attrib = buf;
- while (len >= 4) {
- /* attribute id */
- attrib_id = *attrib++ << 8;
- attrib_id |= *attrib++;
- len -= 2;
+ return err;
+}
- /* 2-byte little endian */
- attrib_len = *attrib++ << 8;
- attrib_len |= *attrib++;
+#ifdef WL_BCNRECV
+static s32
+wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 status = ntoh32(e->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* Abort fakeapscan, when Roam is in progress */
+ if (status == WLC_E_STATUS_RXBCN_ABORT) {
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_ROAMABORT);
+ } else {
+ WL_ERR(("UNKNOWN STATUS. status:%d\n", status));
+ }
+ return BCME_OK;
+}
+#endif /* WL_BCNRECV */
- len -= 2;
- len -= attrib_len; /* for the remaining subelt fields */
- WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
- attrib_id, attrib_len, len));
- if (attrib_id == element_id) {
- /* This will point to start of subelement attrib after
- * attribute id & len
- */
- return attrib;
+#ifdef WL_MBO
+static s32
+wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ wl_event_mbo_t *mbo_evt = (wl_event_mbo_t *)data;
+ wl_event_mbo_cell_nw_switch_t *cell_sw_evt = NULL;
+ wl_btm_event_type_data_t *evt_data = NULL;
+
+ WL_INFORM(("MBO: Evt %u\n", mbo_evt->type));
+
+ if (mbo_evt->type == WL_MBO_E_CELLULAR_NW_SWITCH) {
+ cell_sw_evt = (wl_event_mbo_cell_nw_switch_t *)mbo_evt->data;
+ BCM_REFERENCE(cell_sw_evt);
+ SUPP_EVENT(("CTRL-EVENT-CELLULAR-SWITCH", "reason %d cur_assoc_time_left %u "
+ "reassoc_delay %u\n", cell_sw_evt->reason,
+ cell_sw_evt->assoc_time_remain, cell_sw_evt->reassoc_delay));
+ } else if (mbo_evt->type == WL_MBO_E_BTM_RCVD) {
+ evt_data = (wl_btm_event_type_data_t *)mbo_evt->data;
+ if (evt_data->version != WL_BTM_EVENT_DATA_VER_1) {
+ WL_ERR(("version mismatch. rcvd %u expected %u\n",
+ evt_data->version, WL_BTM_EVENT_DATA_VER_1));
+ return -1;
}
- /* Go to next subelement */
- attrib += attrib_len;
- }
- return NULL;
-}
-
-static const u8 *
-wl_retrieve_wps_attribute(const u8 *buf, u16 element_id)
-{
- const wl_wps_ie_t *ie = NULL;
- u16 len = 0;
- const u8 *attrib;
-
- if (!buf) {
- WL_ERR(("WPS IE not present"));
- return 0;
- }
-
- ie = (const wl_wps_ie_t*) buf;
- len = ie->len;
-
- /* Point subel to the P2P IE's subelt field.
- * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
- */
- attrib = ie->attrib;
- len -= 4; /* exclude OUI + OUI_TYPE */
-
- /* Search for attrib */
- return wl_find_attribute(attrib, len, element_id);
-}
-
-#define WPS_ATTR_REQ_TYPE 0x103a
-#define WPS_REQ_TYPE_ENROLLEE 0x01
-bool
-wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len)
-{
- const u8 *ie;
- const u8 *attrib;
-
- if ((ie = (const u8 *)wl_cfgp2p_find_wpsie(ie_ptr, len)) == NULL) {
- WL_DBG(("WPS IE not present. Do nothing.\n"));
- return false;
- }
-
- if ((attrib = wl_retrieve_wps_attribute(ie, WPS_ATTR_REQ_TYPE)) == NULL) {
- WL_DBG(("WPS_ATTR_REQ_TYPE not found!\n"));
- return false;
- }
-
- if (*attrib == WPS_REQ_TYPE_ENROLLEE) {
- WL_INFORM_MEM(("WPS Enrolle Active\n"));
- return true;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BTM-REQ-RCVD", "reason=%u\n",
+ evt_data->transition_reason));
} else {
- WL_DBG(("WPS_REQ_TYPE:%d\n", *attrib));
+ WL_INFORM(("UNKNOWN EVENT. type:%u\n", mbo_evt->type));
+ }
+ return err;
+}
+#endif /* WL_MBO */
+
+#ifdef WL_CAC_TS
+static s32
+wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 event = ntoh32(e->event_type);
+ s32 status = ntoh32(e->status);
+ s32 reason = ntoh32(e->reason);
+
+ BCM_REFERENCE(reason);
+
+ if (event == WLC_E_ADDTS_IND) {
+ /* The supp log format of adding ts_delay in success case needs to be maintained */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ uint *ts_delay = (uint *)data;
+ BCM_REFERENCE(ts_delay);
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d ts_delay=%u\n",
+ status, reason, *ts_delay));
+ } else {
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d\n",
+ status, reason));
+ }
+ } else if (event == WLC_E_DELTS_IND) {
+ SUPP_EVENT(("CTRL-EVENT-CAC-DELTS", "status=%d reason=%d\n", status, reason));
}
- return false;
+ return BCME_OK;
+}
+#endif /* WL_CAC_TS */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+static s32
+wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ uint reason = 0;
+ wl_bssid_pruned_evt_info_t *evt_info = (wl_bssid_pruned_evt_info_t *)data;
+
+ if (evt_info->version == WL_BSSID_PRUNE_EVT_VER_1) {
+ if (evt_info->reason == WLC_E_PRUNE_ASSOC_RETRY_DELAY) {
+ /* MBO assoc retry delay */
+ reason = WIFI_PRUNE_ASSOC_RETRY_DELAY;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms)\n", evt_info->SSID,
+ ETHER_TO_MACF(evt_info->BSSID), reason, evt_info->time_remaining));
+ } else if (evt_info->reason == WLC_E_PRUNE_RSSI_ASSOC_REJ) {
+ /* OCE RSSI-based assoc rejection */
+ reason = WIFI_PRUNE_RSSI_ASSOC_REJ;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms) rssi_threshold=%d(dBm)\n",
+ evt_info->SSID, ETHER_TO_MACF(evt_info->BSSID),
+ reason, evt_info->time_remaining, evt_info->rssi_threshold));
+ } else {
+ /* Reasons other than assoc retry delay/RSSI assoc rejection
+ * are not handled here
+ */
+ BCM_REFERENCE(reason);
+ WL_INFORM(("INVALID. reason:%u\n", evt_info->reason));
+ }
+ } else {
+ WL_INFORM(("version mismatch. rcvd %u expected %u\n", evt_info->version,
+ WL_BSSID_PRUNE_EVT_VER_1));
+ }
+ return err;
+}
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+static s32
+wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_event_msg_t event;
+
+ (void)memcpy_s(&event, sizeof(wl_event_msg_t),
+ e, sizeof(wl_event_msg_t));
+ return dhd_rtt_event_handler(dhdp, &event, data);
+}
+#endif /* RTT_SUPPORT */
+
+static s32
+wl_notify_dos_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 event = ntoh32(e->event_type);
+ s32 status = ntoh32(e->status);
+
+ if (event == WLC_E_IND_DOS_STATUS)
+ {
+ WL_INFORM(("DOS_STATUS_IND_EVENT_DETECTED\n"));
+ if ((status) == 1) {
+ WL_INFORM(("NORMAL\n"));
+ }
+ else if ((status) == 2) {
+ WL_INFORM(("ALERT\n"));
+ }
+ else if ((status) == 3) {
+ WL_INFORM(("PROTECTED\n"));
+ }
+ else if ((status) == 4) {
+ WL_INFORM(("MONITOR\n"));
+ }
+ else {
+ WL_INFORM(("STATE_UNKNOWN\n"));
+ }
+
+ }
+ else
+ WL_INFORM(("unknown_event\n"));
+ return 0;
+}
+void
+wl_print_verinfo(struct bcm_cfg80211 *cfg)
+{
+ char *ver_ptr;
+ uint32 alloc_len = MOD_PARAM_INFOLEN;
+
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return;
+ }
+
+ ver_ptr = (char *)MALLOCZ(cfg->osh, alloc_len);
+ if (!ver_ptr) {
+ WL_ERR(("Failed to alloc ver_ptr\n"));
+ return;
+ }
+
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ TRUE, &ver_ptr, alloc_len)) {
+ WL_ERR(("DHD Version: %s\n", ver_ptr));
+ }
+
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ FALSE, &ver_ptr, alloc_len)) {
+ WL_ERR(("F/W Version: %s\n", ver_ptr));
+ }
+
+ MFREE(cfg->osh, ver_ptr, alloc_len);
+}
+#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
+typedef struct {
+ uint16 id;
+ uint16 len;
+ uint32 val;
+} he_xtlv_v32;
+
+ static bool
+wl_he_get_uint_cb(void *ctx, uint16 *id, uint16 *len)
+{
+ he_xtlv_v32 *v32 = ctx;
+
+ *id = v32->id;
+ *len = v32->len;
+
+ return FALSE;
}
-#ifdef USE_WFA_CERT_CONF
-extern int g_frameburst;
-#endif /* USE_WFA_CERT_CONF */
+ static void
+wl_he_pack_uint_cb(void *ctx, uint16 id, uint16 len, uint8 *buf)
+{
+ he_xtlv_v32 *v32 = ctx;
+ BCM_REFERENCE(id);
+ BCM_REFERENCE(len);
+
+ v32->val = htod32(v32->val);
+
+ switch (v32->len) {
+ case sizeof(uint8):
+ *buf = (uint8)v32->val;
+ break;
+ case sizeof(uint16):
+ store16_ua(buf, (uint16)v32->val);
+ break;
+ case sizeof(uint32):
+ store32_ua(buf, v32->val);
+ break;
+ default:
+ /* ASSERT(0); */
+ break;
+ }
+}
+
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 interface_type, bool set)
+{
+ bcm_xtlv_t read_he_xtlv;
+ uint8 se_he_xtlv[32];
+ int se_he_xtlv_len = sizeof(se_he_xtlv);
+ he_xtlv_v32 v32;
+ u32 he_feature = 0;
+ s32 err = 0;
+ u32 he_interface = 0;
+
+ read_he_xtlv.id = WL_HE_CMD_FEATURES;
+ read_he_xtlv.len = 0;
+ err = wldev_iovar_getbuf_bsscfg(dev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL);
+ if (err < 0) {
+ if (err == BCME_UNSUPPORTED) {
+ /* HE not supported. Do nothing. */
+ return BCME_OK;
+ }
+ WL_ERR(("HE get failed. error=%d\n", err));
+ } else {
+ he_feature = *(int*)cfg->ioctl_buf;
+ he_feature = dtoh32(he_feature);
+ }
+
+ v32.id = WL_HE_CMD_FEATURES;
+ v32.len = sizeof(s32);
+ if (interface_type == WL_IF_TYPE_P2P_DISC) {
+ he_interface = WL_HE_FEATURES_HE_P2P;
+ } else if (interface_type == WL_IF_TYPE_AP) {
+ he_interface = WL_HE_FEATURES_HE_AP;
+ } else {
+ WL_ERR(("HE request for Invalid interface type"));
+ err = BCME_BADARG;
+ return err;
+ }
+
+ if (set) {
+ v32.val = (he_feature | he_interface);
+ } else {
+ v32.val = (he_feature & ~he_interface);
+ }
+
+ err = bcm_pack_xtlv_buf((void *)&v32, se_he_xtlv, sizeof(se_he_xtlv),
+ BCM_XTLV_OPTION_ALIGN32, wl_he_get_uint_cb, wl_he_pack_uint_cb,
+ &se_he_xtlv_len);
+ if (err != BCME_OK) {
+ WL_ERR(("failed to pack he settvl=%d\n", err));
+ }
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "he", &se_he_xtlv, sizeof(se_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+ WL_INFORM(("Set HE[%d] done\n", set));
+
+ return err;
+}
+#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
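As an illustration of how the HE feature toggle above might be driven (a sketch only, not part of this change; the bssidx value and calling context are assumptions):

#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
static void example_disable_he_for_softap(struct bcm_cfg80211 *cfg,
	struct net_device *dev)
{
	/* Clears WL_HE_FEATURES_HE_AP from the firmware HE feature bitmap;
	 * bssidx 0 assumes the primary BSS.
	 */
	if (wl_cfg80211_set_he_mode(dev, cfg, 0, WL_IF_TYPE_AP, FALSE) != BCME_OK) {
		WL_ERR(("could not disable HE for SoftAP\n"));
	}
}
#endif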
+
+/* Get the concurrency mode */
+int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+ uint cmode = CONCURRENCY_MODE_NONE;
+ u32 connected_cnt = 0;
+ u32 pre_channel = 0, channel = 0;
+ u32 pre_band = 0;
+ u32 chanspec = 0;
+ u32 band = 0;
+
+ connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ if (connected_cnt <= 1) {
+ return cmode;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(chanspec));
+ band = (channel <= CH_MAX_2G_CHANNEL) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ }
+ if ((!pre_channel && channel)) {
+ pre_band = band;
+ pre_channel = channel;
+ } else if (pre_channel) {
+ if ((pre_band == band) && (pre_channel == channel)) {
+ cmode = CONCURRENCY_SCC_MODE;
+ goto exit;
+ } else if ((pre_band == band) && (pre_channel != channel)) {
+ cmode = CONCURRENCY_VSDB_MODE;
+ goto exit;
+ } else if (pre_band != band) {
+ cmode = CONCURRENCY_RSDB_MODE;
+ goto exit;
+ }
+ }
+ }
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+exit:
+ return cmode;
+}
+#ifdef WL_CHAN_UTIL
+static s32
+wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ struct sk_buff *skb = NULL;
+ s32 status = ntoh32(e->status);
+ u8 chan_use_percentage = 0;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+ gfp_t kflags;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ len = CU_ATTR_HDR_LEN + sizeof(u8);
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_CU, kflags);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ skb = cfg80211_vendor_event_alloc(wiphy, len, BRCM_VENDOR_EVENT_CU, kflags);
+#else
+ /* No support exist */
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ if ((status == WLC_E_STATUS_SUCCESS) && data) {
+ wl_bssload_t *bssload_report = (wl_bssload_t *)data;
+ chan_use_percentage = (bssload_report->chan_util * 100) / 255;
+ WL_DBG(("ChannelUtilization=%hhu\n", chan_use_percentage));
+ err = nla_put_u8(skb, CU_ATTR_PERCENTAGE, chan_use_percentage);
+ if (err < 0) {
+ WL_ERR(("Failed to put CU_ATTR_PERCENTAGE, err:%d\n", err));
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ cfg80211_vendor_event(skb, kflags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+ return err;
+}
+
+#define WL_CHAN_UTIL_DEFAULT_INTERVAL 3000
+#define WL_CHAN_UTIL_THRESH_MIN 15
+#define WL_CHAN_UTIL_THRESH_INTERVAL 10
+#ifndef CUSTOM_CU_INTERVAL
+#define CUSTOM_CU_INTERVAL WL_CHAN_UTIL_DEFAULT_INTERVAL
+#endif /* CUSTOM_CU_INTERVAL */
+
+static s32
+wl_cfg80211_start_bssload_report(struct net_device *ndev)
+{
+ s32 err = BCME_OK;
+ wl_bssload_cfg_t blcfg;
+ u8 i;
+ struct bcm_cfg80211 *cfg;
+
+ if (!ndev) {
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
+ }
+
+ /* Typecasting to void as the buffer size is same as the memset size */
+ (void)memset_s(&blcfg, sizeof(wl_bssload_cfg_t), 0, sizeof(wl_bssload_cfg_t));
+ /* Set default report interval 3 sec and 8 threshhold levels between 15 to 85% */
+ blcfg.rate_limit_msec = CUSTOM_CU_INTERVAL;
+ blcfg.num_util_levels = MAX_BSSLOAD_LEVELS;
+ for (i = 0; i < MAX_BSSLOAD_LEVELS; i++) {
+ blcfg.util_levels[i] = (((WL_CHAN_UTIL_THRESH_MIN +
+ (i * WL_CHAN_UTIL_THRESH_INTERVAL)) * 255) / 100);
+ }
+
+ err = wldev_iovar_setbuf(ndev, "bssload_report_event", &blcfg,
+ sizeof(wl_bssload_cfg_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ }
+
+ return err;
+}
+#endif /* WL_CHAN_UTIL */
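For clarity, the util_levels arithmetic in wl_cfg80211_start_bssload_report() above works out as follows (assuming MAX_BSSLOAD_LEVELS is 8, as the 15-85% comment implies); each percentage threshold is scaled to the firmware's 0-255 channel-utilization range using integer division:

	i : threshold : (threshold * 255) / 100
	0 :    15%    :  38
	1 :    25%    :  63
	2 :    35%    :  89
	3 :    45%    : 114
	4 :    55%    : 140
	5 :    65%    : 165
	6 :    75%    : 191
	7 :    85%    : 216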
+
+s32
+wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg;
+
+ if (!ndev) {
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
+ }
+
+ mutex_lock(&cfg->event_sync);
+ err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+ (void)memcpy_s(eventmask, WL_EVENTING_MASK_LEN, iovbuf, WL_EVENTING_MASK_LEN);
+ /* Add set/clear of event mask under feature specific flags */
+ if (enable) {
+ WL_DBG(("%s: Enabling events on resume\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ setbit(eventmask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+ } else {
+ WL_DBG(("%s: Disabling events before suspend\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ clrbit(eventmask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+ }
+
+ err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+eventmsg_out:
+ mutex_unlock(&cfg->event_sync);
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
int
-wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
+wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
{
- int ret = BCME_OK;
- int val = enable ? 1 : 0;
+ s32 err = BCME_OK;
+ s32 chan = 0;
+ u32 band = 0;
+ u32 bw = WL_CHANSPEC_BW_20;
+ chanspec_t chspec = 0;
+ wl_chan_switch_t csa_arg;
+ struct cfg80211_chan_def *chandef = ¶ms->chandef;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#ifdef USE_WFA_CERT_CONF
- if (!g_frameburst) {
- WL_DBG(("Skip setting frameburst\n"));
- return 0;
+ dev = ndev_to_wlc_ndev(dev, cfg);
+ chan = ieee80211_frequency_to_channel(chandef->chan->center_freq);
+ band = chandef->chan->band;
+
+ WL_ERR(("netdev_ifidx(%d), target channel(%d) target bandwidth(%d),"
+ " mode(%d), count(%d)\n", dev->ifindex, chan, chandef->width,
+ params->block_tx, params->count));
+
+ if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) {
+ WL_ERR(("Channel Switch doesn't support on "
+ "the non-SoftAP mode\n"));
+ return -EINVAL;
}
-#endif /* USE_WFA_CERT_CONF */
- WL_DBG(("Set frameburst %d\n", val));
- ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
- if (ret < 0) {
- WL_ERR(("Failed set frameburst, ret=%d\n", ret));
+ if (chan == cfg->ap_oper_channel) {
+ WL_ERR(("Channel %d is same as current operating channel,"
+ " so skip\n", chan));
+ return BCME_OK;
+ }
+
+ if (band == IEEE80211_BAND_5GHZ) {
+#ifdef APSTA_RESTRICTED_CHANNEL
+ if (chan != DEFAULT_5G_SOFTAP_CHANNEL) {
+ WL_ERR(("Invalid 5G Channel, chan=%d\n", chan));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+ err = wl_get_bandwidth_cap(bcmcfg_to_prmry_ndev(cfg),
+ band, &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information,"
+ " err=%d\n", err));
+ return err;
+ }
+ } else if (band == IEEE80211_BAND_2GHZ) {
+#ifdef APSTA_RESTRICTED_CHANNEL
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ u32 *sta_chan = (u32 *)wl_read_prof(cfg,
+ bcmcfg_to_prmry_ndev(cfg), WL_PROF_CHAN);
+
+ /* In 2GHz STA/SoftAP concurrent mode, the operating channel
+ * of STA and SoftAP should be configured to the same 2GHz
+ * channel. Otherwise, it is an invalid configuration.
+ */
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
+ wl_get_drv_status(cfg, CONNECTED,
+ bcmcfg_to_prmry_ndev(cfg)) && sta_chan &&
+ (*sta_chan != chan)) {
+ WL_ERR(("Invalid 2G Channel in case of STA/SoftAP"
+ " concurrent mode, sta_chan=%d, chan=%d\n",
+ *sta_chan, chan));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+ bw = WL_CHANSPEC_BW_20;
} else {
- WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
+ WL_ERR(("invalid band (%d)\n", band));
+ return -EINVAL;
}
- return ret;
+ chspec = wf_channel2chspec(chan, bw);
+ if (!wf_chspec_valid(chspec)) {
+ WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+ return -EINVAL;
+ }
+
+ /* Send CSA to associated STAs */
+ memset(&csa_arg, 0, sizeof(wl_chan_switch_t));
+ csa_arg.mode = params->block_tx;
+ csa_arg.count = params->count;
+ csa_arg.chspec = chspec;
+ csa_arg.frame_type = CSA_BROADCAST_ACTION_FRAME;
+ csa_arg.reg = 0;
+
+ err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(wl_chan_switch_t),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("Failed to switch channel, err=%d\n", err));
+ }
+
+ return err;
}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+
+#ifdef WL_WIPSEVT
+int
+wl_cfg80211_wips_event(uint16 misdeauth, char* bssid)
+{
+ s32 err = BCME_OK;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg;
+ struct net_device *ndev;
+ struct wiphy *wiphy;
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("WIPS evt invalid arg\n"));
+ return err;
+ }
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wiphy = bcmcfg_to_wiphy(cfg);
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev),
+ BRCM_VENDOR_WIPS_EVENT_BUF_LEN, BRCM_VENDOR_EVENT_WIPS, kflags);
+
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return BCME_NOMEM;
+ }
+
+ err = nla_put_u16(skb, WIPS_ATTR_DEAUTH_CNT, misdeauth);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_CNT failed\n"));
+ goto fail;
+ }
+ err = nla_put(skb, WPPS_ATTR_DEAUTH_BSSID, ETHER_ADDR_LEN, bssid);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put WPPS_ATTR_DEAUTH_BSSID failed\n"));
+ goto fail;
+ }
+ cfg80211_vendor_event(skb, kflags);
+
+ return err;
+
+fail:
+ if (skb) {
+ nlmsg_free(skb);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ return err;
+}
+#endif /* WL_WIPSEVT */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.h
index 64d20d0..18433ea 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.h 701747 2017-05-26 08:40:23Z $
+ * $Id: wl_cfg80211.h 815697 2019-04-19 03:53:33Z $
*/
/**
@@ -42,20 +44,62 @@
#include <net/cfg80211.h>
#include <linux/rfkill.h>
#include <osl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#define WL_CFG_DRV_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_DRV_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_WPS_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_WPS_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_NET_LIST_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_NET_LIST_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_EQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_EQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_BAM_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_BAM_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_VNDR_OUI_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_VNDR_OUI_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
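All of the WL_CFG_*_LOCK/UNLOCK wrappers above share one shape: the lock macro stores the flags returned by osl_spin_lock() and the unlock macro hands them back, which is why a flags variable is threaded through every call site. A minimal user-space sketch of that pattern follows, with trivial stand-ins for osl_spin_lock()/osl_spin_unlock().

/* Sketch of the save-flags lock pattern behind the WL_CFG_* wrappers;
 * stub_spin_lock()/stub_spin_unlock() stand in for the OSL helpers. */
#include <stdio.h>

typedef int spinlock_stub_t;

static unsigned long stub_spin_lock(spinlock_stub_t *lock)
{
	(void)lock;
	return 0x1UL;                     /* pretend these are the saved IRQ flags */
}

static void stub_spin_unlock(spinlock_stub_t *lock, unsigned long flags)
{
	(void)lock;
	(void)flags;                      /* the real unlock restores IRQ state from flags */
}

#define CFG_LOCK(lock, flags)   ((flags) = stub_spin_lock(lock))
#define CFG_UNLOCK(lock, flags) stub_spin_unlock((lock), (flags))

int main(void)
{
	spinlock_stub_t lock = 0;
	unsigned long flags;

	CFG_LOCK(&lock, flags);
	printf("in critical section, saved flags=%#lx\n", flags);
	CFG_UNLOCK(&lock, flags);
	return 0;
}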
#include <wl_cfgp2p.h>
#ifdef WL_NAN
#include <wl_cfgnan.h>
#endif /* WL_NAN */
+#ifdef WL_BAM
+#include <wl_bam.h>
+#endif /* WL_BAM */
#ifdef BIGDATA_SOFTAP
#include <wl_bigdata.h>
-#endif // endif
+#endif /* BIGDATA_SOFTAP */
struct wl_conf;
struct wl_iface;
struct bcm_cfg80211;
struct wl_security;
struct wl_ibss;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) && !defined(WL_SAE))
+#define WL_SAE
+#endif // endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && !defined(WL_FILS_ROAM_OFFLD))
+#define WL_FILS_ROAM_OFFLD
+#endif // endif
+
+#ifdef WL_SAE
+#define IS_AKM_SAE(akm) ((akm) == WLAN_AKM_SUITE_SAE)
+#else
+#define IS_AKM_SAE(akm) FALSE
+#endif // endif
+#ifdef WL_OWE
+#define IS_AKM_OWE(akm) ((akm) == WLAN_AKM_SUITE_OWE)
+#else
+#define IS_AKM_OWE(akm) FALSE
+#endif // endif
+
#define htod32(i) (i)
#define htod16(i) (i)
#define dtoh64(i) (i)
@@ -72,10 +116,16 @@
#define WL_DBG_INFO (1 << 1)
#define WL_DBG_ERR (1 << 0)
+#ifndef WAIT_FOR_DISCONNECT_MAX
+#define WAIT_FOR_DISCONNECT_MAX 10
+#endif /* WAIT_FOR_DISCONNECT_MAX */
+#define WAIT_FOR_DISCONNECT_STATE_SYNC 10
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
/* Newer kernels use defines from nl80211.h */
#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
+#define IEEE80211_BAND_60GHZ NL80211_BAND_60GHZ
#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
#endif /* LINUX_VER >= 4.7 */
@@ -84,8 +134,6 @@
int binary_len, const char *fmt, ...);
extern char *dhd_log_dump_get_timestamp(void);
#ifndef _DHD_LOG_DUMP_DEFINITIONS_
-#define DLD_BUF_TYPE_GENERAL 0
-#define DLD_BUF_TYPE_SPECIAL 1
#define DHD_LOG_DUMP_WRITE(fmt, ...) \
dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
@@ -93,6 +141,16 @@
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
#endif /* DHD_LOG_DUMP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
+ defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ BCM_REFERENCE(loc_gen); \
+ cfg80211_disconnected(dev, reason, ie, len, gfp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
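CFG80211_DISCONNECTED above is a compile-time compatibility shim: on kernels from 4.2 (or MSM kernels with CFG80211_DISCONNECTED_V2) the locally_generated flag is passed through to cfg80211_disconnected(), while older kernels drop it via BCM_REFERENCE(). A stand-alone sketch of the same version-gated wrapper pattern is shown below, with dummy old/new APIs standing in for the two cfg80211_disconnected() signatures.

/* Model of a kernel-version-gated wrapper like CFG80211_DISCONNECTED. */
#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define LINUX_VERSION_CODE KERNEL_VERSION(4, 19, 0)   /* assumed for the demo */

void new_disconnected(int reason, int locally_generated)
{
	printf("new API: reason=%d locally_generated=%d\n", reason, locally_generated);
}

void old_disconnected(int reason)
{
	printf("old API: reason=%d\n", reason);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
#define DISCONNECTED(reason, loc_gen) new_disconnected((reason), (loc_gen))
#else
#define DISCONNECTED(reason, loc_gen) old_disconnected(reason)    /* loc_gen dropped */
#endif

int main(void)
{
	DISCONNECTED(3, 1);   /* the same call site builds against either kernel */
	return 0;
}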
+
/* 0 invalidates all debug messages. default is 1 */
#define WL_DBG_LEVEL 0xFF
@@ -109,7 +167,7 @@
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE args; \
} \
@@ -118,7 +176,7 @@
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
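The printk-to-pr_cont substitutions in these logging macros matter because the first printk() has already emitted the log level and the function-name prefix; the message body must continue that record rather than start a new, unprefixed one. A small user-space model of the prefix-plus-continuation split follows, with printf standing in for printk()/pr_cont().

/* Model of the prefix + continuation split behind the WL_ERR-style macros. */
#include <stdio.h>

#define LOG_PREFIX(tag)  printf("%s %s : ", (tag), __func__)
#define LOG_CONT(...)    printf(__VA_ARGS__)   /* must not re-emit a prefix */

#define WL_ERR_MODEL(...)                       \
	do {                                    \
		LOG_PREFIX("CFG80211-ERROR)");  \
		LOG_CONT(__VA_ARGS__);          \
	} while (0)

int main(void)
{
	WL_ERR_MODEL("invalid band (%d)\n", -1);   /* one log line, one prefix */
	return 0;
}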
#define WL_ERR_MEM(args) \
@@ -132,7 +190,7 @@
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
printk(KERN_INFO CFG80211_INFO_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE args; \
} \
@@ -141,7 +199,7 @@
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE_EX args; \
} \
@@ -156,7 +214,7 @@
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
#define WL_ERR_KERN(args) WL_ERR(args)
@@ -170,7 +228,7 @@
do { \
if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \
printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
#define WL_ERR_KERN(args) WL_ERR(args)
@@ -208,7 +266,7 @@
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
@@ -219,7 +277,7 @@
do { \
if (wl_dbg_level & WL_DBG_SCAN) { \
printk(KERN_INFO "CFG80211-SCAN) %s :", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
#ifdef WL_TRACE
@@ -229,7 +287,7 @@
do { \
if (wl_dbg_level & WL_DBG_TRACE) { \
printk(KERN_INFO "CFG80211-TRACE) %s :", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
#ifdef WL_TRACE_HW4
@@ -240,8 +298,8 @@
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \
- printk args; \
- } \
+ pr_cont args; \
+ } \
} while (0)
#else
#define WL_TRACE_HW4 WL_TRACE
@@ -251,7 +309,7 @@
do { \
if (wl_dbg_level & WL_DBG_DBG) { \
printk(KERN_INFO "CFG80211-DEBUG) %s :", __func__); \
- printk args; \
+ pr_cont args; \
} \
} while (0)
#else /* !(WL_DBG_LEVEL > 0) */
@@ -276,7 +334,7 @@
#define WL_MED_DWELL_TIME 400
#define WL_MIN_DWELL_TIME 100
#define WL_LONG_DWELL_TIME 1000
-#define IFACE_MAX_CNT 4
+#define IFACE_MAX_CNT 6
#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
@@ -296,6 +354,11 @@
#ifdef DHD_LOSSLESS_ROAMING
#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */
#endif // endif
+
+#ifdef ENABLE_HOGSQS
+#define WL_HOGSQS_TIMEOUT_MS 5000 /* Hogger detection timeout */
+#endif // endif
+
/* Bring down SCB Timeout to 20secs from 60secs default */
#ifndef WL_SCB_TIMEOUT
#define WL_SCB_TIMEOUT 20
@@ -323,24 +386,46 @@
/* Cipher suites */
#ifndef WLAN_CIPHER_SUITE_PMK
-#define WLAN_CIPHER_SUITE_PMK 0x00904C00
+#define WLAN_CIPHER_SUITE_PMK 0x00904C00
#endif /* WLAN_CIPHER_SUITE_PMK */
#ifndef WLAN_AKM_SUITE_FT_8021X
-#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
+#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
#endif /* WLAN_AKM_SUITE_FT_8021X */
#ifndef WLAN_AKM_SUITE_FT_PSK
-#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
+#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
#endif /* WLAN_AKM_SUITE_FT_PSK */
+#ifndef WLAN_AKM_SUITE_8021X_SUITE_B
+#define WLAN_AKM_SUITE_8021X_SUITE_B 0x000FAC0B
+#define WLAN_AKM_SUITE_8021X_SUITE_B_192 0x000FAC0C
+#endif /* WLAN_AKM_SUITE_8021X_SUITE_B */
+
+/* TODO: even in upstream Linux (v5.0), FT-1X-SHA384 isn't defined or supported yet;
+ * revisit here later to sync up with the correct name.
+ */
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 0x000FAC0D
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+#define WLAN_AKM_SUITE_DPP 0x506F9A02
+#define WFA_AUTH_DPP 0x200000 /* WFA DPP AUTH */
+
#ifndef WLAN_AKM_SUITE_FILS_SHA256
-#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
-#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
-#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
-#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
+#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
+#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
#endif /* WLAN_AKM_SUITE_FILS_SHA256 */
+#define MIN_VENDOR_EXTN_IE_LEN 2
+#ifdef WL_OWE
+#ifndef WLAN_AKM_SUITE_OWE
+#define WLAN_AKM_SUITE_OWE 0X000FAC12
+#endif /* WLAN_AKM_SUITE_OWE */
+#endif /* WL_OWE */
+
/*
* BRCM local.
* Use a high number that's unlikely to clash with linux upstream for a while until we can
@@ -429,18 +514,18 @@
};
typedef enum wl_iftype {
- WL_IF_TYPE_STA = 0,
- WL_IF_TYPE_AP = 1,
- WL_IF_TYPE_AWDL = 2,
- WL_IF_TYPE_NAN_NMI = 3,
- WL_IF_TYPE_NAN = 4,
- WL_IF_TYPE_P2P_GO = 5,
- WL_IF_TYPE_P2P_GC = 6,
- WL_IF_TYPE_P2P_DISC = 7,
- WL_IF_TYPE_IBSS = 8,
- WL_IF_TYPE_MONITOR = 9,
- WL_IF_TYPE_AIBSS = 10,
- WL_IF_TYPE_MAX
+ WL_IF_TYPE_STA = 0,
+ WL_IF_TYPE_AP = 1,
+ WL_IF_TYPE_AWDL = 2,
+ WL_IF_TYPE_NAN_NMI = 3,
+ WL_IF_TYPE_NAN = 4,
+ WL_IF_TYPE_P2P_GO = 5,
+ WL_IF_TYPE_P2P_GC = 6,
+ WL_IF_TYPE_P2P_DISC = 7,
+ WL_IF_TYPE_IBSS = 8,
+ WL_IF_TYPE_MONITOR = 9,
+ WL_IF_TYPE_AIBSS = 10,
+ WL_IF_TYPE_MAX
} wl_iftype_t;
typedef enum wl_interface_state {
@@ -473,7 +558,8 @@
WL_PROF_BSSID,
WL_PROF_ACT,
WL_PROF_BEACONINT,
- WL_PROF_DTIMPERIOD
+ WL_PROF_DTIMPERIOD,
+ WL_PROF_LATEST_BSSID
};
/* dongle escan state */
@@ -508,7 +594,7 @@
TDLS_STATE_TEARDOWN,
TDLS_STATE_IF_CREATE,
TDLS_STATE_IF_DELETE,
- TDLS_STATE_NDI_CREATE
+ TDLS_STATE_NMI_CREATE
};
/* beacon / probe_response */
@@ -586,11 +672,13 @@
u8 assoc_req_ie[VNDR_IES_BUF_LEN];
u8 assoc_res_ie[VNDR_IES_BUF_LEN];
u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 disassoc_ie[VNDR_IES_BUF_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 assoc_req_ie_len;
u32 assoc_res_ie_len;
u32 beacon_ie_len;
+ u32 disassoc_ie_len;
} wl_bss_vndr_ies_t;
typedef struct wl_cfgbss {
@@ -602,6 +690,11 @@
struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */
} wl_cfgbss_t;
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 8
+#endif // endif
+
/* cfg driver profile */
struct wl_profile {
u32 mode;
@@ -614,6 +707,13 @@
u16 beacon_interval;
u8 dtim_period;
bool active;
+ u8 latest_bssid[ETHER_ADDR_LEN];
+ u32 channel_width;
+#ifdef WL11U
+ u8 iw_ie[IW_IES_MAX_BUF_LEN];
+ u32 iw_ie_len;
+#endif /* WL11U */
+ bool dpp_listen;
};
struct wl_wps_ie {
@@ -654,8 +754,79 @@
s32 bssidx;
wl_cfgbss_t bss;
u8 ifidx;
+#ifdef WL_SAE
+ unsigned long mgmt_txstatus;
+ size_t mgmt_txid;
+ struct completion mgmt_tx_cpl;
+#endif /* WL_SAE */
struct list_head list; /* list of all net_info structure */
};
+#ifdef WL_SAE
+#define WL_WSEC_MAX_SAE_PASSWORD_LEN 128
+#define WL_WSEC_MIN_SAE_PASSWORD_LEN 8
+/**
+ * struct brcmf_wsec_sae_pwd_le - firmware SAE password material.
+ *
+ * @key_len: number of octets in key materials.
+ * @key: SAE password material.
+ */
+struct wl_wsec_sae_pwd_le {
+ u16 key_len;
+ u8 key[WL_WSEC_MAX_SAE_PASSWORD_LEN];
+};
+
+#endif // endif
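wl_wsec_sae_pwd_le is simply a length-prefixed byte buffer for the SAE passphrase, bounded by the 8..128 byte limits defined just above it. The sketch below models filling such a buffer with the implied bounds check; the stand-in struct only mirrors the two fields shown here.

/* Model of packing an SAE passphrase into a length-prefixed buffer. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAE_PWD_MIN 8
#define SAE_PWD_MAX 128

struct sae_pwd {                  /* stand-in for wl_wsec_sae_pwd_le */
	uint16_t key_len;         /* number of octets in key */
	uint8_t  key[SAE_PWD_MAX];
};

static int fill_sae_pwd(struct sae_pwd *pwd, const char *passphrase)
{
	size_t len = strlen(passphrase);

	if (len < SAE_PWD_MIN || len > SAE_PWD_MAX)
		return -1;                /* outside the 8..128 byte range */
	memset(pwd, 0, sizeof(*pwd));
	pwd->key_len = (uint16_t)len;
	memcpy(pwd->key, passphrase, len);
	return 0;
}

int main(void)
{
	struct sae_pwd pwd;

	if (fill_sae_pwd(&pwd, "correct horse battery") == 0)
		printf("packed a %u byte passphrase\n", (unsigned)pwd.key_len);
	return 0;
}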
+#ifdef WL_BCNRECV
+/* PERIODIC Beacon receive for detecting FakeAPs */
+typedef struct wl_bcnrecv_result {
+ uint8 SSID[DOT11_MAX_SSID_LEN]; /**< SSID String */
+ struct ether_addr BSSID; /**< Network BSSID */
+ uint8 channel; /**< Channel */
+ uint16 beacon_interval;
+ uint32 timestamp[2]; /**< Beacon Timestamp */
+ uint64 system_time;
+} wl_bcnrecv_result_t;
+
+typedef struct wl_bcnrecv_info {
+ uint bcnrecv_state; /* tracks the fake AP detection state */
+} wl_bcnrecv_info_t;
+
+typedef enum wl_bcnrecv_state {
+ BEACON_RECV_IDLE = 0,
+ BEACON_RECV_STARTED,
+ BEACON_RECV_STOPPED,
+ BEACON_RECV_SUSPENDED
+} wl_bcnrecv_state_t;
+
+typedef enum wl_bcnrecv_reason {
+ WL_BCNRECV_INVALID = 0,
+ WL_BCNRECV_USER_TRIGGER,
+ WL_BCNRECV_SUSPEND,
+ WL_BCNRECV_SCANBUSY,
+ WL_BCNRECV_CONCURRENCY,
+ WL_BCNRECV_LISTENBUSY,
+ WL_BCNRECV_ROAMABORT,
+ WL_BCNRECV_HANG
+} wl_bcnrecv_reason_t;
+
+typedef enum wl_bcnrecv_status {
+ WL_BCNRECV_STARTED = 0,
+ WL_BCNRECV_STOPPED,
+ WL_BCNRECV_ABORTED,
+ WL_BCNRECV_SUSPENDED,
+ WL_BCNRECV_MAX
+} wl_bcnrecv_status_t;
+
+typedef enum wl_bcnrecv_attr_type {
+ BCNRECV_ATTR_STATUS = 1,
+ BCNRECV_ATTR_REASON,
+ BCNRECV_ATTR_BCNINFO
+} wl_bcnrecv_attr_type_t;
+#endif /* WL_BCNRECV */
+#ifdef WL_CHAN_UTIL
+#define CU_ATTR_PERCENTAGE 1
+#define CU_ATTR_HDR_LEN 30
+#endif /* WL_CHAN_UTIL */
/* association inform */
#define MAX_REQ_LINE 1024u
@@ -665,6 +836,18 @@
u8 resp_ie[MAX_REQ_LINE];
u32 resp_ie_len;
};
+#define WL_MAX_FILS_KEY_LEN 64
+
+struct wl_fils_info {
+ u8 fils_kek[WL_MAX_FILS_KEY_LEN];
+ u32 fils_kek_len;
+ u8 fils_pmk[WL_MAX_FILS_KEY_LEN];
+ u32 fils_pmk_len;
+ u8 fils_pmkid[WL_MAX_FILS_KEY_LEN];
+ u16 fils_erp_next_seq_num;
+ bool fils_roam_disabled;
+ u32 fils_bcn_timeout_cache;
+};
/* firmware /nvram downloading controller */
struct wl_fw_ctrl {
@@ -681,18 +864,30 @@
u32 resp_len;
};
+#define MIN_PMKID_LIST_V3_FW_MAJOR 13
+#define MIN_PMKID_LIST_V3_FW_MINOR 0
+
+#define MIN_PMKID_LIST_V2_FW_MAJOR 12
+#define MIN_PMKID_LIST_V2_FW_MINOR 0
+
+#define MIN_ESCAN_PARAM_V2_FW_MAJOR 14
+#define MIN_ESCAN_PARAM_V2_FW_MINOR 0
+
/* wpa2 pmk list */
struct wl_pmk_list {
- pmkid_list_t pmkids;
- pmkid_t foo[MAXPMKID - 1];
+ pmkid_list_v3_t pmkids;
+ pmkid_v3_t foo[MAXPMKID - 1];
};
+#define KEY_PERM_PMK 0xFFFFFFFF
+
#ifdef DHD_MAX_IFS
#define WL_MAX_IFS DHD_MAX_IFS
#else
#define WL_MAX_IFS 16
#endif // endif
+#define MAC_RAND_BYTES 3
#define ESCAN_BUF_SIZE (64 * 1024)
struct escan_info {
@@ -765,10 +960,6 @@
} wl_p2plo_listen_t;
#endif /* P2P_LISTEN_OFFLOADING */
-#ifdef WL11U
-/* Max length of Interworking element */
-#define IW_IES_MAX_BUF_LEN 9
-#endif // endif
#ifdef WLFBT
#define FBT_KEYLEN 32
#endif // endif
@@ -793,7 +984,7 @@
#ifdef SUPPORT_AP_RADIO_PWRSAVE
typedef struct ap_rps_info {
bool enable;
- bool sta_assoc_check;
+ int sta_assoc_check;
int pps;
int quiet_time;
int level;
@@ -831,41 +1022,41 @@
} wl_rssi_ant_mimo_t;
#endif /* SUPPORT_RSSI_SUM_REPORT */
+/* MBO-OCE prune event reason codes */
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef enum wl_prune_evt_reason {
+ WIFI_PRUNE_UNSPECIFIED = 0, /* Unspecified event reason code */
+ WIFI_PRUNE_ASSOC_RETRY_DELAY = 1, /* MBO assoc retry delay */
+ WIFI_PRUNE_RSSI_ASSOC_REJ = 2 /* OCE RSSI-based assoc rejection */
+} wl_prune_evt_reason_t;
+#endif /* WL_MBO || WL_OCE */
+
#if defined(DHD_ENABLE_BIGDATA_LOGGING)
#define GET_BSS_INFO_LEN 90
#endif /* DHD_ENABLE_BIGDATA_LOGGING */
-#ifdef DHD_LB_IRQSET
-#if defined(CONFIG_ARCH_MSM8998) || defined(CONFIG_ARCH_SDM845)
-#define WL_IRQSET
-#endif /* CONFIG_ARCH_MSM8998 | CONFIG_ARCH_SDM845) */
-#endif /* DHD_LB_IRQSET */
+#ifdef WL_MBO
+typedef struct wl_event_mbo wl_event_mbo_t;
+typedef struct wl_event_mbo_cell_nw_switch wl_event_mbo_cell_nw_switch_t;
+typedef struct wl_btm_event_type_data wl_btm_event_type_data_t;
+#endif /* WL_MBO */
-#ifdef WES_SUPPORT
-#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
-#define CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS 25000 /* Scan timeout */
-enum wl_custom_scan_time_type {
- WL_CUSTOM_SCAN_CHANNEL_TIME = 0,
- WL_CUSTOM_SCAN_UNASSOC_TIME,
- WL_CUSTOM_SCAN_PASSIVE_TIME,
- WL_CUSTOM_SCAN_HOME_TIME,
- WL_CUSTOM_SCAN_HOME_AWAY_TIME
-};
-extern s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
- enum wl_custom_scan_time_type type, int time);
-#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
-#endif /* WES_SUPPORT */
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef struct wl_bssid_prune_evt_info wl_bssid_pruned_evt_info_t;
+#endif /* WL_MBO || WL_OCE */
#ifdef WL_NAN
-#define NAN_MAX_NDI 2
+#define NAN_MAX_NDI 1u
typedef struct wl_ndi_data
{
u8 ifname[IFNAMSIZ];
u8 in_use;
u8 created;
+ struct net_device *nan_ndev;
} wl_ndi_data_t;
typedef struct wl_nancfg
{
+ wl_nan_ver_t version;
wl_ndi_data_t ndi[NAN_MAX_NDI];
struct mutex nan_sync;
uint8 svc_inst_id_mask[NAN_SVC_INST_SIZE];
@@ -873,15 +1064,45 @@
/* wait queue and condition variable for nan event */
bool nan_event_recvd;
wait_queue_head_t nan_event_wait;
- bool disable_pending;
+ nan_stop_reason_code_t disable_reason;
bool mac_rand;
+ int range_type;
+ uint8 max_ndp_count; /* Max no. of NDPs */
+ nan_ndp_peer_t *nan_ndp_peer_info;
} wl_nancfg_t;
+
+#ifdef WL_NANP2P
+#define WL_CFG_P2P_DISC_BIT 0x1u
+#define WL_CFG_NAN_DISC_BIT 0x2u
+#define WL_NANP2P_CONC_SUPPORT (WL_CFG_P2P_DISC_BIT | WL_CFG_NAN_DISC_BIT)
+#endif /* WL_NANP2P */
#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+#define WL_IFACE_NOT_PRESENT -1
+
+typedef enum iface_conc_policy {
+ WL_IF_POLICY_DEFAULT = 0,
+ WL_IF_POLICY_FCFS = 1,
+ WL_IF_POLICY_LP = 2,
+ WL_IF_POLICY_ROLE_PRIORITY = 3,
+ WL_IF_POLICY_CUSTOM = 4,
+ WL_IF_POLICY_INVALID
+} iface_conc_policy_t;
+
+typedef struct iface_mgmt_data {
+ uint8 policy;
+ uint8 priority[WL_IF_TYPE_MAX];
+} iface_mgmt_data_t;
+#endif /* WL_IFACE_MGMT */
+
#ifdef WL_WPS_SYNC
#define EAP_PACKET 0
#define EAP_EXPANDED_TYPE 254
#define EAP_EXP_OPCODE_OFFSET 7
+#define EAP_EXP_FRAGMENT_LEN_OFFSET 2
+#define EAP_EXP_FLAGS_FRAGMENTED_DATA 2
+#define EAP_EXP_FLAGS_MORE_DATA 1
#define EAPOL_EAP_HDR_LEN 5
#define EAP_EXP_HDR_MIN_LENGTH (EAPOL_EAP_HDR_LEN + EAP_EXP_OPCODE_OFFSET)
#define EAP_ATTRIB_MSGTYPE 0x1022
@@ -893,7 +1114,7 @@
#define EAP_WSC_DONE 5
#define EAP_WSC_MSG_M8 12
#define EAP_CODE_FAILURE 4
-#define WL_WPS_REAUTH_TIMEOUT 8000
+#define WL_WPS_REAUTH_TIMEOUT 10000
struct wl_eap_header {
unsigned char code; /* EAP code */
@@ -907,6 +1128,7 @@
typedef enum wl_wps_state {
WPS_STATE_IDLE = 0,
WPS_STATE_STARTED,
+ WPS_STATE_M8_SENT,
WPS_STATE_M8_RECVD,
WPS_STATE_EAP_FAIL,
WPS_STATE_REAUTH_WAIT,
@@ -915,6 +1137,7 @@
WPS_STATE_DISCONNECT,
WPS_STATE_DISCONNECT_CLIENT,
WPS_STATE_CONNECT_FAIL,
+ WPS_STATE_AUTHORIZE,
WPS_STATE_DONE,
WPS_STATE_INVALID
} wl_wps_state_t;
@@ -931,8 +1154,20 @@
#endif /* WL_WPS_SYNC */
#ifndef WL_STATIC_IFNAME_PREFIX
-#define WL_STATIC_IFNAME_PREFIX "wlan1"
+#define WL_STATIC_IFNAME_PREFIX "wlan%d"
#endif /* WL_STATIC_IFNAME */
+#define STATIC_INAME_STRING_LEN 6
+#ifndef DHD_NUM_STATIC_IFACES
+#define DHD_NUM_STATIC_IFACES 2
+#endif // endif
+
+typedef struct buf_data {
+ u32 ver; /* version of struct */
+ u32 len; /* Total len */
+ /* size of each buffer in case of split buffers (0 - single buffer). */
+ u32 buf_threshold;
+ const void *data_buf[1]; /* array of user space buffer pointers. */
+} buf_data_t;
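buf_data_t uses the classic trailing-array idiom: data_buf[] is declared with a single slot and the caller over-allocates for however many user-space buffer pointers it actually carries, with buf_threshold describing the per-chunk size (0 meaning one contiguous buffer). The user-space sketch below illustrates only that allocation idiom; the ver/len/buf_threshold values are hypothetical and the driver's actual ioctl flow is not modeled.

/* User-space model of the trailing-array allocation idiom used by buf_data_t. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_desc {                   /* stand-in for buf_data_t */
	unsigned int ver;
	unsigned int len;           /* total payload length */
	unsigned int buf_threshold; /* per-chunk size, 0 = single buffer */
	const void *data_buf[1];    /* really data_buf[nbufs] */
};

static struct buf_desc *alloc_buf_desc(size_t nbufs)
{
	/* one pointer slot is already in the struct, so add nbufs - 1 more */
	size_t sz = sizeof(struct buf_desc) + (nbufs - 1) * sizeof(const void *);

	return calloc(1, sz);
}

int main(void)
{
	const char *chunks[2] = { "part one", "part two" };
	struct buf_desc *d = alloc_buf_desc(2);

	if (!d)
		return 1;
	d->ver = 1;
	d->buf_threshold = 8;       /* hypothetical split size */
	d->len = 16;
	memcpy(d->data_buf, chunks, sizeof(chunks));
	printf("desc: %u-byte chunks at %p and %p\n", d->buf_threshold,
			(void *)d->data_buf[0], (void *)d->data_buf[1]);
	free(d);
	return 0;
}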
/* private data of cfg80211 interface */
struct bcm_cfg80211 {
@@ -954,7 +1189,7 @@
struct completion wait_next_af;
struct mutex usr_sync; /* mainly for up/down synchronization */
struct mutex if_sync; /* mainly for iface op synchronization */
- struct mutex scan_complete; /* serialize scan_complete call */
+ struct mutex scan_sync; /* scan sync from different scan contexts */
struct wl_scan_results *bss_list;
struct wl_scan_results *scan_results;
@@ -1049,9 +1284,6 @@
struct mutex event_sync; /* mainly for up/down synchronization */
bool disable_roam_event;
struct delayed_work pm_enable_work;
-#ifdef WL_IRQSET
- struct delayed_work irq_set_work;
-#endif /* WL_IRQSET */
#ifdef OEM_ANDROID
struct workqueue_struct *event_workq; /* workqueue for event */
#else
@@ -1096,7 +1328,14 @@
nan_svc_info_t svc_info[NAN_MAX_SVC_INST];
nan_ranging_inst_t nan_ranging_info[NAN_MAX_RANGING_INST];
#endif /* WL_NAN_DISC_CACHE */
+#ifdef WL_NANP2P
+ uint8 conc_disc;
+ bool nan_p2p_supported;
+#endif /* WL_NANP2P */
#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+ iface_mgmt_data_t iface_data;
+#endif /* WL_IFACE_MGMT */
#ifdef WL_CFG80211_P2P_DEV_IF
bool down_disc_if;
#endif /* WL_CFG80211_P2P_DEV_IF */
@@ -1156,24 +1395,54 @@
ap_rps_info_t ap_rps_info;
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
u16 vif_macaddr_mask;
+ osl_t *osh;
struct list_head vndr_oui_list;
spinlock_t vndr_oui_sync; /* to protect vndr_oui_list */
- osl_t *osh;
-#ifdef BIGDATA_SOFTAP
- struct wl_ap_sta_info *ap_sta_info;
-#endif /* BIGDATA_SOFTAP */
bool rssi_sum_report;
int rssi; /* previous RSSI (backup) of get_station */
uint64 scan_enq_time;
+ uint64 scan_deq_time;
+ uint64 scan_hdlr_cmplt_time;
+ uint64 scan_cmplt_time;
+ uint64 wl_evt_deq_time;
+ uint64 wl_evt_hdlr_entry_time;
+ uint64 wl_evt_hdlr_exit_time;
#ifdef WL_WPS_SYNC
wl_wps_session_t wps_session[WPS_MAX_SESSIONS];
spinlock_t wps_sync; /* to protect wps states (and others if needed) */
#endif /* WL_WPS_SYNC */
- struct net_device *static_ndev;
- uint8 static_ndev_state;
+ struct wl_fils_info fils_info;
+#ifdef WL_BAM
+ wl_bad_ap_mngr_t bad_ap_mngr;
+#endif /* WL_BAM */
+#ifdef BIGDATA_SOFTAP
+ struct wl_ap_sta_info *ap_sta_info;
+#endif /* BIGDATA_SOFTAP */
+ uint8 scanmac_enabled;
+#ifdef WL_BCNRECV
+ /* structure used for fake ap detection info */
+ struct mutex bcn_sync; /* mainly for bcn resume/suspend synchronization */
+ wl_bcnrecv_info_t bcnrecv_info;
+#endif /* WL_BCNRECV */
+ struct net_device *static_ndev[DHD_MAX_STATIC_IFS];
+ uint8 static_ndev_state[DHD_MAX_STATIC_IFS];
+ bool hal_started;
+ wl_wlc_version_t wlc_ver;
+ bool scan_params_v2;
+#ifdef DHD_BANDSTEER
+ void *dhd_bandsteer_cntx;
+ bool p2p_bs;
+ bool ap_bs;
+#endif /* DHD_BANDSTEER */
+#ifdef ENABLE_HOGSQS
+ struct delayed_work hogsqs_eventwork; /* hog detection event work */
+#endif // endif
+#if !defined(DISABLE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ bool in_csa;
+ timer_list_compat_t csa_timeout; /* Timer for csa timeout */
+#endif // endif
};
-
-#define WL_STATIC_IFIDX (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1)
+#define WL_STATIC_IFIDX (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1)
enum static_ndev_states {
NDEV_STATE_NONE,
NDEV_STATE_OS_IF_CREATED,
@@ -1181,28 +1450,30 @@
NDEV_STATE_FW_IF_FAILED,
NDEV_STATE_FW_IF_DELETED
};
-#define IS_CFG80211_STATIC_IF(cfg, ndev) \
- ((cfg && (cfg->static_ndev == ndev)) ? true : false)
+#ifdef WL_IFACE_MGMT
#define IS_CFG80211_STATIC_IF_ACTIVE(cfg) \
((cfg && cfg->static_ndev && \
(cfg->static_ndev_state & NDEV_STATE_FW_IF_CREATED)) ? true : false)
-#define IS_CFG80211_STATIC_IF_NAME(cfg, name) \
- ((cfg && !strcmp(cfg->static_ndev->name, name)))
+#endif // endif
+#ifdef WL_SAE
+typedef struct wl_sae_key_info {
+ uint8 peer_mac[ETHER_ADDR_LEN];
+ uint16 pmk_len;
+ uint16 pmkid_len;
+ const uint8 *pmk;
+ const uint8 *pmkid;
+} wl_sae_key_info_t;
+#endif /* WL_SAE */
+
+typedef enum wl_concurrency_mode {
+ CONCURRENCY_MODE_NONE = 0,
+ CONCURRENCY_SCC_MODE,
+ CONCURRENCY_VSDB_MODE,
+ CONCURRENCY_RSDB_MODE
+} wl_concurrency_mode_t;
s32 wl_iftype_to_mode(wl_iftype_t iftype);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define GCC_DIAGNOSTIC_PUSH() \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#define GCC_DIAGNOSTIC_POP() \
-_Pragma("GCC diagnostic pop")
-#else
-#define GCC_DIAGNOSTIC_PUSH()
-#define GCC_DIAGNOSTIC_POP()
-#endif /* STRICT_GCC_WARNINGS */
-
#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
list_for_each_entry_safe((pos), (next), (head), member)
extern int ioctl_version;
@@ -1219,18 +1490,15 @@
struct net_info *_net_info, *next;
unsigned long int flags;
int idx = 0;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
&cfg->net_list, list) {
- WL_INFORM_MEM(("%s: net_list[%d] bssidx: %d, "
- "ndev: %p, wdev: %p \n", __FUNCTION__,
- idx++, _net_info->bssidx,
- OSL_OBFUSCATE_BUF(_net_info->ndev),
- OSL_OBFUSCATE_BUF(_net_info->wdev)));
+ GCC_DIAGNOSTIC_POP();
+ WL_INFORM_MEM(("wl_probe_wdev_all: net_list[%d] bssidx: %d\n",
+ idx++, _net_info->bssidx));
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return;
}
@@ -1240,17 +1508,17 @@
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if ((bssidx >= 0) && (_net_info->bssidx == bssidx) &&
(_net_info->ifidx == ifidx)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return info;
}
@@ -1263,9 +1531,10 @@
#ifdef DHD_IFDEBUG
WL_INFORM_MEM(("dealloc_netinfo enter wdev=%p \n", OSL_OBFUSCATE_BUF(wdev)));
#endif // endif
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
wl_cfgbss_t *bss = &_net_info->bss;
@@ -1290,8 +1559,7 @@
MFREE(cfg->osh, _net_info, sizeof(struct net_info));
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
#ifdef DHD_IFDEBUG
WL_INFORM_MEM(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
#endif // endif
@@ -1339,10 +1607,13 @@
_net_info->roam_off = WL_INVALID;
_net_info->bssidx = bssidx;
_net_info->ifidx = ifidx;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
cfg->iface_cnt++;
list_add(&_net_info->list, &cfg->net_list);
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+#ifdef WL_SAE
+ init_completion(&_net_info->mgmt_tx_cpl);
+#endif /* WL_SAE */
}
#ifdef DHD_IFDEBUG
WL_DBG(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
@@ -1356,10 +1627,11 @@
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
wl_cfgbss_t *bss = &_net_info->bss;
+ GCC_DIAGNOSTIC_POP();
if (bss->wpa_ie) {
MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
@@ -1390,8 +1662,7 @@
MFREE(cfg->osh, _net_info, sizeof(struct net_info));
}
cfg->iface_cnt = 0;
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
static inline u32
wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
@@ -1401,15 +1672,15 @@
u32 cnt = 0;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(status, &_net_info->sme_state))
cnt++;
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return cnt;
}
static inline void
@@ -1418,9 +1689,10 @@
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
break; /* set all status is not allowed */
@@ -1429,7 +1701,7 @@
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
@@ -1441,8 +1713,7 @@
break; /* unknown operation */
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
static inline void
wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
@@ -1452,17 +1723,28 @@
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ if (status >= BITS_PER_LONG) {
+ /* max value for shift operation is
+ * (BITS_PER_LONG -1) for unsigned long.
+ * If status crosses BITS_PER_LONG, the variable
+ * sme_state should be correspondingly updated.
+ */
+ ASSERT(0);
+ return;
+ }
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
+ GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
/*
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
set_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
@@ -1473,7 +1755,7 @@
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
@@ -1486,8 +1768,7 @@
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
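The BITS_PER_LONG guard added to wl_set_status_by_netdev() exists because sme_state is a single unsigned long bitmap, so any status at or beyond the word width would shift past the end of it. The sketch below is a user-space model of that bounded-bitmap pattern, using plain bit operations in place of set_bit()/clear_bit()/test_bit().

/* User-space model of the bounded status bitmap guarded above. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long sme_state;   /* stand-in for _net_info->sme_state */

static int set_status(unsigned int status)
{
	if (status >= BITS_PER_LONG) {
		/* the driver ASSERTs here; the bitmap must grow first */
		fprintf(stderr, "status %u out of range\n", status);
		return -1;
	}
	sme_state |= 1UL << status;
	return 0;
}

static int test_status(unsigned int status)
{
	return status < BITS_PER_LONG && (sme_state & (1UL << status)) != 0;
}

int main(void)
{
	set_status(5);
	printf("status 5 set: %d\n", test_status(5));
	printf("out-of-range status rejected: %d\n",
			set_status((unsigned int)BITS_PER_LONG) < 0);
	return 0;
}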
@@ -1499,17 +1780,17 @@
wl_cfgbss_t *bss = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
bss = &_net_info->bss;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return bss;
}
@@ -1521,16 +1802,16 @@
u32 stat = 0;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
stat = test_bit(status, &_net_info->sme_state);
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return stat;
}
@@ -1541,16 +1822,16 @@
s32 mode = -1;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev && (_net_info->ndev == ndev)) {
mode = wl_iftype_to_mode(_net_info->iftype);
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return mode;
}
@@ -1561,16 +1842,16 @@
s32 bssidx = -1;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->wdev && (_net_info->wdev == wdev)) {
bssidx = _net_info->bssidx;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return bssidx;
}
@@ -1583,16 +1864,16 @@
if (bssidx < 0)
return NULL;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if ((_net_info->bssidx == bssidx) && (_net_info->ifidx == ifidx)) {
wdev = _net_info->wdev;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return wdev;
}
@@ -1603,16 +1884,16 @@
struct wl_profile *prof = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
prof = &_net_info->profile;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return prof;
}
static inline struct net_info *
@@ -1621,16 +1902,16 @@
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return info;
}
@@ -1640,19 +1921,120 @@
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline u32
+wl_get_chanwidth_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ u32 info = 0;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ info = _net_info->profile.channel_width;
+ break;
+ }
+ }
spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return info;
}
+static inline void
+wl_set_chanwidth_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 chanwidth)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev))
+ _net_info->profile.channel_width = chanwidth;
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline struct wireless_dev *
+wl_get_wdev_by_dpp_listen(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->profile.dpp_listen) {
+ wdev = _net_info->wdev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return wdev;
+}
+
+static inline void
+wl_set_dpp_listen_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool dpp_listen)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev))
+ _net_info->profile.dpp_listen = dpp_listen;
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline char *
+wl_iftype_to_str(int wl_iftype)
+{
+ switch (wl_iftype) {
+ case (WL_IF_TYPE_STA):
+ return "WL_IF_TYPE_STA";
+ case (WL_IF_TYPE_AP):
+ return "WL_IF_TYPE_AP";
+ case (WL_IF_TYPE_AWDL):
+ return "WL_IF_TYPE_AWDL";
+ case (WL_IF_TYPE_NAN_NMI):
+ return "WL_IF_TYPE_NAN_NMI";
+ case (WL_IF_TYPE_NAN):
+ return "WL_IF_TYPE_NAN";
+ case (WL_IF_TYPE_P2P_GO):
+ return "WL_IF_TYPE_P2P_GO";
+ case (WL_IF_TYPE_P2P_GC):
+ return "WL_IF_TYPE_P2P_GC";
+ case (WL_IF_TYPE_P2P_DISC):
+ return "WL_IF_TYPE_P2P_DISC";
+ case (WL_IF_TYPE_IBSS):
+ return "WL_IF_TYPE_IBSS";
+ case (WL_IF_TYPE_MONITOR):
+ return "WL_IF_TYPE_MONITOR";
+ case (WL_IF_TYPE_AIBSS):
+ return "WL_IF_TYPE_AIBSS";
+ default:
+ return "WL_IF_TYPE_UNKNOWN";
+ }
+}
+
+#define is_discovery_iface(iface) (((iface == WL_IF_TYPE_P2P_DISC) || \
+ (iface == WL_IF_TYPE_NAN_NMI)) ? 1 : 0)
#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
@@ -1663,6 +2045,19 @@
#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
#define wdev_to_ndev(wdev) (wdev->netdev)
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
+#define IS_P2P_IFACE(wdev) (wdev && \
+ ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)))
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+
+#define IS_STA_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_STATION))
+
+#define IS_AP_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_AP))
+
#if defined(WL_ENABLE_P2P_IF)
#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \
bcmcfg_to_prmry_ndev(cfg) : ndev)
@@ -1733,6 +2128,7 @@
#define wl_to_ie(w) (&w->ie)
#define wl_to_conn(w) (&w->conn_info)
#endif // endif
+#define wl_to_fils_info(w) (&w->fils_info)
#define wiphy_from_scan(w) (w->escan_info.wiphy)
#define wl_get_drv_status_all(cfg, stat) \
(wl_get_status_all(cfg, WL_STATUS_ ## stat))
@@ -1776,11 +2172,7 @@
#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
#endif /* WLFBT */
-#ifdef BCMCCX
-#define IS_AKM_SUITE_CCKM(sec) (sec->wpa_auth == WLAN_AKM_SUITE_CCKM)
-#else
#define IS_AKM_SUITE_CCKM(sec) ({BCM_REFERENCE(sec); FALSE;})
-#endif /* BCMCCX */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info)
@@ -1797,6 +2189,9 @@
extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
void *data);
+extern s32 wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+ const wl_event_msg_t * e);
+
void wl_cfg80211_set_parent_dev(void *dev);
struct device *wl_cfg80211_get_parent_dev(void);
struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void);
@@ -1809,9 +2204,14 @@
#ifdef WL_STATIC_IF
extern int32 wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
int ifidx, uint8 *addr, int bssidx, char *name, int if_state);
+extern int get_iface_num(const char *name, struct bcm_cfg80211 *cfg);
+extern bool is_static_iface(struct bcm_cfg80211 *cfg, struct net_device *net);
+extern int static_if_ndev_get_state(struct bcm_cfg80211 *cfg, struct net_device *net);
+extern bool is_static_iface_name(const char *name, struct bcm_cfg80211 *cfg);
#endif /* WL_STATIC_IF */
extern s32 wl_cfg80211_up(struct net_device *net);
extern s32 wl_cfg80211_down(struct net_device *net);
+extern void wl_cfg80211_sta_ifdown(struct net_device *net);
extern s32 wl_cfg80211_notify_ifadd(struct net_device * dev, int ifidx, char *name, uint8 *mac,
uint8 bssidx, uint8 role);
extern s32 wl_cfg80211_notify_ifdel(struct net_device * dev, int ifidx, char *name, uint8 *mac,
@@ -1824,9 +2224,10 @@
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
-extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
-extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+extern void wl_cfg80211_cleanup_if(struct net_device *dev);
extern bool wl_cfg80211_is_concurrent_mode(struct net_device * dev);
+extern void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason);
+extern void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason);
extern void* wl_cfg80211_get_dhdp(struct net_device * dev);
extern bool wl_cfg80211_is_p2p_active(struct net_device * dev);
extern bool wl_cfg80211_is_roam_offload(struct net_device * dev);
@@ -1841,6 +2242,7 @@
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len);
+extern bool wl_cfg80211_check_vif_in_use(struct net_device *ndev);
#ifdef P2PLISTEN_AP_SAMECHN
extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
#endif /* P2PLISTEN_AP_SAMECHN */
@@ -1878,6 +2280,7 @@
enum nl80211_tx_power_setting type, s32 dbm);
extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern s32 wl_add_remove_eventextmsg(struct net_device *ndev, u16 event, bool add);
extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev,
u8 bsscfgidx);
#ifdef WL_HOST_BAND_MGMT
@@ -1888,7 +2291,6 @@
struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern void wl_cfg80211_update_power_mode(struct net_device *dev);
-extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
extern void wl_terminate_event_handler(struct net_device *dev);
#if defined(DHD_ENABLE_BIGDATA_LOGGING)
extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len);
@@ -1896,6 +2298,12 @@
#endif /* DHD_ENABLE_BIGDATA_LOGGING */
extern struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev);
extern s32 wl_cfg80211_set_if_band(struct net_device *ndev, int band);
+extern s32 wl_cfg80211_set_country_code(struct net_device *dev, char *country_code,
+ bool notify, bool user_enforced, int revinfo);
+extern bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg);
+#ifdef WL_WIPSEVT
+extern int wl_cfg80211_wips_event(uint16 misdeauth, char* bssid);
+#endif /* WL_WIPSEVT */
#define SCAN_BUF_CNT 2
#define SCAN_BUF_NEXT 1
@@ -2010,6 +2418,11 @@
extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev,
void *buf, s32 buflen);
+#if defined(SAVE_CONNECTION_WHEN_CC_UPDATE)
+extern bool wl_update_ap_chandef(struct net_device *ndev);
+extern bool wl_check_valid_channel_in_country(struct net_device *ndev, char *ccode, int forced);
+#endif // endif
+
extern s32 wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg,
struct net_device *ndev, s32 bsscfg_idx, s32 up);
extern bool wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx);
@@ -2021,13 +2434,30 @@
extern void wl_cfg80211_block_arp(struct net_device *dev, int enable);
#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#ifdef WLTDLS
+extern s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+ enum wl_tdls_config state, bool tdls_mode);
+extern s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+
#ifdef WL_NAN
-int wl_cfgvendor_send_nan_event(struct wiphy * wiphy,
+extern int wl_cfgvendor_send_nan_event(struct wiphy * wiphy,
struct net_device *dev, int event_id,
nan_event_data_t *nan_event_data);
extern int wl_cfgnan_init(struct bcm_cfg80211 *cfg);
extern int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate);
extern bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg);
+#ifdef RTT_SUPPORT
+extern s32 wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy,
+ struct net_device *dev, wl_nan_ev_rng_rpt_ind_t *range_res,
+ uint32 status);
+#endif /* RTT_SUPPORT */
+#ifdef WL_NANP2P
+extern int wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val);
+extern uint8 wl_cfg80211_get_iface_conc_disc(struct net_device *ndev);
+#endif /* WL_NANP2P */
#endif /* WL_NAN */
#ifdef WL_CFG80211_P2P_DEV_IF
@@ -2049,6 +2479,9 @@
extern s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
#endif /* P2P_LISTEN_OFFLOADING */
+/* Function to flush the FW log buffer content */
+extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
+
#define RETURN_EIO_IF_NOT_UP(wlpriv) \
do { \
struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
@@ -2065,13 +2498,6 @@
#define P2PO_COOKIE 65535
u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg);
-#if defined(SUPPORT_RANDOM_MAC_SCAN)
-int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
-int wl_cfg80211_random_mac_enable(struct net_device *dev);
-int wl_cfg80211_random_mac_disable(struct net_device *dev);
-int wl_cfg80211_scan_mac_enable(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask);
-int wl_cfg80211_scan_mac_disable(struct net_device *dev);
-#endif /* SUPPORT_RANDOM_MAC_SCAN */
#ifdef SUPPORT_AP_HIGHER_BEACONRATE
int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname);
int wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len);
@@ -2090,18 +2516,34 @@
int wl_cfg80211_iface_count(struct net_device *dev);
struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname);
void wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd);
+#ifdef WL_IFACE_MGMT
+extern int wl_cfg80211_set_iface_policy(struct net_device *ndev, char *arg, int len);
+extern uint8 wl_cfg80211_get_iface_policy(struct net_device *ndev);
+extern s32 wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+s32 wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+s32 wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype,
+ bool *disable_nan, bool *disable_p2p);
+s32 wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+wl_iftype_t wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg);
+bool wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+#endif /* WL_IFACE_MGMT */
struct wireless_dev * wl_cfg80211_add_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
wl_iftype_t wl_iftype, const char *name, u8 *mac);
extern s32 wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
struct wireless_dev *wdev, char *name);
+s32 _wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname);
+s32 wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg, wl_iftype_t sec_data_if_type);
+s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, s32 timeout);
+
#ifdef WL_STATIC_IF
extern struct net_device *wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg,
- u16 iftype, char *ifname);
+ u16 iftype, char *ifname, int ifidx);
extern void wl_cfg80211_unregister_static_if(struct bcm_cfg80211 * cfg);
extern s32 wl_cfg80211_static_if_open(struct net_device *net);
extern s32 wl_cfg80211_static_if_close(struct net_device *net);
extern struct net_device * wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
- wl_if_event_info *event, u8 *addr, s32 iface_type);
+ wl_if_event_info *event, u8 *addr, s32 iface_type, const char *iface_name);
extern s32 wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev);
#endif /* WL_STATIC_IF */
extern struct wireless_dev *wl_cfg80211_get_wdev_from_ifname(struct bcm_cfg80211 *cfg,
@@ -2111,18 +2553,119 @@
extern s32 wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype);
extern int wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats);
extern s32 wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level);
-extern s32 wl_cfg80211_check_for_nan_support(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_set_transition_mode(struct net_device *ndev, u32 transition_disabled);
extern int wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 * cfg);
extern int wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable);
extern int wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg);
+extern uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg);
#ifdef WL_WPS_SYNC
void wl_handle_wps_states(struct net_device *ndev, u8 *dump_data, u16 len, bool direction);
#endif /* WL_WPS_SYNC */
+extern int wl_features_set(u8 *array, uint8 len, u32 ftidx);
+extern void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+extern s32 wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data);
+extern s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+extern void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
+extern s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool aborted, bool fw_abort);
+#ifdef CUSTOMER_HW4_DEBUG
+extern void wl_scan_timeout_dbg_clear(void);
+#endif /* CUSTOMER_HW4_DEBUG */
+extern s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
+extern s32 wl_cfg80211_net_attach(struct net_device *primary_ndev);
+extern void wl_print_verinfo(struct bcm_cfg80211 *cfg);
+extern const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
+extern int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg);
+#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 interface_type, bool set);
+#define WL_HE_FEATURES_HE_AP 0x8
+#define WL_HE_FEATURES_HE_P2P 0x20
+#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
+extern s32 wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable);
+#ifdef WL11U
+extern bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len);
+extern s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len);
+extern s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx);
-/* Function to flush the FW log buffer content */
-#ifdef DHD_LOG_DUMP
-extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
-#else
-#define wl_flush_fw_log_buffer(x, y)
-#endif // endif
+static inline void
+wl_get_iwdata_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 *iw_ie,
+ u32 *iw_ie_len)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ *iw_ie_len = _net_info->profile.iw_ie_len;
+ memcpy(iw_ie, _net_info->profile.iw_ie, _net_info->profile.iw_ie_len);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline void
+wl_set_iwdata_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 *iw_ie, u32 iw_ie_len)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ _net_info->profile.iw_ie_len = iw_ie_len;
+ memcpy(_net_info->profile.iw_ie, iw_ie, _net_info->profile.iw_ie_len);
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline void
+wl_clear_iwdata_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ _net_info->profile.iw_ie_len = 0;
+ memset(_net_info->profile.iw_ie, 0, IW_IES_MAX_BUF_LEN);
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline void
+wl_clear_iwdata(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
+ &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->profile.iw_ie_len) {
+ _net_info->profile.iw_ie_len = 0;
+ memset(_net_info->profile.iw_ie, 0, IW_IES_MAX_BUF_LEN);
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return;
+}
+#endif /* WL11U */
#endif /* _wl_cfg80211_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
index 4b072be..d364b6b 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg_btcoex.c 700323 2017-05-18 16:12:11Z $
+ * $Id: wl_cfg_btcoex.c 814554 2019-04-11 23:06:22Z $
*/
#include <net/rtnetlink.h>
@@ -72,6 +74,9 @@
/* T2 turn off SCO/SCO suppression (timeout) */
#define BT_DHCP_FLAG_FORCE_TIME 5500
+#define BTCOEXMODE "BTCOEXMODE"
+#define POWERMODE "POWERMODE"
+
enum wl_cfg80211_btcoex_status {
BT_DHCP_IDLE,
BT_DHCP_START,
@@ -93,7 +98,7 @@
} var;
int error;
- memset(&var, 0, sizeof(var));
+ bzero(&var, sizeof(var));
error = bcm_mkiovar(name, (char *)(®), sizeof(reg), (char *)(&var), sizeof(var.buf));
if (error == 0) {
return BCME_BUFTOOSHORT;
@@ -125,7 +130,7 @@
{
char reg_addr[8];
- memset(reg_addr, 0, sizeof(reg_addr));
+ bzero(reg_addr, sizeof(reg_addr));
memcpy((char *)®_addr[0], (char *)addr, 4);
memcpy((char *)®_addr[4], (char *)val, 4);
@@ -312,14 +317,9 @@
{
struct btcoex_info *btcx_inf;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
btcx_inf = container_of(work, struct btcoex_info, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (btcx_inf->timer_on) {
btcx_inf->timer_on = 0;
@@ -433,6 +433,7 @@
#endif /* OEM_ANDROID */
struct btcoex_info *btco_inf = btcoex_info_loc;
char powermode_val = 0;
+ uint8 cmd_len = 0;
char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
@@ -447,12 +448,13 @@
/* Figure out powermode 1 or o command */
#ifdef OEM_ANDROID
- strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+ cmd_len = sizeof(BTCOEXMODE);
#else
- strncpy((char *)&powermode_val, command + strlen("POWERMODE") +1, 1);
+ cmd_len = sizeof(POWERMODE);
#endif // endif
+ powermode_val = command[cmd_len];
- if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+ if (powermode_val == '1') {
WL_TRACE_HW4(("DHCP session starts\n"));
#ifdef PKT_FILTER_SUPPORT
@@ -518,9 +520,9 @@
}
}
#ifdef OEM_ANDROID
- else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+ else if (powermode_val == '2') {
#else
- else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+ else if (powermode_val == '0') {
#endif // endif
#ifdef PKT_FILTER_SUPPORT
@@ -586,8 +588,6 @@
WL_ERR(("Unkwown yet power setting, ignored\n"));
}
- snprintf(command, 3, "OK");
-
- return (strlen("OK"));
+ return (snprintf(command, sizeof("OK"), "OK") + 1);
}
#endif /* defined(OEM_ANDROID) */
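Two small C facts drive the command-parsing changes above: sizeof on a string literal counts the terminating NUL, so command[sizeof("BTCOEXMODE")] reads the same character the old strlen(...) + 1 indexing did, and snprintf() returns the number of characters it would have written excluding the NUL, so the + 1 in the new return statement accounts for the terminator. The stand-alone demo below prints both.

/* Demo of the sizeof-vs-strlen indexing and the snprintf return value
 * relied on by the btcoex command parsing above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char command[] = "BTCOEXMODE 1";
	char reply[8];

	/* sizeof includes the NUL, so it equals strlen() + 1 */
	printf("sizeof=%zu strlen+1=%zu\n",
			sizeof("BTCOEXMODE"), strlen("BTCOEXMODE") + 1);
	printf("powermode char: '%c'\n", command[sizeof("BTCOEXMODE")]);

	/* snprintf returns 2 for "OK"; +1 covers the terminator */
	printf("reply len reported: %d\n",
			(int)snprintf(reply, sizeof("OK"), "OK") + 1);
	return 0;
}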
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
index 7690680..fa567bc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -1,7 +1,9 @@
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.c 696378 2017-04-26 09:08:51Z $
+ * $Id: wl_cfgp2p.c 815562 2019-04-18 02:33:27Z $
*
*/
#include <typedefs.h>
@@ -41,6 +43,7 @@
#include <asm/uaccess.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <ethernet.h>
#include <802.11.h>
@@ -48,6 +51,7 @@
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
#include <wldev_common.h>
#ifdef OEM_ANDROID
#include <wl_android.h>
@@ -58,13 +62,11 @@
#include <dhdioctl.h>
#include <wlioctl.h>
#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
-#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
-extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
-#endif /* BCMPCIE && DHD_FW_COREDUMP */
static s8 scanparambuf[WLC_IOCTL_SMLEN];
-static bool
-wl_cfgp2p_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+static bool wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type);
static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
@@ -151,7 +153,7 @@
u16 id, flen;
/* Skipped first ANQP Element, if frame has anqp elemnt */
- ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
+ ie = bcm_parse_tlvs(ie, len, DOT11_MNG_ADVERTISEMENT_ID);
if (ie == NULL)
return false;
@@ -344,7 +346,6 @@
s32
wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
{
- CFGP2P_INFO(("In\n"));
cfg->p2p = MALLOCZ(cfg->osh, sizeof(struct p2p_info));
if (cfg->p2p == NULL) {
CFGP2P_ERR(("struct p2p_info allocation failed\n"));
@@ -504,6 +505,9 @@
wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
s32 ret;
+#ifdef WL_DISABLE_HE_P2P
+ s32 bssidx = 0;
+#endif /* WL_DISABLE_HE_P2P */
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
CFGP2P_ERR(("------ cfg p2p_ifdel "MACDBG" dev->ifindex:%d\n",
@@ -513,6 +517,19 @@
if (unlikely(ret < 0)) {
printk("'cfg p2p_ifdel' error %d\n", ret);
}
+#ifdef WL_DISABLE_HE_P2P
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, netdev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = BCME_ERROR;
+ return ret;
+ }
+ WL_DBG(("Enabling back HE for P2P\n"));
+ wl_cfg80211_set_he_mode(netdev, cfg, bssidx, WL_IF_TYPE_P2P_DISC, TRUE);
+ if (ret < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", ret));
+ }
+#endif /* WL_DISABLE_HE_P2P */
+
return ret;
}
@@ -682,7 +699,7 @@
CFGP2P_ERR(("Multi p2p not supported"));
return BCME_ERROR;
}
- if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) {
+ if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 2) {
CFGP2P_ERR(("Failed to create second p2p interface"
"Already one connection exists"));
return BCME_ERROR;
@@ -704,9 +721,10 @@
BCM_REFERENCE(ndev);
CFGP2P_DBG(("enter\n"));
+
if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
CFGP2P_ERR(("do nothing, already initialized\n"));
- return ret;
+ goto exit;
}
ret = wl_cfgp2p_set_discovery(cfg, 1);
@@ -731,7 +749,7 @@
* so that time, the ifidx returned in WLC_E_IF should be used for populating
* the netinfo
*/
- ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx, 0);
+ ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_IF_TYPE_STA, 0, bssidx, 0);
if (unlikely(ret)) {
goto exit;
}
@@ -742,6 +760,7 @@
/* Set the initial discovery state to SCAN */
ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
if (unlikely(ret != 0)) {
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
wl_cfgp2p_set_discovery(cfg, 0);
@@ -812,6 +831,14 @@
bcm_struct_cfgdev *cfgdev;
CFGP2P_DBG(("enter\n"));
+ mutex_lock(&cfg->if_sync);
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_P2P_DISC)) != BCME_OK) {
+ WL_ERR(("secondary iface is active, p2p enable discovery is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_IFACE_MGMT */
+
if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
CFGP2P_DBG((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
@@ -840,7 +867,8 @@
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
} else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
- return BCME_ERROR;
+ ret = BCME_ERROR;
+ goto exit;
}
#if defined(WL_CFG80211_P2P_DEV_IF)
@@ -861,6 +889,7 @@
if (ret) {
wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
}
+ mutex_unlock(&cfg->if_sync);
return ret;
}
@@ -896,12 +925,18 @@
*/
wl_clr_p2p_status(cfg, DISCOVERY_ON);
ret = wl_cfgp2p_deinit_discovery(cfg);
-
return ret;
}
+/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
s32
-wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_scan,
u32 num_chans, u16 *channels,
s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
p2p_scan_purpose_t p2p_scan_purpose)
@@ -913,20 +948,26 @@
s8 *memblk;
wl_p2p_scan_t *p2p_params;
wl_escan_params_t *eparams;
+ wl_escan_params_v2_t *eparams_v2;
wlc_ssid_t ssid;
- /* Scan parameters */
-#define P2PAPI_SCAN_NPROBES 1
-#define P2PAPI_SCAN_DWELL_TIME_MS 80
-#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
-#define P2PAPI_SCAN_HOME_TIME_MS 60
-#define P2PAPI_SCAN_NPROBS_TIME_MS 30
-#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+ u32 sync_id = 0;
+ s32 nprobes = 0;
+ s32 active_time = 0;
+ const struct ether_addr *mac_addr = NULL;
+ u32 scan_type = 0;
+ struct net_device *pri_dev = NULL;
- struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
/* Allocate scan params which need space for 3 channels and 0 ssids */
- eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
- OFFSETOF(wl_escan_params_t, params)) +
- num_chans * sizeof(eparams->params.channel_list[0]);
+ if (cfg->scan_params_v2) {
+ eparams_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ } else {
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ }
memsize = sizeof(wl_p2p_scan_t) + eparams_size;
memblk = scanparambuf;
@@ -935,8 +976,8 @@
memsize, sizeof(scanparambuf)));
return -1;
}
- memset(memblk, 0, memsize);
- memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+ bzero(memblk, memsize);
+ bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
if (search_state == WL_P2P_DISC_ST_SEARCH) {
/*
* If we in SEARCH STATE, we don't need to set SSID explictly
@@ -945,7 +986,7 @@
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
/* use null ssid */
ssid.SSID_len = 0;
- memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
} else if (search_state == WL_P2P_DISC_ST_SCAN) {
/* SCAN STATE 802.11 SCAN
* WFD Supplicant has p2p_find command with (type=progressive, type= full)
@@ -956,7 +997,7 @@
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
/* use wild card ssid */
ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
- memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
} else {
CFGP2P_ERR((" invalid search state %d\n", search_state));
@@ -966,68 +1007,100 @@
/* Fill in the P2P scan structure at the start of the iovar param block */
p2p_params = (wl_p2p_scan_t*) memblk;
p2p_params->type = 'E';
- /* Fill in the Scan structure that follows the P2P scan structure */
- eparams = (wl_escan_params_t*) (p2p_params + 1);
- eparams->params.bss_type = DOT11_BSSTYPE_ANY;
- if (active)
- eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
- else
- eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
- if (tx_dst_addr == NULL)
- memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
- else
- memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+ if (!active_scan) {
+ scan_type = WL_SCANFLAGS_PASSIVE;
+ }
- if (ssid.SSID_len)
- memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
-
- eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ if (tx_dst_addr == NULL) {
+ mac_addr = &ether_bcast;
+ } else {
+ mac_addr = tx_dst_addr;
+ }
switch (p2p_scan_purpose) {
case P2P_SCAN_SOCIAL_CHANNEL:
- eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+ active_time = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
break;
case P2P_SCAN_AFX_PEER_NORMAL:
case P2P_SCAN_AFX_PEER_REDUCED:
- eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+ active_time = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
break;
case P2P_SCAN_CONNECT_TRY:
- eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ active_time = WL_SCAN_CONNECT_DWELL_TIME_MS;
break;
- default :
- if (wl_get_drv_status_all(cfg, CONNECTED))
- eparams->params.active_time = -1;
- else
- eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+ default:
+ active_time = wl_get_drv_status_all(cfg, CONNECTED) ?
+ -1 : P2PAPI_SCAN_DWELL_TIME_MS;
break;
}
- if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
- eparams->params.nprobes = htod32(eparams->params.active_time /
- WL_SCAN_JOIN_PROBE_INTERVAL_MS);
- else
- eparams->params.nprobes = htod32((eparams->params.active_time /
- P2PAPI_SCAN_NPROBS_TIME_MS));
-
- if (eparams->params.nprobes <= 0)
- eparams->params.nprobes = 1;
- CFGP2P_DBG(("nprobes # %d, active_time %d\n",
- eparams->params.nprobes, eparams->params.active_time));
- eparams->params.passive_time = htod32(-1);
- eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
-
- for (i = 0; i < num_chans; i++) {
- eparams->params.channel_list[i] =
- wl_ch_host_to_driver(channels[i]);
+ if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY) {
+ nprobes = active_time /
+ WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active_time /
+ P2PAPI_SCAN_NPROBS_TIME_MS;
}
- eparams->version = htod32(ESCAN_REQ_VERSION);
- eparams->action = htod16(action);
- wl_escan_set_sync_id(eparams->sync_id, cfg);
+
+ if (nprobes <= 0) {
+ nprobes = 1;
+ }
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t*) (p2p_params + 1);
+ eparams_v2->version = htod16(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->params.version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ eparams_v2->params.length = htod16(sizeof(wl_scan_params_v2_t));
+ eparams_v2->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams_v2->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams_v2->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams_v2->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams_v2->params.active_time = htod32(active_time);
+ eparams_v2->params.nprobes = htod32(nprobes);
+ eparams_v2->params.passive_time = htod32(-1);
+ eparams_v2->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams_v2->params.channel_list[i] =
+ wl_ch_host_to_driver(channels[i]);
+ }
+ eparams_v2->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams_v2->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams_v2->sync_id;
+ } else {
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams->params.active_time = htod32(active_time);
+ eparams->params.nprobes = htod32(nprobes);
+ eparams->params.passive_time = htod32(-1);
+ eparams->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] =
+ wl_ch_host_to_driver(channels[i]);
+ }
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams->sync_id;
+ }
+
wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
- CFGP2P_DBG(("SCAN CHANNELS : "));
+ CFGP2P_DBG(("nprobes:%d active_time:%d\n", nprobes, active_time));
+ CFGP2P_DBG(("SCAN CHANNELS : "));
CFGP2P_DBG(("%d", channels[0]));
for (i = 1; i < num_chans; i++) {
CFGP2P_DBG((",%d", channels[i]));
@@ -1036,9 +1109,10 @@
ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- WL_ERR(("P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx));
- if (ret == BCME_OK)
+ WL_INFORM(("P2P_SEARCH sync ID: %d, bssidx: %d\n", sync_id, bssidx));
+ if (ret == BCME_OK) {
wl_set_p2p_status(cfg, SCANNING);
+ }
return ret;
}
@@ -1121,23 +1195,20 @@
* not update the tlvs buffer pointer/length.
*/
static bool
-wl_cfgp2p_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
+ if (ie->len >= oui_len + 1 &&
+ !bcmp(ie->data, oui, oui_len) &&
+ type == ie->data[oui_len]) {
return TRUE;
}
- if (tlvs == NULL)
- return FALSE;
/* point to the next ie */
- ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
- /* calculate the length of the rest of the buffer */
- *tlvs_len -= (int)(ie - *tlvs);
- /* update the pointer to the start of the buffer */
- *tlvs = ie;
+ if (tlvs != NULL) {
+ bcm_tlv_buffer_advance_past(ie, tlvs, tlvs_len);
+ }
return FALSE;
}
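The rewritten wl_cfgp2p_has_ie() above drops the raw ie[TLV_LEN_OFF]/TLV_HDR_LEN offset arithmetic in favour of the bcm_tlv_t view plus a bcm_tlv_buffer_advance_past() call. A minimal sketch of what such an advance step has to do, assuming a packed {id, len, data[]} TLV layout; the struct and helper names below are illustrative, not the bcmdhd API:

#include <stdint.h>

struct tlv {			/* 1-byte id, 1-byte length, then the payload */
	uint8_t id;
	uint8_t len;
	uint8_t data[];
} __attribute__((packed));

/* Move the caller's (buf, buf_len) cursor past 'ie'; the caller must
 * ensure that 'ie' points inside the buffer described by (*buf, *buf_len).
 */
static void tlv_advance_past(const struct tlv *ie, const uint8_t **buf, uint32_t *buf_len)
{
	const uint8_t *next = (const uint8_t *)ie + 2 /* header */ + ie->len;

	*buf_len -= (uint32_t)(next - *buf);
	*buf = next;
}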
@@ -1147,8 +1218,8 @@
{
const bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wpa_ie((const u8*)ie, (u8 const **)&parse, &len)) {
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wpa_ie(ie, &parse, &len)) {
return (const wpa_ie_fixed_t *)ie;
}
}
@@ -1160,8 +1231,8 @@
{
const bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wps_ie((const u8*)ie, (u8 const **)&parse, &len)) {
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wps_ie(ie, &parse, &len)) {
return (const wpa_ie_fixed_t *)ie;
}
}
@@ -1173,8 +1244,8 @@
{
bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie((const uint8*)ie, (u8 const **)&parse, &len)) {
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &len)) {
return (wifi_p2p_ie_t *)ie;
}
}
@@ -1186,8 +1257,8 @@
{
const bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wfd_ie((const uint8*)ie, (u8 const **)&parse, &len)) {
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wfd_ie(ie, &parse, &len)) {
return (const wifi_wfd_ie_t *)ie;
}
}
@@ -1205,19 +1276,24 @@
/* Validate the pktflag parameter */
if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG |
+ VNDR_IE_DISASSOC_FLAG))) {
CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
return -1;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
- hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strlcpy(hdr.cmd, add_del_cmd, sizeof(hdr.cmd));
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+ /* For vendor ID DOT11_MNG_ID_EXT_ID, need to set pkt flag to VNDR_IE_CUSTOM_FLAG */
+ if (ie_id == DOT11_MNG_ID_EXT_ID) {
+ pktflag = pktflag | VNDR_IE_CUSTOM_FLAG;
+ }
+
/* Copy packet flags that indicate which packets will contain this IE */
pktflag = htod32(pktflag);
memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
@@ -1286,12 +1362,10 @@
CFGP2P_ERR((" argument is invalid\n"));
goto exit;
}
-
if (!cfg->p2p) {
CFGP2P_ERR(("p2p if does not exist\n"));
goto exit;
}
-
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
*type = i;
@@ -1387,8 +1461,22 @@
* To prevent kernel panic,
* if cfgdev->wiphy may be invalid, adding explicit check
*/
- cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
+ struct wireless_dev *wdev_dpp_listen = NULL;
+ wdev_dpp_listen = wl_get_wdev_by_dpp_listen(cfg);
+ /*
+ * check if a dpp listen was triggered;
+ * if so, clear the dpp listen flag and route the listen complete
+ * event to the interface on which the listen was requested.
+ */
+ if (wdev_dpp_listen) {
+ wl_set_dpp_listen_by_netdev(cfg, wdev_dpp_listen->netdev, 0);
+ cfg80211_remain_on_channel_expired(wdev_dpp_listen,
+ cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+ } else {
+ cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+ }
+
} else
CFGP2P_ERR(("Invalid cfgdev. Dropping the"
"remain_on_channel_expired event.\n"));
@@ -1587,7 +1675,7 @@
CFGP2P_DBG((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
if (status == WLC_E_STATUS_SUCCESS) {
wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
- CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+ CFGP2P_ACTION(("TX actfrm : ACK\n"));
if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
CFGP2P_DBG(("no need to wait next AF.\n"));
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
@@ -1595,7 +1683,11 @@
}
else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
wl_set_p2p_status(cfg, ACTION_TX_NOACK);
- CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+ if (status == WLC_E_STATUS_SUPPRESS) {
+ CFGP2P_ACTION(("TX actfrm : SUPPRES\n"));
+ } else {
+ CFGP2P_ACTION(("TX actfrm : NO ACK\n"));
+ }
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
}
} else {
@@ -1647,7 +1739,7 @@
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (ret < 0) {
- CFGP2P_ERR((" sending action frame is failed\n"));
+ CFGP2P_ACTION(("TX actfrm : ERROR\n"));
goto exit;
}
@@ -1836,7 +1928,7 @@
int iovar_len = sizeof(dongle_noa);
CFGP2P_DBG((" Enter\n"));
- memset(&dongle_noa, 0, sizeof(dongle_noa));
+ bzero(&dongle_noa, sizeof(dongle_noa));
if (wl_cfgp2p_vif_created(cfg)) {
cfg->p2p->noa.desc[0].start = 0;
@@ -2032,6 +2124,7 @@
return BCME_ERROR;
}
+ memset_s(&csa_arg, sizeof(csa_arg), 0, sizeof(csa_arg));
csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
csa_arg.count = P2P_ECSA_CNT;
csa_arg.reg = 0;
@@ -2148,12 +2241,14 @@
const u8*
wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib)
{
- const bcm_tlv_t *ie;
+ bcm_tlv_t *ie;
const u8* pAttrib;
+ uint ie_len;
CFGP2P_DBG(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
- while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
+ ie_len = len;
+ while ((ie = bcm_parse_tlvs(parse, ie_len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &ie_len) == TRUE) {
/* Have the P2p ie. Now check for attribute */
if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(ie, attrib)) != NULL) {
CFGP2P_DBG(("P2P attribute %d was found at parse %p",
@@ -2161,15 +2256,16 @@
return pAttrib;
}
else {
- parse += (ie->len + TLV_HDR_LEN);
- len -= (ie->len + TLV_HDR_LEN);
+ /* move to next IE */
+ bcm_tlv_buffer_advance_past(ie, &parse, &ie_len);
+
CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
- " to %p len to %d", attrib, parse, len));
+ " to %p len to %d", attrib, parse, ie_len));
}
}
else {
/* It was not p2p IE. parse will get updated automatically to next TLV */
- CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len));
+ CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, ie_len));
}
}
CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
@@ -2259,8 +2355,7 @@
}
#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
- strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
- net->name[IFNAMSIZ - 1] = '\0';
+ strlcpy(net->name, "p2p%d", sizeof(net->name));
/* Copy the reference to bcm_cfg80211 */
memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
@@ -2486,7 +2581,7 @@
return ERR_PTR(-ENOMEM);
}
- memset(&primary_mac, 0, sizeof(primary_mac));
+ bzero(&primary_mac, sizeof(primary_mac));
get_primary_mac(cfg, &primary_mac);
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
@@ -2545,6 +2640,7 @@
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
int ret = 0;
+ struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (!cfg)
@@ -2552,13 +2648,25 @@
CFGP2P_DBG(("Enter\n"));
+ /* Check if cfg80211 interface is already down */
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ WL_DBG(("cfg80211 interface is already down\n"));
+ return; /* it is even not ready */
+ }
+
ret = wl_cfg80211_scan_stop(cfg, wdev);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
- if (!cfg->p2p)
+ if (!p2p_is_on(cfg)) {
return;
+ }
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
/* Cancel any on-going listen */
wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
index 7ea307c..95be464 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -1,7 +1,9 @@
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.h 689766 2017-03-13 12:11:16Z $
+ * $Id: wl_cfgp2p.h 794110 2018-12-12 05:03:21Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
@@ -161,6 +163,16 @@
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
+#define CFGP2P_ACTION(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \
+ printk args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", \
+ dhd_log_dump_get_timestamp(), __func__); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
#else
#define CFGP2P_ERR(args) \
do { \
@@ -176,7 +188,15 @@
printk args; \
} \
} while (0)
+#define CFGP2P_ACTION(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \
+ printk args; \
+ } \
+ } while (0)
#endif /* DHD_LOG_DUMP */
+
#define CFGP2P_DBG(args) \
do { \
if (wl_dbg_level & WL_DBG_DBG) { \
@@ -185,13 +205,6 @@
} \
} while (0)
-#define CFGP2P_ACTION(args) \
- do { \
- if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
- printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \
- printk args; \
- } \
- } while (0)
#define INIT_TIMER(timer, func, duration, extra_delay) \
do { \
init_timer_compat(timer, func, cfg); \
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.c
new file mode 100644
index 0000000..4aacd91
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.c
@@ -0,0 +1,3939 @@
+/*
+ * Linux cfg80211 driver scan related code
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <bcmiov.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <bcmevent.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgp2p.h>
+#include <bcmdevs.h>
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif // endif
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#include <wl_cfgvendor.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif // endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include "dhd_rtt.h"
+#endif /* RTT_SUPPORT */
+
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+#define MIN_P2P_IE_LEN 8 /* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
+ * Attribute ID(1) + Length(2) + 1(Minimum length:1)
+ */
+#define MAX_P2P_IE_LEN 251 /* Up To 251 */
+
+#define WPS_ATTR_REQ_TYPE 0x103a
+#define WPS_REQ_TYPE_ENROLLEE 0x01
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+#ifdef CUSTOMER_HW4_DEBUG
+bool wl_scan_timeout_dbg_enabled = 0;
+#endif /* CUSTOMER_HW4_DEBUG */
+#ifdef P2P_LISTEN_OFFLOADING
+void wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+static void _wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
+
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy);
+
+extern int passive_channel_skip;
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+/* unfortunately it's too much work to get rid of the const cast - bcm_parse_tlvs
+ * is used everywhere and changing its prototype to take a const qualifier would
+ * require a massive change to all its callers...
+ */
+
+ if ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_INTERWORKING_ID))) {
+ return ie;
+ }
+ return NULL;
+}
+
+s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+ ie_setbuf_t ie_setbuf;
+
+ WL_DBG(("clear interworking IE\n"));
+
+ bzero(&ie_setbuf, sizeof(ie_setbuf_t));
+
+ ie_setbuf.ie_buffer.iecount = htod32(1);
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
+
+ return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+}
+
+s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len)
+{
+ s32 err = BCME_OK;
+ s32 buf_len;
+ ie_setbuf_t *ie_setbuf;
+ ie_getbuf_t ie_getbufp;
+ char getbuf[WLC_IOCTL_SMLEN];
+ u32 iw_ie_len = 0;
+ u8 iw_ie[IW_IES_MAX_BUF_LEN];
+
+ if (ie_id != DOT11_MNG_INTERWORKING_ID) {
+ WL_ERR(("unsupported (id=%d)\n", ie_id));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* access network options (1 octet) is the mandatory field */
+ if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
+ WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
+ return BCME_BADARG;
+ }
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+ VNDR_IE_CUSTOM_FLAG))) {
+ WL_ERR(("invalid packet flag 0x%x\n", pktflag));
+ return BCME_BADARG;
+ }
+
+ wl_get_iwdata_by_netdev(cfg, ndev, iw_ie, &iw_ie_len);
+
+ if (iw_ie_len == data_len && !memcmp(iw_ie, data, data_len)) {
+ WL_ERR(("Previous IW IE is equals to current IE\n"));
+ return BCME_OK;
+ }
+
+ buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+
+ ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
+ if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
+ sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
+ == BCME_OK) {
+ if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
+ WL_DBG(("skip to set interworking IE\n"));
+ return BCME_OK;
+ }
+ }
+
+ /* if already set with previous values, delete it first */
+ if (cfg->wl11u) {
+ if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
+ return err;
+ }
+ }
+
+ ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
+ if (!ie_setbuf) {
+ WL_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+ strlcpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+
+ /* Buffer contains only 1 IE */
+ ie_setbuf->ie_buffer.iecount = htod32(1);
+ /* use the VNDR_IE_CUSTOM_FLAG flag for non-vendor IEs; currently a fixed value */
+ ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
+
+ /* Now, add the IE to the buffer */
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+ /* Returning void here as max data_len can be 8 */
+ (void)memcpy_s((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0],
+ sizeof(uint8) * data_len, data, data_len);
+
+ if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
+ == BCME_OK) {
+ WL_DBG(("set interworking IE\n"));
+ cfg->wl11u = TRUE;
+ wl_set_iwdata_by_netdev(cfg, ndev, data, data_len);
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+ }
+
+ MFREE(cfg->osh, ie_setbuf, buf_len);
+ return err;
+}
+#endif /* WL11U */
+
+#ifdef WL_BCNRECV
+/* Beacon recv results handler sending to upper layer */
+static s32
+wl_bcnrecv_result_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ wl_bss_info_v109_2_t *bi, uint32 scan_status)
+{
+ s32 err = BCME_OK;
+ struct wiphy *wiphy = NULL;
+ wl_bcnrecv_result_t *bcn_recv = NULL;
+ struct timespec ts;
+ if (!bi) {
+ WL_ERR(("%s: bi is NULL\n", __func__));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+ if ((bi->length - bi->ie_length) < sizeof(wl_bss_info_v109_2_t)) {
+ WL_ERR(("bi info version doesn't support bcn_recv attributes\n"));
+ goto exit;
+ }
+
+ if (scan_status == WLC_E_STATUS_RXBCN) {
+ wiphy = cfg->wdev->wiphy;
+ if (!wiphy) {
+ WL_ERR(("wiphy is NULL\n"));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+ bcn_recv = (wl_bcnrecv_result_t *)MALLOCZ(cfg->osh, sizeof(*bcn_recv));
+ if (unlikely(!bcn_recv)) {
+ WL_ERR(("Failed to allocate memory\n"));
+ return -ENOMEM;
+ }
+ /* Returning void here as copy size does not exceed dest size of SSID */
+ (void)memcpy_s((char *)bcn_recv->SSID, DOT11_MAX_SSID_LEN,
+ (char *)bi->SSID, DOT11_MAX_SSID_LEN);
+ /* Returning void here as copy size does not exceed dest size of ETH_LEN */
+ (void)memcpy_s(&bcn_recv->BSSID, ETHER_ADDR_LEN, &bi->BSSID, ETH_ALEN);
+ bcn_recv->channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+ bcn_recv->beacon_interval = bi->beacon_period;
+
+ /* kernel timestamp */
+ get_monotonic_boottime(&ts);
+ bcn_recv->system_time = ((u64)ts.tv_sec*1000000)
+ + ts.tv_nsec / 1000;
+ bcn_recv->timestamp[0] = bi->timestamp[0];
+ bcn_recv->timestamp[1] = bi->timestamp[1];
+ if ((err = wl_android_bcnrecv_event(cfgdev_to_wlc_ndev(cfgdev, cfg),
+ BCNRECV_ATTR_BCNINFO, 0, 0,
+ (uint8 *)bcn_recv, sizeof(*bcn_recv)))
+ != BCME_OK) {
+ WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+ } else {
+ WL_DBG(("Ignoring Escan Event:%d \n", scan_status));
+ }
+exit:
+ if (bcn_recv) {
+ MFREE(cfg->osh, bcn_recv, sizeof(*bcn_recv));
+ }
+ return err;
+}
+#endif /* WL_BCNRECV */
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#ifndef WL_DRV_AVOID_SCANCACHE
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+ int idx;
+ for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+ int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+ if (bss->RSSI < candidate[idx].RSSI) {
+ if (len) {
+ /* In the memcpy below the candidate array always has buffer space
+ * available for at most 'len' elements, as calculated in the for loop.
+ */
+ (void)memcpy_s(&candidate[idx + 1],
+ (sizeof(removal_element_t) * len),
+ &candidate[idx], sizeof(removal_element_t) * len);
+ }
+ candidate[idx].RSSI = bss->RSSI;
+ candidate[idx].length = bss->length;
+ (void)memcpy_s(&candidate[idx].BSSID, ETHER_ADDR_LEN,
+ &bss->BSSID, ETHER_ADDR_LEN);
+ return;
+ }
+ }
+}
+
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+ wl_bss_info_t *bi)
+{
+ int idx1, idx2;
+ int total_delete_len = 0;
+ for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+ wl_bss_info_t *bss = NULL;
+ if (candidate[idx1].RSSI >= bi->RSSI)
+ continue;
+ for (idx2 = 0; idx2 < list->count; idx2++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+ list->bss_info;
+ if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ candidate[idx1].RSSI == bss->RSSI &&
+ candidate[idx1].length == dtoh32(bss->length)) {
+ u32 delete_len = dtoh32(bss->length);
+ WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+ MAC2STRDBG(bss->BSSID.octet)));
+ if (idx2 < list->count -1) {
+ memmove((u8 *)bss, (u8 *)bss + delete_len,
+ list->buflen - cur_len - delete_len);
+ }
+ list->buflen -= delete_len;
+ list->count--;
+ total_delete_len += delete_len;
+ /* if delete_len is greater than or equal to result length */
+ if (total_delete_len >= bi->length) {
+ return;
+ }
+ break;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ }
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+s32
+wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ s32 status = ntoh32(e->status);
+ wl_escan_result_t *escan_result;
+ struct net_device *ndev = NULL;
+#ifndef WL_DRV_AVOID_SCANCACHE
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ const wifi_p2p_ie_t * p2p_ie;
+ const u8 *p2p_dev_addr = NULL;
+ wl_scan_results_t *list;
+ wl_bss_info_t *bss = NULL;
+ u32 i;
+#endif /* WL_DRV_AVOID_SCANCACHE */
+
+ WL_DBG((" enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->scan_sync);
+ /* P2P SCAN is coming from primary interface */
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ ndev = cfg->afx_hdl->dev;
+ else
+ ndev = cfg->escan_info.ndev;
+ }
+ escan_result = (wl_escan_result_t *)data;
+#ifdef WL_BCNRECV
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED &&
+ status == WLC_E_STATUS_RXBCN) {
+ /* handle beacon recv scan results */
+ wl_bss_info_v109_2_t *bi_info;
+ bi_info = (wl_bss_info_v109_2_t *)escan_result->bss_info;
+ err = wl_bcnrecv_result_handler(cfg, cfgdev, bi_info, status);
+ goto exit;
+ }
+#endif /* WL_BCNRECV */
+ if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+ WL_ERR_RLMT(("escan is not ready. drv_scan_status 0x%x"
+ " e_type %d e_states %d\n",
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ ntoh32(e->event_type), ntoh32(e->status)));
+ goto exit;
+ }
+
+#ifndef WL_DRV_AVOID_SCANCACHE
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if (!escan_result) {
+ WL_ERR(("Invalid escan result (NULL pointer)\n"));
+ goto exit;
+ }
+ if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
+ (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
+ WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+ if (wl_escan_check_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0)
+ goto exit;
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
+ }
+
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+
+ if ((channel > MAXCHANNEL) || (channel <= 0))
+ channel = WL_INVALID;
+ else
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
+
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ goto exit;
+ }
+
+ } else {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+ int remove_lower_rssi = FALSE;
+
+ bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ list = wl_escan_get_buf(cfg, FALSE);
+ if (scan_req_match(cfg)) {
+#ifdef WL_HOST_BAND_MGMT
+ s32 channel_band = 0;
+ chanspec_t chspec;
+#endif /* WL_HOST_BAND_MGMT */
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length)) == NULL) {
+ WL_ERR(("Couldn't find P2PIE in probe"
+ " response/beacon\n"));
+ goto exit;
+ }
+#ifdef WL_HOST_BAND_MGMT
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ channel_band = CHSPEC2WLC_BAND(chspec);
+
+ if ((cfg->curr_band == WLC_BAND_5G) &&
+ (channel_band == WLC_BAND_2G)) {
+ /* Avoid sending the GO results in band conflict */
+ if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
+ P2P_SEID_GROUP_ID) != NULL)
+ goto exit;
+ }
+#endif /* WL_HOST_BAND_MGMT */
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+ remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+ if (!bss) {
+ WL_ERR(("bss is NULL\n"));
+ goto exit;
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ WL_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+ i, bss->RSSI, list->count));
+
+ if (remove_lower_rssi)
+ wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+ /* do not allow beacon data to update
+ * the data received from a probe response
+ */
+ if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+
+ WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+ " flags 0x%x, new: RSSI %d flags 0x%x\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+ bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+ if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ WL_SCAN(("%s("MACDBG"), same onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ WL_SCAN(("%s("MACDBG"), prev onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = bss->RSSI;
+ bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+ }
+ if (dtoh32(bss->length) != bi_length) {
+ u32 prev_len = dtoh32(bss->length);
+
+ WL_SCAN(("bss info replacement"
+ " is occured(bcast:%d->probresp%d)\n",
+ bss->ie_length, bi->ie_length));
+ WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ prev_len, bi_length));
+
+ if ((list->buflen - prev_len) + bi_length
+ > ESCAN_BUF_SIZE) {
+ WL_ERR(("Buffer is too small: keep the"
+ " previous result of this AP\n"));
+ /* Only update RSSI */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= (bi->flags
+ & WL_BSS_FLAGS_RSSI_ONCHANNEL);
+ goto exit;
+ }
+
+ if (i < list->count - 1) {
+ /* memory copy required by this case only */
+ memmove((u8 *)bss + bi_length,
+ (u8 *)bss + prev_len,
+ list->buflen - cur_len - prev_len);
+ }
+ list->buflen -= prev_len;
+ list->buflen += bi_length;
+ }
+ list->version = dtoh32(bi->version);
+ /* The '(dtoh32(bss->length) != bi_length)' check above
+ * avoids a buffer overflow here; bi_length is already
+ * accounted for in list->buflen
+ */
+ if ((err = memcpy_s((u8 *)bss,
+ (ESCAN_BUF_SIZE - (list->buflen - bi_length)),
+ (u8 *)bi, bi_length)) != BCME_OK) {
+ WL_ERR(("Failed to copy the recent bss_info."
+ "err:%d recv_len:%d bi_len:%d\n", err,
+ ESCAN_BUF_SIZE - (list->buflen - bi_length),
+ bi_length));
+ /* This scenario should never happen. If it happens,
+ * set list->count to zero for recovery
+ */
+ list->count = 0;
+ list->buflen = 0;
+ ASSERT(0);
+ }
+ goto exit;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+ goto exit;
+ }
+#else
+ WL_ERR(("Buffer is too small: ignoring\n"));
+ goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ }
+ /* The check in the previous step ensures that bi_length does not
+ * exceed ESCAN_BUF_SIZE
+ */
+ (void)memcpy_s(&(((char *)list)[list->buflen]),
+ (ESCAN_BUF_SIZE - list->buflen), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+
+ /*
+ * !Broadcast && number of ssid = 1 && number of channels =1
+ * means specific scan to association
+ */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
+ goto exit;
+ }
+ }
+ }
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+ escan_result->sync_id);
+
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, false, false);
+ }
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Dump FW preserve buffer content */
+ if (status == WLC_E_STATUS_ABORT) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ /* Handle all cases of scan abort */
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_p2p_status(cfg, SCANNING);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (p2p_scan(cfg) && cfg->scan_request &&
+ (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+ WL_ERR(("scan list is changed"));
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ } else
+#endif // endif
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
+ /* If sync_id is not matching, then the abort might have
+ * come for the old scan req or for the in-driver initiated
+ * scan. So do abort for scan_req for which sync_id is
+ * matching.
+ */
+ WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
+ "Ignore the scan abort event.\n",
+ escan_result->sync_id, cfg->escan_info.cur_sync_id));
+ goto exit;
+ } else {
+ /* sync id is matching, abort the scan */
+ WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
+ cfg->escan_info.cur_sync_id));
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
+ }
+#else
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+ } else {
+ /* If there is no pending host initiated scan, do nothing */
+ WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
+ }
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+ "scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
+ }
+ wl_escan_increment_sync_id(cfg, 2);
+ }
+#else /* WL_DRV_AVOID_SCANCACHE */
+ err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
+#endif /* WL_DRV_AVOID_SCANCACHE */
+exit:
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
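When wl_escan_handler() above finds a BSS that is already cached, it merges RSSI values: it keeps the stronger sample when the old and new measurements are of the same kind (both on-channel or both off-channel) and otherwise keeps the on-channel sample. A condensed restatement of that rule; the helper name is illustrative only:

/* RSSI merge rule for duplicate BSS entries in the escan cache (sketch). */
static int merge_rssi(int old_rssi, int old_onchan, int new_rssi, int new_onchan)
{
	if (old_onchan == new_onchan)
		return (old_rssi > new_rssi) ? old_rssi : new_rssi;	/* keep the max */
	return old_onchan ? old_rssi : new_rssi;			/* prefer on-channel */
}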
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ defined(SUPPORT_RANDOM_MAC_SCAN)
+static const u8 *
+wl_retrieve_wps_attribute(const u8 *buf, u16 element_id)
+{
+ const wl_wps_ie_t *ie = NULL;
+ u16 len = 0;
+ const u8 *attrib;
+
+ if (!buf) {
+ WL_ERR(("WPS IE not present"));
+ return 0;
+ }
+
+ ie = (const wl_wps_ie_t*) buf;
+ len = ie->len;
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ attrib = ie->attrib;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ /* Search for attrib */
+ return wl_find_attribute(attrib, len, element_id);
+}
+
+static bool
+wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len)
+{
+ const u8 *ie;
+ const u8 *attrib;
+
+ if ((ie = (const u8 *)wl_cfgp2p_find_wpsie(ie_ptr, len)) == NULL) {
+ WL_DBG(("WPS IE not present. Do nothing.\n"));
+ return false;
+ }
+
+ if ((attrib = wl_retrieve_wps_attribute(ie, WPS_ATTR_REQ_TYPE)) == NULL) {
+ WL_DBG(("WPS_ATTR_REQ_TYPE not found!\n"));
+ return false;
+ }
+
+ if (*attrib == WPS_REQ_TYPE_ENROLLEE) {
+ WL_INFORM_MEM(("WPS Enrolle Active\n"));
+ return true;
+ } else {
+ WL_DBG(("WPS_REQ_TYPE:%d\n", *attrib));
+ }
+
+ return false;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+ const u8 *ie, u32 ie_len)
+{
+ const wifi_p2p_ie_t *p2p_ie;
+ const u8 *end, *pos;
+ s32 listen_channel;
+
+ pos = (const u8 *)ie;
+
+ p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+ if (p2p_ie == NULL) {
+ return 0;
+ }
+
+ if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
+ CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
+ return 0;
+ }
+ pos = p2p_ie->subelts;
+ end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+ CFGP2P_DBG((" found p2p ie ! length %d \n",
+ p2p_ie->len));
+
+ while (pos < end) {
+ uint16 attr_len;
+ if (pos + 2 >= end) {
+ CFGP2P_DBG((" -- Invalid P2P attribute"));
+ return 0;
+ }
+ attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+ if (pos + 3 + attr_len > end) {
+ CFGP2P_DBG(("P2P: Attribute underflow "
+ "(len=%u left=%d)",
+ attr_len, (int) (end - pos - 3)));
+ return 0;
+ }
+
+ /* if the Listen Channel attribute id is 6 and the value is valid,
+ * return the listen channel
+ */
+ if (pos[0] == 6) {
+ /* listen channel subel length format
+ * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+ */
+ listen_channel = pos[1 + 2 + 3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+ return listen_channel;
+ }
+ }
+ pos += 3 + attr_len;
+ }
+ return 0;
+}
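wl_find_listen_channel() above walks P2P attributes encoded as a 1-byte attribute ID, a 2-byte little-endian length and then the value bytes. A self-contained sketch of that walk, generalized to look up any attribute ID; p2p_find_attr() is a hypothetical name, not a bcmdhd function:

#include <stdint.h>
#include <stddef.h>

/* Return a pointer to the value of attribute 'attr_id' inside [pos, end),
 * or NULL if it is absent or truncated; *out_len receives the value length.
 */
static const uint8_t *p2p_find_attr(const uint8_t *pos, const uint8_t *end,
	uint8_t attr_id, uint16_t *out_len)
{
	while (pos + 3 <= end) {
		uint16_t attr_len = (uint16_t)(pos[1] | (pos[2] << 8));	/* LE length */

		if (pos + 3 + attr_len > end)
			break;			/* attribute runs past the buffer */
		if (pos[0] == attr_id) {
			*out_len = attr_len;
			return pos + 3;		/* value starts after ID + length */
		}
		pos += 3 + attr_len;
	}
	return NULL;
}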
+
+#ifdef WL_SCAN_TYPE
+static u32
+wl_cfgscan_map_nl80211_scan_type(struct bcm_cfg80211 *cfg, struct cfg80211_scan_request *request)
+{
+ u32 scan_flags = 0;
+
+ if (!request) {
+ return scan_flags;
+ }
+
+ if (request->flags & NL80211_SCAN_FLAG_LOW_SPAN) {
+ scan_flags |= WL_SCANFLAGS_LOW_SPAN;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) {
+ scan_flags |= WL_SCANFLAGS_HIGH_ACCURACY;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_LOW_POWER) {
+ scan_flags |= WL_SCANFLAGS_LOW_POWER_SCAN;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) {
+ scan_flags |= WL_SCANFLAGS_LOW_PRIO;
+ }
+
+ WL_INFORM(("scan flags. wl:%x cfg80211:%x\n", scan_flags, request->flags));
+ return scan_flags;
+}
+#endif /* WL_SCAN_TYPE */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))
+#else
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+#endif // endif
+static void
+wl_cfgscan_populate_scan_channels(struct bcm_cfg80211 *cfg, u16 *channel_list,
+ struct cfg80211_scan_request *request, u32 *num_channels)
+{
+ u32 i = 0, j = 0;
+ u32 channel;
+ u32 n_channels = 0;
+ u32 chanspec = 0;
+
+ if (!request || !request->n_channels) {
+ /* Do full channel scan */
+ return;
+ }
+
+ n_channels = request->n_channels;
+ for (i = 0; i < n_channels; i++) {
+ channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ /* SKIP DFS channels for Secondary interface */
+ if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+ (IS_RADAR_CHAN(request->channels[i]->flags)))
+ continue;
+
+ chanspec = WL_CHANSPEC_BW_20;
+ if (chanspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec! Skipping channel\n"));
+ continue;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ if (request->channels[i]->band == IEEE80211_BAND_60GHZ) {
+ /* Not supported */
+ continue;
+ }
+#endif /* LINUX_VER >= 3.6 */
+
+ if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+#ifdef WL_HOST_BAND_MGMT
+ if (cfg->curr_band == WLC_BAND_5G) {
+ WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
+ continue;
+ }
+#endif /* WL_HOST_BAND_MGMT */
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+#ifdef WL_HOST_BAND_MGMT
+ if (cfg->curr_band == WLC_BAND_2G) {
+ WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
+ continue;
+ }
+#endif /* WL_HOST_BAND_MGMT */
+ chanspec |= WL_CHANSPEC_BAND_5G;
+ }
+ channel_list[j] = channel;
+ channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+ channel_list[j] |= chanspec;
+ WL_SCAN(("Chan : %d, Channel spec: %x \n",
+ channel, channel_list[j]));
+ channel_list[j] = wl_chspec_host_to_driver(channel_list[j]);
+ j++;
+ }
+ *num_channels = j;
+
+}
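wl_cfgscan_populate_scan_channels() above converts each requested cfg80211 center frequency to a channel number via ieee80211_frequency_to_channel() and then tags it with a band and 20 MHz chanspec flag. A plain userspace sketch of the underlying frequency-to-channel mapping for the 2.4 GHz and 5 GHz bands handled here (6 GHz and 60 GHz are skipped, as in the hunk):

#include <stdio.h>

/* Userspace equivalent of the 2.4/5 GHz part of ieee80211_frequency_to_channel(). */
static int freq_to_channel(int freq_mhz)
{
	if (freq_mhz == 2484)
		return 14;				/* channel 14 special case */
	if (freq_mhz >= 2412 && freq_mhz <= 2472)
		return (freq_mhz - 2407) / 5;		/* 2.4 GHz: ch 1..13 */
	if (freq_mhz >= 5180 && freq_mhz <= 5825)
		return (freq_mhz - 5000) / 5;		/* 5 GHz: ch 36..165 */
	return 0;					/* not handled in this sketch */
}

int main(void)
{
	printf("2412 MHz -> ch %d, 5180 MHz -> ch %d\n",
		freq_to_channel(2412), freq_to_channel(5180));
	return 0;
}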
+
+static void
+wl_cfgscan_populate_scan_ssids(struct bcm_cfg80211 *cfg, u8 *buf_ptr, u32 buf_len,
+ struct cfg80211_scan_request *request, u32 *ssid_num)
+{
+ u32 n_ssids;
+ wlc_ssid_t ssid;
+ int i, j = 0;
+
+ if (!request || !buf_ptr) {
+ /* Do full channel scan */
+ return;
+ }
+
+ n_ssids = request->n_ssids;
+ if (n_ssids > 0) {
+
+ if (buf_len < (n_ssids * sizeof(wlc_ssid_t))) {
+ WL_ERR(("buf len not sufficient for scan ssids\n"));
+ return;
+ }
+
+ for (i = 0; i < n_ssids; i++) {
+ bzero(&ssid, sizeof(wlc_ssid_t));
+ ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
+ /* Returning void here, as per previous line copy length does not exceed
+ * DOT11_MAX_SSID_LEN
+ */
+ (void)memcpy_s(ssid.SSID, DOT11_MAX_SSID_LEN, request->ssids[i].ssid,
+ ssid.SSID_len);
+ if (!ssid.SSID_len) {
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ } else {
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssid.SSID, ssid.SSID_len));
+ }
+ /* For the multiple-SSID case copy each SSID into the buffer; the
+ * destination that buf_ptr points to is of type wlc_ssid_t
+ */
+ (void)memcpy_s(buf_ptr, sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ buf_ptr += sizeof(wlc_ssid_t);
+ j++;
+ }
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+ *ssid_num = j;
+}
+
+static s32
+wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
+ struct cfg80211_scan_request *request)
+{
+ wl_scan_params_t *params = NULL;
+ wl_scan_params_v2_t *params_v2 = NULL;
+ u32 scan_type = 0;
+ u32 scan_param_size = 0;
+ u32 n_channels = 0;
+ u32 n_ssids = 0;
+ uint16 *chan_list = NULL;
+ u32 channel_offset = 0;
+ u32 cur_offset;
+
+ if (!scan_params) {
+ return BCME_ERROR;
+ }
+
+ if (cfg->active_scan == PASSIVE_SCAN) {
+ WL_INFORM_MEM(("Enforcing passive scan\n"));
+ scan_type = WL_SCANFLAGS_PASSIVE;
+ }
+
+ WL_DBG(("Preparing Scan request\n"));
+ if (cfg->scan_params_v2) {
+ params_v2 = (wl_scan_params_v2_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_v2_t);
+ channel_offset = offsetof(wl_scan_params_v2_t, channel_list);
+ } else {
+ params = (wl_scan_params_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_t);
+ channel_offset = offsetof(wl_scan_params_t, channel_list);
+ }
+
+ if (params_v2) {
+ /* scan params ver2 */
+#if defined(WL_SCAN_TYPE)
+ scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
+#endif /* WL_SCAN_TYPE */
+
+ (void)memcpy_s(&params_v2->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ params_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+ params_v2->bss_type = DOT11_BSSTYPE_ANY;
+ params_v2->scan_type = htod32(scan_type);
+ params_v2->nprobes = htod32(-1);
+ params_v2->active_time = htod32(-1);
+ params_v2->passive_time = htod32(-1);
+ params_v2->home_time = htod32(-1);
+ params_v2->channel_num = 0;
+ bzero(&params_v2->ssid, sizeof(wlc_ssid_t));
+ chan_list = params_v2->channel_list;
+ } else {
+ /* scan params ver 1 */
+ if (!params) {
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+ (void)memcpy_s(&params->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = htod32(-1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(-1);
+ params->channel_num = 0;
+ bzero(&params->ssid, sizeof(wlc_ssid_t));
+ chan_list = params->channel_list;
+ }
+
+ if (!request) {
+ /* scan_request null, do scan based on base config */
+ WL_DBG(("scan_request is null\n"));
+ return BCME_OK;
+ }
+
+ WL_INFORM(("n_channels:%d n_ssids:%d\n", request->n_channels, request->n_ssids));
+
+ cur_offset = channel_offset;
+ /* Copy channel array if applicable */
+ if ((request->n_channels > 0) && chan_list) {
+ if (len >= (scan_param_size + (request->n_channels * sizeof(u16)))) {
+ wl_cfgscan_populate_scan_channels(cfg,
+ chan_list, request, &n_channels);
+ cur_offset += (n_channels * (sizeof(u16)));
+ }
+ }
+
+ /* Copy ssid array if applicable */
+ if (request->n_ssids > 0) {
+ cur_offset = roundup(cur_offset, sizeof(u32));
+ if (len > (cur_offset + (request->n_ssids * sizeof(wlc_ssid_t)))) {
+ u32 rem_len = len - cur_offset;
+ wl_cfgscan_populate_scan_ssids(cfg,
+ ((u8 *)scan_params + cur_offset), rem_len, request, &n_ssids);
+ }
+ }
+
+ if (n_ssids || n_channels) {
+ u32 channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
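+ /* For illustration: with, say, 2 SSIDs and 5 channels this packs as
+ * (2 << WL_SCAN_PARAMS_NSSID_SHIFT) | 5, so both counts reach the
+ * firmware in a single 32-bit word.
+ */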
+ if (params_v2) {
+ params_v2->channel_num = channel_num;
+ if (n_channels == 1) {
+ params_v2->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params_v2->nprobes = htod32(
+ params_v2->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ }
+ } else {
+ params->channel_num = channel_num;
+ if (n_channels == 1) {
+ params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params->nprobes = htod32(
+ params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ }
+ }
+ }
+
+ WL_INFORM(("scan_prep done. n_channels:%d n_ssids:%d\n", n_channels, n_ssids));
+ return BCME_OK;
+}
+
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+ wl_uint32_list_t *list;
+ s32 err = BCME_OK;
+ if (valid_chan_list == NULL || size <= 0)
+ return -ENOMEM;
+
+ bzero(valid_chan_list, size);
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
+ if (err != 0) {
+ WL_ERR(("get channels failed with %d\n", err));
+ }
+
+ return err;
+}
+
+#if defined(SAVE_CONNECTION_WHEN_CC_UPDATE)
+bool
+wl_update_ap_chandef(struct net_device *ndev)
+{
+ uint8 idx;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhd = (dhd_pub_t *) cfg->pub;
+ struct net_device *net = NULL;
+ struct wireless_dev *wdev = NULL;
+ u32 chanspec = 0;
+ s32 err = BCME_OK;
+
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ net = dhd_idx2net(dhd, idx);
+ if (!net)
+ continue;
+ if (net->ieee80211_ptr) {
+ wdev = net->ieee80211_ptr;
+ if ((wdev->iftype == NL80211_IFTYPE_P2P_GO) ||
+ (wdev->iftype == NL80211_IFTYPE_AP)) {
+ chanspec = 0;
+ err = wldev_iovar_getint(net, "chanspec", (u32 *)&chanspec);
+ if (unlikely(err)) {
+ WL_ERR(("chanspec failed %d\n", err));
+ return err;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ if (!cfg80211_chandef_valid(&wdev->chandef)) {
+ wl_cfg80211_ch_switch_notify(net,
+ 0x1001, bcmcfg_to_wiphy(cfg));
+ WL_ERR(("current channel %d is not allowed\n"
+ "it MUST be stopped or"
+ " moved to a valid channel immediately\n",
+ CHSPEC_CHANNEL(chanspec)));
+ } else {
+ wl_cfg80211_ch_switch_notify(net, chanspec, bcmcfg_to_wiphy(cfg));
+ }
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ }
+ }
+ }
+
+ return err;
+}
+
+bool
+wl_check_valid_channel_in_country(struct net_device *ndev, char *ccode, int forced)
+{
+ bool found = FALSE;
+ s32 err = BCME_OK;
+ chanspec_t chanspec = 0;
+ int i, j, cnt;
+ char smbuf[WLC_IOCTL_SMLEN] = {0};
+ wl_channels_in_country_t *cic;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ uint32 channels[WL_NUMCHANNELS] = {0x00, };
+ uint32 channel = 0;
+
+ if (!(wl_get_drv_status(cfg, CONNECTING, ndev) ||
+ wl_get_drv_status(cfg, CONNECTED, ndev))) {
+ WL_DBG(("skip, not connected\n"));
+ return TRUE;
+ }
+
+ cic = (wl_channels_in_country_t *)smbuf;
+ cic->country_abbrev[0] = ccode[0];
+ cic->country_abbrev[1] = ccode[1];
+ cic->country_abbrev[2] = '\0';
+ cnt = 0;
+ for (i = WLC_BAND_2G; i >= WLC_BAND_5G; --i) {
+ cic->band = i;
+ cic->count = 0;
+ cic->buflen = WLC_IOCTL_SMLEN - sizeof(cic);
+
+ err = wldev_ioctl_get(ndev, WLC_GET_CHANNELS_IN_COUNTRY, cic, sizeof(smbuf));
+ if (!err) {
+ for (j = 0; j < cic->count; ++j) {
+ channels[cnt++] = dtoh32(cic->channel[j]);
+ }
+ }
+ }
+
+ if (cnt) {
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (!err) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(chanspec));
+ for (i = 0; i < cnt; ++i) {
+ if (channel == channels[i]) {
+ found = TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ if (forced && !found) {
+ scb_val_t scbval;
+
+ wl_set_drv_status(cfg, DISCONNECTING, ndev);
+ bzero(&scbval, sizeof(scb_val_t));
+ WL_DBG(("Channel %d is not allowed on %s\n", channel, ccode));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ WL_ERR(("error(%d)\n", err));
+ return err;
+ }
+ else {
+ int wait_cnt = 50;
+ while (!wl_get_drv_status(cfg, DISCONNECTING, ndev) && wait_cnt) {
+ WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+ wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(10);
+ }
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ /* cfg80211 expects disconnect event from DHD to release wdev->current_bss */
+ CFG80211_DISCONNECTED(ndev, DOT11_RC_DEAUTH_LEAVING, NULL, 0, true, GFP_KERNEL);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+ }
+ return found;
+}
+#endif /* SAVE_CONNECTION_WHEN_CC_UPDATE */
+
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct cfg80211_scan_request *request, uint16 action)
+{
+ s32 err = BCME_OK;
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size;
+ wl_escan_params_t *eparams = NULL;
+ wl_escan_params_v2_t *eparams_v2 = NULL;
+ u8 *scan_params = NULL;
+ u8 *params = NULL;
+ u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ u32 num_chans = 0;
+ s32 channel;
+ u32 n_valid_chan;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ u32 i, j, n_nodfs = 0;
+ u16 *default_chan_list = NULL;
+ wl_uint32_list_t *list;
+ s32 bssidx = -1;
+ struct net_device *dev = NULL;
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+ p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+ u32 chan_mem = 0;
+ u32 sync_id = 0;
+
+ WL_DBG(("Enter \n"));
+
+ /* The scan request may be empty: in that case perform the default full scan */
+ if (!cfg) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (cfg->scan_params_v2) {
+ params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params));
+ } else {
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ defined(SUPPORT_RANDOM_MAC_SCAN)
+ if ((request != NULL) && !ETHER_ISNULLADDR(request->mac_addr) &&
+ !ETHER_ISNULLADDR(request->mac_addr_mask) &&
+ !wl_is_wps_enrollee_active(ndev, request->ie, request->ie_len)) {
+ /* Call scanmac only for valid configuration */
+ err = wl_cfg80211_scan_mac_enable(ndev, request->mac_addr,
+ request->mac_addr_mask);
+ if (err < 0) {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore if the chip doesn't support the feature */
+ err = BCME_OK;
+ } else {
+ /* For errors other than unsupported fail the scan */
+ WL_ERR(("%s : failed to set random mac for host scan, %d\n",
+ __FUNCTION__, err));
+ err = -EAGAIN;
+ goto exit;
+ }
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ /* LEGACY SCAN TRIGGER */
+ WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ if (!request) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+#ifdef USE_INITIAL_2G_SCAN
+ struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
+ /* allow one 5G channel to add previous connected channel in 5G */
+ bool allow_one_5g_channel = TRUE;
+ j = 0;
+ for (i = 0; i < request->n_channels; i++) {
+ int tmp_chan = ieee80211_frequency_to_channel
+ (request->channels[i]->center_freq);
+ if (tmp_chan > CH_MAX_2G_CHANNEL) {
+ if (allow_one_5g_channel)
+ allow_one_5g_channel = FALSE;
+ else
+ continue;
+ }
+ if (j > CH_MAX_2G_CHANNEL) {
+ WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
+ " and previous 5G connected channel\n",
+ j, CH_MAX_2G_CHANNEL));
+ break;
+ }
+ bcopy(request->channels[i], &tmp_channel_list[j],
+ sizeof(struct ieee80211_channel));
+ WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
+ j++;
+ }
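+ /* Roughly: tmp_channel_list now holds every 2.4 GHz channel from the
+ * request plus at most one 5 GHz channel (the first one seen, kept so a
+ * previously connected 5 GHz channel can still be probed). For example,
+ * a request of channels 1-13, 36 and 149 would be trimmed to 1-13 and 36.
+ */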
+ if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
+ for (i = 0; i < j; i++)
+ bcopy(&tmp_channel_list[i], request->channels[i],
+ sizeof(struct ieee80211_channel));
+
+ request->n_channels = j;
+ is_first_init_2g_scan = true;
+ }
+ else
+ WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
+
+ WL_SCAN(("request->n_channels=%d\n", request->n_channels));
+#else /* USE_INITIAL_SHORT_DWELL_TIME */
+ is_first_init_2g_scan = true;
+#endif /* USE_INITIAL_2G_SCAN */
+ g_first_broadcast_scan = false;
+ }
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+ /* If the scan request is not empty, parse the scan request parameters */
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ if (n_channels % 2)
+ /* If n_channels is odd, add one u16 of padding */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
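+ /* e.g. 11 channels reserve 12 u16 slots; wl_scan_prep() later rounds
+ * the SSID offset up to a u32 boundary, so this padding keeps the
+ * allocation size in sync with that alignment.
+ */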
+
+ /* Allocate space for populating ssids in wl_escan_params_t struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = MALLOCZ(cfg->osh, params_size);
+ if (params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t *)params;
+ scan_params = (u8 *)&eparams_v2->params;
+ eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->sync_id = sync_id;
+ } else {
+ eparams = (wl_escan_params_t *)params;
+ scan_params = (u8 *)&eparams->params;
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->sync_id = sync_id;
+ }
+
+ if (wl_scan_prep(cfg, scan_params, params_size, request) < 0) {
+ WL_ERR(("scan_prep failed\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ /* Override active_time to reduce scan time if this is the first broadcast scan. */
+ if (is_first_init_2g_scan) {
+ if (eparams_v2) {
+ eparams_v2->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+ } else {
+ eparams->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+ }
+ }
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+ wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length not sufficient\n"));
+ MFREE(cfg->osh, params, params_size);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+ cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ WL_INFORM_MEM(("LEGACY_SCAN sync ID: %d, bssidx: %d\n", sync_id, bssidx));
+ if (unlikely(err)) {
+ if (err == BCME_EPERM)
+ /* Scan Not permitted at this point of time */
+ WL_DBG((" Escan not permitted at this time (%d)\n", err));
+ else
+ WL_ERR((" Escan set error (%d)\n", err));
+ } else {
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+ }
+ MFREE(cfg->osh, params, params_size);
+ }
+ else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+ /* P2P SCAN TRIGGER */
+ s32 _freq = 0;
+ n_nodfs = 0;
+
+ if (request && request->n_channels) {
+ num_chans = request->n_channels;
+ WL_SCAN((" chann number : %d\n", num_chans));
+ chan_mem = (u32)(num_chans * sizeof(*default_chan_list));
+ default_chan_list = MALLOCZ(cfg->osh, chan_mem);
+ if (default_chan_list == NULL) {
+ WL_ERR(("channel list allocation failed \n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+ int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+ list = (wl_uint32_list_t *) chan_buf;
+ n_valid_chan = dtoh32(list->count);
+ if (n_valid_chan > WL_NUMCHANNELS) {
+ WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
+ MFREE(cfg->osh, default_chan_list, chan_mem);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ for (i = 0; i < num_chans; i++)
+ {
+#ifdef WL_HOST_BAND_MGMT
+ int channel_band = 0;
+#endif /* WL_HOST_BAND_MGMT */
+ _freq = request->channels[i]->center_freq;
+ channel = ieee80211_frequency_to_channel(_freq);
+#ifdef WL_HOST_BAND_MGMT
+ channel_band = (channel > CH_MAX_2G_CHANNEL) ?
+ WLC_BAND_5G : WLC_BAND_2G;
+ if ((cfg->curr_band != WLC_BAND_AUTO) &&
+ (cfg->curr_band != channel_band) &&
+ !IS_P2P_SOCIAL_CHANNEL(channel))
+ continue;
+#endif /* WL_HOST_BAND_MGMT */
+
+ /* ignore DFS channels */
+ if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ (IEEE80211_CHAN_NO_IR
+ | IEEE80211_CHAN_RADAR))
+#else
+ (IEEE80211_CHAN_RADAR
+ | IEEE80211_CHAN_PASSIVE_SCAN))
+#endif // endif
+ continue;
+#ifdef P2P_SKIP_DFS
+ if (channel >= 52 && channel <= 144) {
+ if (is_printed == false) {
+ WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+ is_printed = true;
+ }
+ continue;
+ }
+#endif /* P2P_SKIP_DFS */
+
+ for (j = 0; j < n_valid_chan; j++) {
+ /* allow only channels supported by the
+ * current regulatory domain
+ */
+ if (n_nodfs >= num_chans) {
+ break;
+ }
+ if (channel == (dtoh32(list->element[j]))) {
+ default_chan_list[n_nodfs++] =
+ channel;
+ }
+ }
+
+ }
+ }
+ if (num_chans == SOCIAL_CHAN_CNT && (
+ (default_chan_list[0] == SOCIAL_CHAN_1) &&
+ (default_chan_list[1] == SOCIAL_CHAN_2) &&
+ (default_chan_list[2] == SOCIAL_CHAN_3))) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ WL_DBG(("P2P SEARCH PHASE START \n"));
+ } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+ ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
+ /* If you are already a GO, then do SEARCH only */
+ WL_DBG(("Already a GO. Do SEARCH Only"));
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ num_chans = n_nodfs;
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+ } else if (num_chans == 1) {
+ p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+ WL_INFORM_MEM(("Trigger p2p join scan\n"));
+ } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+ /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
+ * the supplicant
+ */
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ } else {
+ WL_DBG(("P2P SCAN STATE START \n"));
+ num_chans = n_nodfs;
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+ }
+ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+ err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
+ search_state, action,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+ p2p_scan_purpose);
+
+ if (!err)
+ cfg->p2p->search_state = search_state;
+
+ MFREE(cfg->osh, default_chan_list, chan_mem);
+ }
+exit:
+ if (unlikely(err)) {
+ /* Don't print an error in case of scan suppress */
+ if ((err == BCME_EPERM) && cfg->scan_suppressed)
+ WL_DBG(("Escan failed: Scan Suppressed \n"));
+ else
+ WL_ERR(("scan error (%d)\n", err));
+ }
+ return err;
+}
+
+s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = BCME_OK;
+ s32 passive_scan;
+ s32 passive_scan_time;
+ s32 passive_scan_time_org;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+
+ results = wl_escan_get_buf(cfg, FALSE);
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ cfg->escan_info.ndev = ndev;
+ cfg->escan_info.wiphy = wiphy;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ goto exit;
+ }
+
+ if (passive_channel_skip) {
+
+ err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+
+ passive_scan_time = 0;
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time, sizeof(passive_scan_time));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n",
+ passive_channel_skip));
+ }
+
+ err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+
+ if (passive_channel_skip) {
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+ passive_scan_time_org));
+ }
+
+exit:
+ return err;
+}
+
+static s32
+wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
+{
+ u32 scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ if ((cfg->custom_scan_channel_time > DHD_SCAN_ASSOC_ACTIVE_TIME) ||
+ (cfg->custom_scan_unassoc_time > DHD_SCAN_UNASSOC_ACTIVE_TIME) ||
+ (cfg->custom_scan_passive_time > DHD_SCAN_PASSIVE_TIME) ||
+ (cfg->custom_scan_home_time > DHD_SCAN_HOME_TIME) ||
+ (cfg->custom_scan_home_away_time > DHD_SCAN_HOME_AWAY_TIME)) {
+ scan_timer_interval_ms = CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS;
+ }
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+ /* If NAN is enabled adding +10 sec to the existing timeout value */
+#ifdef WL_NAN
+ if (cfg->nan_enable) {
+ scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
+ }
+#endif /* WL_NAN */
+ WL_MEM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
+ return scan_timer_interval_ms;
+}
+
+#define SCAN_EBUSY_RETRY_LIMIT 20
+static s32
+wl_cfgscan_handle_scanbusy(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 err)
+{
+ s32 scanbusy_err = 0;
+ static u32 busy_count = 0;
+
+ if (!err) {
+ busy_count = 0;
+ return scanbusy_err;
+ }
+ if (err == BCME_BUSY || err == BCME_NOTREADY) {
+ WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
+ scanbusy_err = -EBUSY;
+ } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+ WL_ERR(("Scan not permitted due to scan suppress\n"));
+ scanbusy_err = -EPERM;
+ } else {
+ /* For all other fw errors, use a generic error code as return
+ * value to cfg80211 stack
+ */
+ scanbusy_err = -EAGAIN;
+ }
+
+ if (scanbusy_err == -EBUSY) {
+ /* Flush FW preserve buffer logs for checking failure */
+ if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
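+ /* Rough timeline: the preserve-log flush above starts once more than
+ * SCAN_EBUSY_RETRY_LIMIT/5 consecutive EBUSY results have been seen,
+ * while the recovery path below (scan abort, optional memdump) runs
+ * only after busy_count exceeds SCAN_EBUSY_RETRY_LIMIT itself.
+ */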
+ if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
+ struct ether_addr bssid;
+ s32 ret = 0;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_NOTREADY;
+ }
+ dhdp->scan_busy_occurred = TRUE;
+ busy_count = 0;
+ WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, AP_CREATING, ndev),
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+ bzero(&bssid, sizeof(bssid));
+ if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
+ &bssid, ETHER_ADDR_LEN)) == 0) {
+ WL_ERR(("FW is connected with " MACDBG "/n",
+ MAC2STRDBG(bssid.octet)));
+ } else {
+ WL_ERR(("GET BSSID failed with %d\n", ret));
+ }
+
+ wl_cfg80211_scan_abort(cfg);
+
+ } else {
+ /* Hold the context for 400msec, so that 10 subsequent scans
+ * can give a buffer of 4sec which is enough to
+ * cover any on-going scan in the firmware
+ */
+ WL_DBG(("Enforcing delay for EBUSY case \n"));
+ msleep(400);
+ }
+ } else {
+ busy_count = 0;
+ }
+
+ return scanbusy_err;
+}
+
+s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct cfg80211_ssid *ssids;
+ struct ether_addr primary_mac;
+ bool p2p_ssid;
+#ifdef WL11U
+ bcm_tlv_t *interworking_ie;
+ u8 iw_ie[IW_IES_MAX_BUF_LEN];
+ u32 iw_ie_len = 0;
+#endif // endif
+ s32 err = 0;
+ s32 bssidx = -1;
+ s32 i;
+ bool escan_req_failed = false;
+ s32 scanbusy_err = 0;
+
+ unsigned long flags;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ struct net_device *remain_on_channel_ndev = NULL;
+#endif // endif
+ /*
+ * Hostapd triggers a scan before starting automatic channel selection
+ * to collect channel characteristics. However, the firmware scan engine
+ * doesn't support collecting channel characteristics along with a
+ * scan, so just return scan success.
+ */
+#ifndef IGUANA_LEGACY_CHIPS
+ if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+ WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
+ return 0;
+ }
+#endif // endif
+
+ ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ WL_ERR(("Sending Action Frames. Try it again.\n"));
+ return -EAGAIN;
+ }
+
+ WL_DBG(("Enter wiphy (%p)\n", wiphy));
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ if (cfg->scan_request == NULL) {
+ wl_clr_drv_status_all(cfg, SCANNING);
+ WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+ } else {
+ WL_ERR(("Scanning already\n"));
+ return -EAGAIN;
+ }
+ }
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+ WL_ERR(("Scanning being aborted\n"));
+ return -EAGAIN;
+ }
+ if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+ WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+ return -EOPNOTSUPP;
+ }
+#if !defined(DISALBE_11H) && defined(DHD_NOSCAN_DURING_CSA)
+ if (request && cfg->in_csa) {
+ WL_ERR(("Scanning is not allowed during CSA\n"));
+ return -EAGAIN;
+ }
+#endif /* !DISALBE_11H && DHD_NOSCAN_DURING_CSA */
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_SCANBUSY);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ mutex_lock(&cfg->scan_sync);
+ remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+ if (remain_on_channel_ndev) {
+ WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+ wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+ }
+ mutex_unlock(&cfg->scan_sync);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_cancel_p2plo(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+ if (request) { /* scan bss */
+ ssids = request->ssids;
+ p2p_ssid = false;
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len &&
+ IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+ /* P2P Scan */
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
+ if (!(IS_P2P_IFACE(request->wdev))) {
+ /* P2P scan on non-p2p iface. Fail scan */
+ WL_ERR(("p2p_search on non p2p iface\n"));
+ goto scan_out;
+ }
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+ p2p_ssid = true;
+ break;
+ }
+ }
+ if (p2p_ssid) {
+ if (cfg->p2p_supported) {
+ /* p2p scan trigger */
+ if (p2p_on(cfg) == false) {
+ /* p2p on at the first time */
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif // endif
+ }
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ p2p_scan(cfg) = true;
+ }
+ } else {
+ /* Legacy scan trigger, so we have to disable
+ * p2p discovery if p2p discovery is on.
+ */
+ if (cfg->p2p_supported) {
+ p2p_scan(cfg) = false;
+ /* If the netdevice is not the primary one and p2p is on,
+ * we will do the p2p scan using P2PAPI_BSSCFG_DEVICE.
+ */
+
+ if (p2p_scan(cfg) == false) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(cfg,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ }
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n",
+ ndev));
+ err = BCME_ERROR;
+ goto scan_out;
+ }
+#ifdef WL11U
+ wl_get_iwdata_by_netdev(cfg, ndev, iw_ie, &iw_ie_len);
+ if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
+ request->ie, request->ie_len)) != NULL) {
+ if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data,
+ interworking_ie->len)) != BCME_OK) {
+ WL_ERR(("Failed to add interworking IE"));
+ }
+ } else if (iw_ie_len != 0) {
+ /* we have to clear the IW IE and disable gratuitous ARP */
+ wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
+ 0, bssidx);
+ /* we don't care about error here
+ * because the only failure case is unsupported,
+ * which is fine
+ */
+ if (unlikely(err)) {
+ WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
+ }
+ wl_clear_iwdata_by_netdev(cfg, ndev);
+ cfg->wl11u = FALSE;
+ }
+#endif /* WL11U */
+ if (request) {
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ }
+
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ } else { /* scan in ibss */
+ ssids = this_ssid;
+ }
+
+ if (request && cfg->p2p_supported) {
+ WL_TRACE_HW4(("START SCAN\n"));
+#if defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+ SCAN_WAKE_LOCK_TIMEOUT);
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif // endif
+ }
+
+ if (cfg->p2p_supported) {
+ if (request && p2p_on(cfg) && p2p_scan(cfg)) {
+
+ /* find my listen channel */
+ cfg->afx_hdl->my_listen_chan =
+ wl_find_listen_channel(cfg, request->ie,
+ request->ie_len);
+ err = wl_cfgp2p_enable_discovery(cfg, ndev,
+ request->ie, request->ie_len);
+
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+
+ mutex_lock(&cfg->scan_sync);
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+ if (likely(!err)) {
+ goto scan_success;
+ } else {
+ escan_req_failed = true;
+ goto scan_out;
+ }
+
+scan_success:
+ wl_cfgscan_handle_scanbusy(cfg, ndev, BCME_OK);
+ cfg->scan_request = request;
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ /* Arm the timer */
+ mod_timer(&cfg->scan_timeout,
+ jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
+ mutex_unlock(&cfg->scan_sync);
+ return 0;
+
+scan_out:
+ if (escan_req_failed) {
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->scan_request = NULL;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ mutex_unlock(&cfg->scan_sync);
+ /* Handling for scan busy errors */
+ scanbusy_err = wl_cfgscan_handle_scanbusy(cfg, ndev, err);
+ if (scanbusy_err == BCME_NOTREADY) {
+ /* In case of bus failures avoid ioctl calls */
+#if defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+#endif // endif
+ return -ENODEV;
+ }
+ err = scanbusy_err;
+ }
+
+#if defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+#endif // endif
+ return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ WL_DBG(("Enter\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+#ifdef WL_CFG80211_P2P_DEV_IF
+ PRINT_WDEV_INFO(request->wdev);
+#else
+ PRINT_WDEV_INFO(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* DHD_IFDEBUG */
+
+#ifndef IGUANA_LEGACY_CHIPS
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_cfg_multip2p_operational(cfg)) {
+ WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+ return -ENODEV;
+ }
+ }
+#endif // endif
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
+ }
+#ifdef WL_DRV_AVOID_SCANCACHE
+ /* Reset roam cache after successful scan request */
+#ifdef ROAM_CHANNEL_CACHE
+ if (!err) {
+ reset_roam_cache(cfg);
+ }
+#endif /* ROAM_CHANNEL_CACHE */
+#endif /* WL_DRV_AVOID_SCANCACHE */
+ return err;
+}
+
+/* Note: This API should be invoked with the scan_sync mutex
+ * held so that the scan_request data structures don't
+ * get modified in between.
+ */
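+/* Expected call pattern (a sketch of how wl_cfg80211_cancel_scan() below
+ * uses this helper):
+ *
+ *   mutex_lock(&cfg->scan_sync);
+ *   wdev = wl_get_scan_wdev(cfg);
+ *   if (wdev)
+ *           ndev = wdev_to_wlc_ndev(wdev, cfg);
+ *   mutex_unlock(&cfg->scan_sync);
+ */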
+struct wireless_dev *
+wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+
+ if (!cfg) {
+ WL_ERR(("cfg ptr null\n"));
+ return NULL;
+ }
+
+ if (!cfg->scan_request && !cfg->sched_scan_req) {
+ /* No scans in progress */
+ WL_MEM(("no scan in progress \n"));
+ return NULL;
+ }
+
+ if (cfg->scan_request) {
+ wdev = GET_SCAN_WDEV(cfg->scan_request);
+#ifdef WL_SCHED_SCAN
+ } else if (cfg->sched_scan_req) {
+ wdev = GET_SCHED_SCAN_WDEV(cfg->sched_scan_req);
+#endif /* WL_SCHED_SCAN */
+ } else {
+ WL_MEM(("no scan in progress \n"));
+ }
+
+ return wdev;
+}
+
+void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+
+ mutex_lock(&cfg->scan_sync);
+ if (!cfg->scan_request && !cfg->sched_scan_req) {
+ /* No scans in progress */
+ WL_INFORM_MEM(("No scan in progress\n"));
+ goto exit;
+ }
+
+ wdev = wl_get_scan_wdev(cfg);
+ if (!wdev) {
+ WL_ERR(("No wdev present\n"));
+ goto exit;
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ WL_INFORM_MEM(("Scan aborted! \n"));
+exit:
+ mutex_unlock(&cfg->scan_sync);
+}
+
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+ void *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ u32 channel, channel_num;
+
+ if (!in_atomic()) {
+ /* Abort scan params only need space for 1 channel and 0 ssids */
+ if (cfg->scan_params_v2) {
+ params_size = WL_SCAN_PARAMS_V2_FIXED_SIZE + 1 * sizeof(uint16);
+ } else {
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ }
+ params = MALLOCZ(cfg->osh, params_size);
+ if (params == NULL) {
+ WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+ return;
+ }
+
+ /* Use magic value of channel=-1 to abort scan */
+ channel = htodchanspec(-1);
+ channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (1 & WL_SCAN_PARAMS_COUNT_MASK));
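+ /* i.e. zero SSIDs and a single channel entry; the invalid channel value
+ * is what the scan engine treats as an abort request.
+ */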
+ if (cfg->scan_params_v2) {
+ wl_scan_params_v2_t *params_v2 = (wl_scan_params_v2_t *)params;
+ params_v2->channel_list[0] = channel;
+ params_v2->channel_num = channel_num;
+ } else {
+ wl_scan_params_t *params_v1 = (wl_scan_params_t *)params;
+ params_v1->channel_list[0] = channel;
+ params_v1->channel_num = channel_num;
+ }
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
+ if (err < 0) {
+ /* scan abort can fail if there is no outstanding scan */
+ WL_DBG(("scan abort failed. ret:%d\n", err));
+ }
+ MFREE(cfg->osh, params, params_size);
+ }
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_frame_len = 0;
+ }
+#endif /* WLTDLS */
+}
+
+s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ s32 err = BCME_OK;
+ unsigned long flags;
+ struct net_device *dev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_DBG(("Enter \n"));
+ BCM_REFERENCE(dhdp);
+
+ if (!ndev) {
+ WL_ERR(("ndev is null\n"));
+ err = BCME_ERROR;
+ goto out;
+ }
+
+ if (cfg->escan_info.ndev != ndev) {
+ WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
+ cfg->escan_info.ndev, ndev));
+ err = BCME_ERROR;
+ goto out;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ defined(SUPPORT_RANDOM_MAC_SCAN)
+ /* Disable scanmac if enabled */
+ if (cfg->scanmac_enabled) {
+ wl_cfg80211_scan_mac_disable(ndev);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+
+ if (cfg->scan_request) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->scan_request->dev != cfg->p2p_net)
+ dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+ dev = cfg->scan_request->wdev->netdev;
+#endif /* WL_ENABLE_P2P_IF */
+ }
+ else {
+ WL_DBG(("cfg->scan_request is NULL. Internal scan scenario."
+ "doing scan_abort for ndev %p primary %p",
+ ndev, bcmcfg_to_prmry_ndev(cfg)));
+ dev = ndev;
+ }
+ if (fw_abort && !in_atomic())
+ wl_cfg80211_scan_abort(cfg);
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+ cfg->scan_enq_time = 0;
+#if defined(ESCAN_RESULT_PATCH)
+ if (likely(cfg->scan_request)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (aborted && cfg->p2p && p2p_scan(cfg) &&
+ (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+ WL_ERR(("scan list is changed"));
+ cfg->bss_list = wl_escan_get_buf(cfg, !aborted);
+ } else
+#endif // endif
+ cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+
+ wl_inform_bss(cfg);
+ }
+#endif /* ESCAN_RESULT_PATCH */
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+ if (cfg->sched_scan_req && !cfg->scan_request) {
+ if (!aborted) {
+ WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+ cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy,
+ cfg->sched_scan_req->reqid);
+#else
+ cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+ }
+
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
+ cfg->sched_scan_running = FALSE;
+ cfg->sched_scan_req = NULL;
+ }
+#endif /* WL_SCHED_SCAN */
+ if (likely(cfg->scan_request)) {
+ WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
+ /* scan_sync mutex is already held */
+ _wl_notify_scan_done(cfg, aborted);
+ cfg->scan_request = NULL;
+ }
+ if (cfg->p2p && p2p_is_on(cfg))
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, dev);
+
+#if defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif // endif
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+out:
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+void
+wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg;
+
+ WL_DBG(("Enter wl_cfg80211_abort_scan\n"));
+ cfg = wiphy_priv(wdev->wiphy);
+
+ /* Check if any scan in progress only then abort */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_cfg80211_scan_abort(cfg);
+ /* Only a scan abort is issued here. As abort_scan expects, the abort
+ * status needs to be communicated via the cfg80211_scan_done call.
+ * Here we just issue the abort request and let the scan-complete path
+ * indicate the abort to the cfg80211 layer.
+ */
+ WL_DBG(("wl_cfg80211_abort_scan: Scan abort issued to FW\n"));
+ }
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
+{
+ int ret = 0;
+
+ WL_TRACE(("Enter\n"));
+
+ if (!cfg || !cfgdev) {
+ return -EINVAL;
+ }
+
+ /* cancel scan and notify scan status */
+ wl_cfg80211_cancel_scan(cfg);
+
+ return ret;
+}
+
+/* This API is just meant as a wrapper for cfg80211_scan_done
+ * API. This doesn't do state mgmt. For cancelling scan,
+ * please use wl_cfg80211_cancel_scan API.
+ */
+static void
+_wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif // endif
+
+ if (!cfg->scan_request) {
+ return;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ memset_s(&info, sizeof(struct cfg80211_scan_info), 0, sizeof(struct cfg80211_scan_info));
+ info.aborted = aborted;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, aborted);
+#endif // endif
+ cfg->scan_request = NULL;
+}
+
+#ifdef WL_DRV_AVOID_SCANCACHE
+static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
+ u32 bi_length)
+{
+ u32 ret;
+ u8 *p2p_dev_addr = NULL;
+
+ ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
+ if (!ret) {
+ return ret;
+ }
+ if (status == WLC_E_STATUS_PARTIAL) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+
+ if ((channel > MAXCHANNEL) || (channel <= 0)) {
+ channel = WL_INVALID;
+ } else {
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
+ }
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ }
+ } else {
+ WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ }
+
+ return ret;
+}
+
+static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
+ struct net_device *ndev, const wl_event_msg_t *e, s32 status)
+{
+ s32 err = BCME_OK;
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ bool aborted = false;
+ bool fw_abort = false;
+ bool notify_escan_complete = false;
+
+ if (wl_escan_check_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0) {
+ goto exit;
+ }
+
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+
+ if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ }
+
+ if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ notify_escan_complete = true;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
+ WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
+ goto exit;
+ }
+
+ bi = escan_result->bss_info;
+ bi_length = dtoh32(bi->length);
+ if ((!bi) ||
+ (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE))) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)"
+ "or invalid bss_info length\n"));
+ goto exit;
+ }
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
+ }
+
+ if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
+ goto exit;
+ } else {
+ if (scan_req_match(cfg)) {
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+ }
+#ifdef ROAM_CHANNEL_CACHE
+ add_roam_cache(cfg, bi);
+#endif /* ROAM_CHANNEL_CACHE */
+ err = wl_inform_single_bss(cfg, bi, false);
+#ifdef ROAM_CHANNEL_CACHE
+ /* print_roam_cache(); */
+ update_roam_cache(cfg, ioctl_version);
+#endif /* ROAM_CHANNEL_CACHE */
+
+ /*
+ * !Broadcast && number of ssids == 1 && number of channels == 1
+ * means a specific scan used for association.
+ */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ aborted = false;
+ fw_abort = true;
+ }
+ /* Directly exit from function here and
+ * avoid sending notify completion to cfg80211
+ */
+ goto exit;
+ }
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+
+ /* Update escan complete status */
+ aborted = false;
+ fw_abort = false;
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
+
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
+
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = true;
+ }
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
+ }
+
+ /* Notify escan complete status */
+ if (notify_escan_complete) {
+ wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
+ }
+
+exit:
+ return err;
+
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+
+s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ struct wl_scan_results *bss_list;
+ struct net_device *ndev = NULL;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (!ndev || !wl_get_drv_status(cfg, SCANNING, ndev)) {
+ WL_DBG(("scan is not ready \n"));
+ return err;
+ }
+
+ mutex_lock(&cfg->scan_sync);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ bzero(&channel_inform, sizeof(channel_inform));
+ err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform));
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+ if (unlikely(channel_inform.scan_channel)) {
+
+ WL_DBG(("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel));
+ }
+ cfg->bss_list = cfg->scan_results;
+ bss_list = cfg->bss_list;
+ bzero(bss_list, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
+ if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(cfg);
+
+scan_done_out:
+ del_timer_sync(&cfg->scan_timeout);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ _wl_notify_scan_done(cfg, false);
+ cfg->scan_request = NULL;
+ }
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ WL_DBG(("cfg80211_scan_done\n"));
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
+
+void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+
+ bzero(&info, sizeof(struct cfg80211_scan_info));
+ info.aborted = aborted;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, aborted);
+#endif // endif
+}
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int
+wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int ret;
+
+ if (cfg->random_mac_enabled == enable) {
+ WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
+ return BCME_OK;
+ }
+
+ if (enable) {
+ ret = wl_cfg80211_random_mac_enable(dev);
+ } else {
+ ret = wl_cfg80211_random_mac_disable(dev);
+ }
+
+ if (!ret) {
+ cfg->random_mac_enabled = enable;
+ }
+
+ return ret;
+}
+
+int
+wl_cfg80211_random_mac_enable(struct net_device *dev)
+{
+ u8 random_mac[ETH_ALEN] = {0, };
+ u8 rand_bytes[3] = {0, };
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#if !defined(LEGACY_RANDOM_MAC)
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ wl_scanmac_config_t *sm_config = NULL;
+#endif /* !LEGACY_RANDOM_MAC */
+
+ if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+ wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+ WL_ERR(("fail to Set random mac, current state is wrong\n"));
+ return err;
+ }
+
+ (void)memcpy_s(random_mac, ETH_ALEN, bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN);
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+
+ if (rand_bytes[2] == 0x0 || rand_bytes[2] == 0xff) {
+ rand_bytes[2] = 0xf0;
+ }
+
+#if defined(LEGACY_RANDOM_MAC)
+ /* Bytes 3, 4 and 5 of random_mac are overwritten with the contents of
+ * rand_bytes. Only three bytes are copied, so no overflow can occur and
+ * the return value of memcpy_s can be ignored.
+ */
+ (void)memcpy_s(&random_mac[3], (sizeof(u8) * 3), rand_bytes, sizeof(rand_bytes));
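+ /* For example, a primary address of 00:90:4c:aa:bb:cc would scan as
+ * 00:90:4c:xx:yy:zz, i.e. the OUI is preserved and only the lower three
+ * bytes are randomized.
+ */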
+
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ random_mac, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set random generate MAC address\n"));
+ } else {
+ WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+ MAC2STRDBG((const u8 *)&random_mac)));
+ WL_ERR(("random MAC enable done"));
+ }
+#else
+ /* Enable scan mac */
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 1;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ /* Older chips that do not have scanmac support can still use
+ * cur_etheraddr to set the random MAC. rand_mask and rand_mac come from
+ * the upper cfg80211 layer. If rand_mask and rand_mac are not passed,
+ * fall back to the default cur_etheraddr and default mask.
+ */
+ if (err == BCME_UNSUPPORTED) {
+ /* In case of host-based legacy randomization, the random address is
+ * generated by mixing 3 bytes of cur_etheraddr with 3 generated
+ * random bytes. In that case rand_mask is nothing but the
+ * random bytes.
+ */
+ (void)memcpy_s(&random_mac[3], (sizeof(u8) * 3), rand_bytes, sizeof(rand_bytes));
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ random_mac, ETH_ALEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set random generate MAC address\n"));
+ } else {
+ WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+ MAC2STRDBG((const u8 *)&random_mac)));
+ WL_ERR(("random MAC enable done using legacy randmac"));
+ }
+ } else if (err == BCME_OK) {
+ /* Configure scanmac */
+ (void)memset_s(buffer, sizeof(buffer), 0x0, sizeof(buffer));
+ sm_config = (wl_scanmac_config_t *)sm->data;
+ sm->len = sizeof(*sm_config);
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
+ sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+
+ /* Set randomize mac address recv from upper layer */
+ (void)memcpy_s(&sm_config->mac.octet, ETH_ALEN, random_mac, ETH_ALEN);
+
+ /* Set randomize mask recv from upper layer */
+
+ /* Currently, in the Samsung case, the upper layer does not provide
+ * a variable rand mask and uses a fixed 3-byte randomization
+ */
+ (void)memset_s(&sm_config->random_mask.octet, ETH_ALEN, 0x0, ETH_ALEN);
+ /* Memsetting the remaining octets 3, 4, 5. So remaining dest length is 3 */
+ (void)memset_s(&sm_config->random_mask.octet[3], 3, 0xFF, 3);
+
+ WL_DBG(("recv random mac addr " MACDBG " recv rand mask" MACDBG "\n",
+ MAC2STRDBG((const u8 *)&sm_config->mac.octet),
+ MAC2STRDBG((const u8 *)&sm_config->random_mask)));
+
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed scanmac configuration\n"));
+
+ /* Disable scan mac for clean-up */
+ wl_cfg80211_random_mac_disable(dev);
+ return err;
+ }
+ WL_DBG(("random MAC enable done using scanmac"));
+ } else {
+ WL_ERR(("failed to enable scanmac, err=%d\n", err));
+ }
+#endif /* LEGACY_RANDOM_MAC */
+
+ return err;
+}
+
+int
+wl_cfg80211_random_mac_disable(struct net_device *dev)
+{
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#if !defined(LEGACY_RANDOM_MAC)
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+#endif /* !LEGACY_RANDOM_MAC */
+
+#if defined(LEGACY_RANDOM_MAC)
+ WL_ERR(("set original mac " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr)));
+
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set original MAC address\n"));
+ } else {
+ WL_ERR(("legacy random MAC disable done \n"));
+ }
+#else
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ /* Disable scanmac */
+ sm_enable->enable = 0;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to disable scanmac, err=%d\n", err));
+ return err;
+ }
+ /* Clear scanmac enabled status */
+ cfg->scanmac_enabled = 0;
+ WL_DBG(("random MAC disable done\n"));
+#endif /* LEGACY_RANDOM_MAC */
+
+ return err;
+}
+
+/*
+ * This is new interface for mac randomization. It takes randmac and randmask
+ * as arg and it uses scanmac iovar to offload the mac randomization to firmware.
+ */
+int wl_cfg80211_scan_mac_enable(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask)
+{
+ int byte_index = 0;
+ s32 err = BCME_ERROR;
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ wl_scanmac_config_t *sm_config = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if ((rand_mac == NULL) || (rand_mask == NULL)) {
+ err = BCME_BADARG;
+ WL_ERR(("fail to Set random mac, bad argument\n"));
+ /* Disable the current scanmac config */
+ wl_cfg80211_scan_mac_disable(dev);
+ return err;
+ }
+
+ if (ETHER_ISNULLADDR(rand_mac)) {
+ WL_DBG(("fail to Set random mac, Invalid rand mac\n"));
+ /* Disable the current scanmac config */
+ wl_cfg80211_scan_mac_disable(dev);
+ return err;
+ }
+
+ if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+ wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+ WL_ERR(("fail to Set random mac, current state is wrong\n"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Enable scan mac */
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 1;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err == BCME_OK) {
+ /* Configure scanmac */
+ (void)memset_s(buffer, sizeof(buffer), 0x0, sizeof(buffer));
+ sm_config = (wl_scanmac_config_t *)sm->data;
+ sm->len = sizeof(*sm_config);
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
+ sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+
+ /* Set randomize mac address recv from upper layer */
+ (void)memcpy_s(&sm_config->mac.octet, ETH_ALEN, rand_mac, ETH_ALEN);
+
+ /* Set randomize mask recv from upper layer */
+
+ /* There is a difference in how rand_mask is interpreted by the
+ * upper layer and the firmware. If a byte is set to FF, the upper
+ * layer means "keep that byte, do not randomize", whereas the
+ * firmware means "randomize that byte", and vice versa. Hence a
+ * conversion is needed before setting the iovar.
+ */
+ (void)memset_s(&sm_config->random_mask.octet, ETH_ALEN, 0x0, ETH_ALEN);
+ /* Only byte randomization is supported currently. If mask recv is 0x0F
+ * for a particular byte then it will be treated as no randomization
+ * for that byte.
+ */
+ while (byte_index < ETH_ALEN) {
+ if (rand_mask[byte_index] == 0xFF) {
+ sm_config->random_mask.octet[byte_index] = 0x00;
+ } else if (rand_mask[byte_index] == 0x00) {
+ sm_config->random_mask.octet[byte_index] = 0xFF;
+ }
+ byte_index++;
+ }
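+ /* Example: an upper-layer mask of ff:ff:ff:00:00:00 (keep the OUI,
+ * randomize the rest) becomes 00:00:00:ff:ff:ff for the firmware.
+ */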
+
+ WL_DBG(("recv random mac addr " MACDBG "recv rand mask" MACDBG "\n",
+ MAC2STRDBG((const u8 *)&sm_config->mac.octet),
+ MAC2STRDBG((const u8 *)&sm_config->random_mask)));
+
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed scanmac configuration\n"));
+
+ /* Disable scan mac for clean-up */
+ wl_cfg80211_random_mac_disable(dev);
+ return err;
+ }
+ /* Mark scanmac enabled */
+ cfg->scanmac_enabled = 1;
+ WL_DBG(("scanmac enable done"));
+ } else {
+ WL_ERR(("failed to enable scanmac, err=%d\n", err));
+ }
+
+ return err;
+}
+
+int
+wl_cfg80211_scan_mac_disable(struct net_device *dev)
+{
+ s32 err = BCME_ERROR;
+
+ err = wl_cfg80211_random_mac_disable(dev);
+
+ return err;
+}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME 30
+#define PNO_REPEAT 4
+#define PNO_FREQ_EXPO_MAX 2
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+ int i;
+
+ if (!ssid || !ssid_list)
+ return FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (ssid->ssid_len == ssid_list[i].ssid_len) {
+ if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ ushort pno_time = PNO_TIME;
+ int pno_repeat = PNO_REPEAT;
+ int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ struct cfg80211_ssid *ssid = NULL;
+ struct cfg80211_ssid *hidden_ssid_list = NULL;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len = 0, tlv_len = 0;
+ u32 payload_len;
+ int ssid_cnt = 0;
+ int i;
+ int ret = 0;
+ unsigned long flags;
+
+ if (!request) {
+ WL_ERR(("Sched scan request was NULL\n"));
+ return -EINVAL;
+ }
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN START\n"));
+ WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
+ request->n_match_sets, request->n_ssids));
+ WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+ request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+ if (!request->n_ssids || !request->n_match_sets) {
+ WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+ return -EINVAL;
+ }
+
+ bzero(&ssids_local, sizeof(ssids_local));
+
+ if (request->n_ssids > 0) {
+ hidden_ssid_list = request->ssids;
+ }
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
+ event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ return -ENOMEM;
+ }
+ bzero(event_data, alloc_len);
+ event_data->tlvs = NULL;
+ tlv_len = sizeof(tlv_log);
+ event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+ if (!event_data->tlvs) {
+ WL_ERR(("%s: failed to allocate log_tlv with "
+ "length(%d)\n", __func__, tlv_len));
+ MFREE(cfg->osh, event_data, alloc_len);
+ return -ENOMEM;
+ }
+ }
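+	/* Build the PFN SSID list from the match sets; SSIDs that also appear in
+	 * request->ssids are flagged hidden so the firmware can probe for them.
+	 */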
+ for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+ ssid = &request->match_sets[i].ssid;
+ /* No need to include null ssid */
+ if (ssid->ssid_len) {
+ ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+			/* SSID_len was capped to DOT11_MAX_SSID_LEN above, so the
+			 * memcpy_s return value can safely be ignored.
+			 */
+ (void)memcpy_s(ssids_local[ssid_cnt].SSID, DOT11_MAX_SSID_LEN, ssid->ssid,
+ ssids_local[ssid_cnt].SSID_len);
+ if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+ ssids_local[ssid_cnt].hidden = TRUE;
+ WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+ } else {
+ ssids_local[ssid_cnt].hidden = FALSE;
+ WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
+ if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+ ssids_local[ssid_cnt].rssi_thresh =
+ (int8)request->match_sets[i].rssi_thold;
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
+ ssid_cnt++;
+ }
+ }
+
+ if (ssid_cnt) {
+ if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
+ pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+ WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ for (i = 0; i < ssid_cnt; i++) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
+ tlv_data = event_data->tlvs;
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = ssids_local[i].SSID_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssids_local[i].SSID, ssids_local[i].SSID_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ event_data, payload_len);
+ }
+ }
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = request;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ } else {
+ ret = -EINVAL;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ defined(SUPPORT_RANDOM_MAC_SCAN)
+ if (!ETHER_ISNULLADDR(request->mac_addr) && !ETHER_ISNULLADDR(request->mac_addr_mask)) {
+ ret = wl_cfg80211_scan_mac_enable(dev, request->mac_addr, request->mac_addr_mask);
+		/* Ignore the error if the chip doesn't support the feature */
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* If feature is not supported, ignore the error (legacy chips) */
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("set random mac failed (%d). Ignore.\n", ret));
+				/* Clean up the state and stop PNO */
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+ WL_ERR(("PNO Stop for SSID failed"));
+ }
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ }
+ }
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && (defined(SUPPORT_RANDOM_MAC_SCAN)) */
+exit:
+ if (event_data) {
+ MFREE(cfg->osh, event_data->tlvs, tlv_len);
+ MFREE(cfg->osh, event_data, alloc_len);
+ }
+ return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
+#else
+int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN STOP\n"));
+
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+ WL_ERR(("PNO Stop for SSID failed"));
+ } else {
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
+ }
+
+ if (cfg->sched_scan_req || cfg->sched_scan_running) {
+ WL_PNO((">>> Sched scan running. Aborting it..\n"));
+ wl_cfg80211_cancel_scan(cfg);
+ }
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
+ enum wl_custom_scan_time_type type, int time)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg == NULL) {
+ return FALSE;
+ }
+
+ switch (type) {
+ case WL_CUSTOM_SCAN_CHANNEL_TIME :
+ WL_ERR(("Scan Channel Time %d\n", time));
+ cfg->custom_scan_channel_time = time;
+ break;
+ case WL_CUSTOM_SCAN_UNASSOC_TIME :
+ WL_ERR(("Scan Unassoc Time %d\n", time));
+ cfg->custom_scan_unassoc_time = time;
+ break;
+ case WL_CUSTOM_SCAN_PASSIVE_TIME :
+ WL_ERR(("Scan Passive Time %d\n", time));
+ cfg->custom_scan_passive_time = time;
+ break;
+ case WL_CUSTOM_SCAN_HOME_TIME :
+ WL_ERR(("Scan Home Time %d\n", time));
+ cfg->custom_scan_home_time = time;
+ break;
+ case WL_CUSTOM_SCAN_HOME_AWAY_TIME :
+ WL_ERR(("Scan Home Away Time %d\n", time));
+ cfg->custom_scan_home_away_time = time;
+ break;
+ default:
+ return FALSE;
+ }
+ return TRUE;
+}
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+static void wl_scan_timeout_dbg_set(void);
+
+static void wl_scan_timeout_dbg_set(void)
+{
+ WL_ERR(("Enter \n"));
+ prev_dhd_console_ms = dhd_console_ms;
+ prev_wl_dbg_level = wl_dbg_level;
+
+ dhd_console_ms = 1;
+ wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
+
+ wl_scan_timeout_dbg_enabled = 1;
+}
+void wl_scan_timeout_dbg_clear(void)
+{
+ WL_ERR(("Enter \n"));
+ dhd_console_ms = prev_dhd_console_ms;
+ wl_dbg_level = prev_wl_dbg_level;
+
+ wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+static void wl_scan_timeout(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+ struct wl_scan_results *bss_list;
+ wl_bss_info_t *bi = NULL;
+ s32 i;
+ u32 channel;
+ u64 cur_time = OSL_LOCALTIME_NS();
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ unsigned long flags;
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status = NULL;
+ UNUSED_PARAMETER(rtt_status);
+#endif /* RTT_SUPPORT */
+
+ UNUSED_PARAMETER(cur_time);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (!(cfg->scan_request)) {
+ WL_ERR(("timer expired but no scan request\n"));
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ return;
+ }
+
+ wdev = GET_SCAN_WDEV(cfg->scan_request);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
+ }
+
+ if (dhd_query_bus_erros(dhdp)) {
+ return;
+ }
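+	/* If memdump-on-bug is enabled and the scan event appears stuck in the
+	 * event workqueue (dequeue time behind enqueue time, or DPC scheduling
+	 * errors), dump timing/lock state and force a panic for post-mortem.
+	 */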
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON &&
+ ((cfg->scan_deq_time < cfg->scan_enq_time) ||
+ dhd_bus_query_dpc_sched_errors(dhdp))) {
+ WL_ERR(("****SCAN event timeout due to scheduling problem\n"));
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhdp);
+#endif /* RTT_SUPPORT */
+ WL_ERR(("***SCAN event timeout. WQ state:0x%x scan_enq_time:"SEC_USEC_FMT
+ " evt_hdlr_entry_time:"SEC_USEC_FMT" evt_deq_time:"SEC_USEC_FMT
+ "\nscan_deq_time:"SEC_USEC_FMT" scan_hdlr_cmplt_time:"SEC_USEC_FMT
+ " scan_cmplt_time:"SEC_USEC_FMT" evt_hdlr_exit_time:"SEC_USEC_FMT
+ "\ncurrent_time:"SEC_USEC_FMT"\n", work_busy(&cfg->event_work),
+ GET_SEC_USEC(cfg->scan_enq_time), GET_SEC_USEC(cfg->wl_evt_hdlr_entry_time),
+ GET_SEC_USEC(cfg->wl_evt_deq_time), GET_SEC_USEC(cfg->scan_deq_time),
+ GET_SEC_USEC(cfg->scan_hdlr_cmplt_time), GET_SEC_USEC(cfg->scan_cmplt_time),
+ GET_SEC_USEC(cfg->wl_evt_hdlr_exit_time), GET_SEC_USEC(cur_time)));
+ if (cfg->scan_enq_time) {
+ WL_ERR(("Elapsed time(ns): %llu\n", (cur_time - cfg->scan_enq_time)));
+ }
+ WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
+ mutex_is_locked(&cfg->if_sync),
+ mutex_is_locked(&cfg->usr_sync),
+ mutex_is_locked(&cfg->pm_sync),
+ mutex_is_locked(&cfg->scan_sync),
+ spin_is_locked(&cfg->cfgdrv_lock),
+ spin_is_locked(&cfg->eq_lock)));
+#ifdef RTT_SUPPORT
+ WL_ERR(("RTT lock_state:[%d]\n",
+ mutex_is_locked(&rtt_status->rtt_mutex)));
+#ifdef WL_NAN
+ WL_ERR(("RTT and Geofence lock_states:[%d:%d]\n",
+ mutex_is_locked(&cfg->nancfg.nan_sync),
+ mutex_is_locked(&(rtt_status)->geofence_mutex)));
+#endif /* WL_NAN */
+#endif /* RTT_SUPPORT */
+
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+ dhd_bus_intr_count_dump(dhdp);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
+	/* Print workqueue states. Only for built-in drivers, as the symbol is not exported to modules */
+ show_workqueue_state();
+#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
+
+ bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!bss_list) {
+ WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+ } else {
+ WL_ERR(("Dump scan buffer:\n"
+ "scanned AP count (%d)\n", bss_list->count));
+
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+ }
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ bzero(&msg, sizeof(wl_event_msg_t));
+ WL_ERR(("timer expired\n"));
+ dhdp->scan_timeout_occurred = TRUE;
+#ifdef BCMPCIE
+ (void)dhd_pcie_dump_int_regs(dhdp);
+ dhd_pcie_dump_rc_conf_space_cap(dhdp);
+#endif /* BCMPCIE */
+#ifdef DHD_FW_COREDUMP
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
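+	/* Synthesize an escan result event with TIMEOUT status so the normal
+	 * escan event path aborts the scan and notifies cfg80211.
+	 */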
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+ msg.reason = 0xFFFFFFFF;
+ wl_cfg80211_event(ndev, &msg, NULL);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_set();
+#endif /* CUSTOMER_HW4_DEBUG */
+}
+
+s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+ int err = 0;
+
+ cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_init_sync_id(cfg);
+
+ /* Init scan_timeout timer */
+ init_timer_compat(&cfg->scan_timeout, wl_scan_timeout, cfg);
+
+ wl_cfg80211_set_bcmcfg(cfg);
+
+ return err;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND 0
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
+ wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int err = 0;
+ struct cfg80211_scan_request *request = NULL;
+ struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+ struct ieee80211_channel *channel = NULL;
+ int channel_req = 0;
+ int band = 0;
+ wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
+ wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
+ int n_pfn_results = 0;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len = 0, tlv_len = 0;
+ u32 payload_len;
+ u8 tmp_buf[DOT11_MAX_SSID_LEN + 1];
+
+ WL_DBG(("Enter\n"));
+
+ /* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
+ * in size and SSID offset, allowing v1 to be used below except for the results
+ * fields themselves (status, count, offset to netinfo).
+ */
+ STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
+ STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+ OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+
+ /* Extract the version-specific items */
+ if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
+ n_pfn_results = pfn_result_v1->count;
+ pnetinfo = pfn_result_v1->netinfo;
+ WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+ if (n_pfn_results > 0) {
+ int i;
+
+ if (n_pfn_results > MAX_PFN_LIST_COUNT)
+ n_pfn_results = MAX_PFN_LIST_COUNT;
+
+ bzero(&ssid, sizeof(ssid));
+
+ request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
+ sizeof(uint16) + sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+ tlv_len = 3 * sizeof(tlv_log);
+ event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+ if (!event_data->tlvs) {
+ WL_ERR(("%s: failed to allocate the tlv_log with "
+ "length(%d)\n", __func__, tlv_len));
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < n_pfn_results; i++) {
+ netinfo = &pnetinfo[i];
+ if (!netinfo) {
+ WL_ERR(("Invalid netinfo ptr. index:%d", i));
+ err = -EINVAL;
+ goto out_err;
+ }
+ if (netinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ WL_ERR(("Wrong SSID length:%d\n",
+ netinfo->pfnsubnet.SSID_len));
+ err = -EINVAL;
+ goto out_err;
+ }
+				/* SSID_len was validated against DOT11_MAX_SSID_LEN above and
+				 * tmp_buf is DOT11_MAX_SSID_LEN + 1 bytes, so the copy and the
+				 * terminating NUL always fit.
+				 */
+ (void)memcpy_s(tmp_buf, DOT11_MAX_SSID_LEN,
+ netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.SSID_len);
+ tmp_buf[netinfo->pfnsubnet.SSID_len] = '\0';
+ WL_PNO((">>> SSID:%s Channel:%d \n",
+ tmp_buf, netinfo->pfnsubnet.channel));
+				/* A PFN result does not carry all the information the
+				 * supplicant needs (e.g. IEs). Do a targeted escan so that
+				 * the sched scan results are reported via wl_inform_single_bss
+				 * in the required format. Escan needs the request in the form
+				 * of a cfg80211_scan_request, so for the time being build one
+				 * from the received PNO event.
+				 */
+
+ ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
+				/* ssid[i].ssid_len is capped at DOT11_MAX_SSID_LEN, so the
+				 * memcpy_s return value can safely be ignored.
+				 */
+ (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+ netinfo->pfnsubnet.SSID, ssid[i].ssid_len);
+ request->n_ssids++;
+
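+				/* PFN reports only a channel number; rebuild an
+				 * ieee80211_channel entry (2 GHz vs 5 GHz inferred from the
+				 * channel number) for the targeted escan request.
+				 */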
+ channel_req = netinfo->pfnsubnet.channel;
+ band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
+ channel[i].center_freq =
+ ieee80211_channel_to_frequency(channel_req, band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = ssid[i].ssid_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ (void)memcpy_s(tlv_data->value, sizeof(int16),
+ &netinfo->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ /* Abort any on-going scan */
+ wl_cfg80211_cancel_scan(cfg);
+ }
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ p2p_scan(cfg) = false;
+ }
+ wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+ WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+ WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif // endif
+ if (err) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+ cfg->sched_scan_running = TRUE;
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+
+ } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
+ n_pfn_results = pfn_result_v2->count;
+ pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
+
+ if (e->event_type == WLC_E_PFN_NET_LOST) {
+ WL_PNO(("Do Nothing %d\n", e->event_type));
+ return 0;
+ }
+
+ WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+ if (n_pfn_results > 0) {
+ int i;
+
+ if (n_pfn_results > MAX_PFN_LIST_COUNT)
+ n_pfn_results = MAX_PFN_LIST_COUNT;
+
+ bzero(&ssid, sizeof(ssid));
+
+ request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
+ sizeof(uint16) + sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+ tlv_len = 3 * sizeof(tlv_log);
+ event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+ if (!event_data->tlvs) {
+ WL_ERR(("%s: failed to allocate the tlv_log with "
+ "length(%d)\n", __func__, tlv_len));
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < n_pfn_results; i++) {
+ netinfo_v2 = &pnetinfo_v2[i];
+ if (!netinfo_v2) {
+ WL_ERR(("Invalid netinfo ptr. index:%d", i));
+ err = -EINVAL;
+ goto out_err;
+ }
+ WL_PNO((">>> SSID:%s Channel:%d \n",
+ netinfo_v2->pfnsubnet.u.SSID,
+ netinfo_v2->pfnsubnet.channel));
+				/* A PFN result does not carry all the information the
+				 * supplicant needs (e.g. IEs). Do a targeted escan so that
+				 * the sched scan results are reported via wl_inform_single_bss
+				 * in the required format. Escan needs the request in the form
+				 * of a cfg80211_scan_request, so for the time being build one
+				 * from the received PNO event.
+				 */
+ ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
+ netinfo_v2->pfnsubnet.SSID_len);
+				/* ssid_len was capped at DOT11_MAX_SSID_LEN (32) above, which
+				 * also fits within IEEE80211_MAX_SSID_LEN, so the copy is safe.
+				 */
+ (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+ netinfo_v2->pfnsubnet.u.SSID, ssid[i].ssid_len);
+ request->n_ssids++;
+
+ channel_req = netinfo_v2->pfnsubnet.channel;
+ band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
+ channel[i].center_freq =
+ ieee80211_channel_to_frequency(channel_req, band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+						tlv_data->len = ssid[i].ssid_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+						(void)memcpy_s(tlv_data->value, sizeof(int16),
+ &netinfo_v2->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ /* Abort any on-going scan */
+ wl_cfg80211_cancel_scan(cfg);
+ }
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ p2p_scan(cfg) = false;
+ }
+
+ wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+ WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+ WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif // endif
+ if (err) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+ cfg->sched_scan_running = TRUE;
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+ } else {
+ WL_ERR(("Unsupported version %d, expected %d or %d\n", pfn_result_v1->version,
+ PFN_SCANRESULT_VERSION_V1, PFN_SCANRESULT_VERSION_V2));
+ return 0;
+ }
+out_err:
+ if (request) {
+ MFREE(cfg->osh, request,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ }
+ if (channel) {
+ MFREE(cfg->osh, channel,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ }
+
+ if (event_data) {
+ if (event_data->tlvs) {
+ MFREE(cfg->osh, event_data->tlvs, tlv_len);
+ }
+ MFREE(cfg->osh, event_data, alloc_len);
+ }
+ return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef PNO_SUPPORT
+s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+#ifdef GSCAN_SUPPORT
+ void *ptr;
+ int send_evt_bytes = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* GSCAN_SUPPORT */
+
+ WL_INFORM_MEM((">>> PNO Event\n"));
+
+ if (!data) {
+ WL_ERR(("Data received is NULL!\n"));
+ return 0;
+ }
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef GSCAN_SUPPORT
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ }
+ if (!dhd_dev_is_legacy_pno_enabled(ndev))
+ return 0;
+#endif /* GSCAN_SUPPORT */
+
+#ifndef WL_SCHED_SCAN
+ mutex_lock(&cfg->usr_sync);
+ /* TODO: Use cfg80211_sched_scan_results(wiphy); */
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+#else
+ /* If cfg80211 scheduled scan is supported, report the pno results via sched
+ * scan results
+ */
+ wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+ return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr = NULL;
+ int send_evt_bytes = 0;
+ int event_type;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+ u32 buf_len = 0;
+
+ switch (event) {
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+ if (data && len) {
+ event_type = *((int *)data);
+ }
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &event_type, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ event_type = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &event_type, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+		/* WLC_E_PFN_BSSID_NET_LOST shares its event code with WLC_E_PFN_SCAN_ALLGONE.
+		 * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so ignore it if received.
+		 */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ MFREE(cfg->osh, ptr, buf_len);
+ } else {
+ err = -ENOMEM;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_SSID_EXT:
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown event %d\n", event));
+ break;
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (strcmp(command, "SCAN-ACTIVE") == 0) {
+ cfg->active_scan = 1;
+ } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+ cfg->active_scan = 0;
+ } else
+ WL_ERR(("Unknown command \n"));
+ return;
+}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.h
new file mode 100644
index 0000000..2e29cf1
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgscan.h
@@ -0,0 +1,120 @@
+/*
+ * Header for Linux cfg80211 scan
+ *
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _wl_cfgscan_h_
+#define _wl_cfgscan_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+#include <osl.h>
+
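+/* cfg80211 moved the originating device of a scan request from
+ * scan_request->dev to scan_request->wdev in kernel 3.6; resolve the
+ * wireless_dev accordingly.
+ */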
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define GET_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#else
+#define GET_SCAN_WDEV(scan_request) \
+ scan_request ? scan_request->wdev : NULL;
+#endif // endif
+#ifdef WL_SCHED_SCAN
+#define GET_SCHED_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#endif /* WL_SCHED_SCAN */
+
+extern s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy,
+ struct net_device *ndev, struct cfg80211_scan_request *request);
+extern s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+extern void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+extern s32 wl_init_scan(struct bcm_cfg80211 *cfg);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+extern s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
+#ifdef PNO_SUPPORT
+extern s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+extern s32 wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+#define CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS 25000 /* Scan timeout */
+enum wl_custom_scan_time_type {
+ WL_CUSTOM_SCAN_CHANNEL_TIME = 0,
+ WL_CUSTOM_SCAN_UNASSOC_TIME,
+ WL_CUSTOM_SCAN_PASSIVE_TIME,
+ WL_CUSTOM_SCAN_HOME_TIME,
+ WL_CUSTOM_SCAN_HOME_AWAY_TIME
+};
+extern s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
+ enum wl_custom_scan_time_type type, int time);
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
+int wl_cfg80211_random_mac_enable(struct net_device *dev);
+int wl_cfg80211_random_mac_disable(struct net_device *dev);
+int wl_cfg80211_scan_mac_enable(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask);
+int wl_cfg80211_scan_mac_disable(struct net_device *dev);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef WL_SCHED_SCAN
+extern int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_sched_scan_request *request);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 req);
+#else
+int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+#endif /* WL_SCHED_SCAN */
+#endif /* _wl_cfgscan_h_ */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
index b064037..1b972c4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.c 702754 2017-06-03 15:54:21Z $
+ * $Id: wl_cfgvendor.c 815871 2019-04-22 06:21:38Z $
*/
/*
@@ -53,6 +55,9 @@
#include <wlioctl.h>
#include <wlioctl_utils.h>
#include <dhd_cfg80211.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif /* PNO_SUPPORT */
@@ -91,14 +96,14 @@
char*
wl_get_kernel_timestamp(void)
{
- static char buf[16];
+ static char buf[32];
u64 ts_nsec;
unsigned long rem_nsec;
ts_nsec = local_clock();
- rem_nsec = do_div(ts_nsec, 1000000000);
+ rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
snprintf(buf, sizeof(buf), "%5lu.%06lu",
- (unsigned long)ts_nsec, rem_nsec / 1000);
+ (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
return buf;
}
@@ -150,6 +155,50 @@
return wl_cfgvendor_send_async_event(wiphy,
bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
}
+
+int
+wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...)
+{
+ char buf[SUPP_LOG_LEN] = {0};
+ struct bcm_cfg80211 *cfg;
+ struct wiphy *wiphy;
+ va_list args;
+ int len;
+ int prefix_len;
+ int rem_len;
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_DBG(("supp evt invalid arg\n"));
+ return BCME_OK;
+ }
+ wiphy = cfg->wdev->wiphy;
+ prefix_len = snprintf(buf, SUPP_LOG_LEN, "%s ", evt_name);
+ /* Remaining buffer len */
+ rem_len = SUPP_LOG_LEN - (prefix_len + 1);
+ /* Print the arg list on to the remaining part of the buffer */
+ va_start(args, fmt);
+ len = vsnprintf((buf + prefix_len), rem_len, fmt, args);
+ va_end(args);
+ if (len < 0) {
+ return -EINVAL;
+ }
+
+ if (len > rem_len) {
+		/* If the returned length is greater than the remaining buffer
+		 * length, the output was truncated.
+		 */
+ len = rem_len;
+ }
+
+ /* Ensure the buffer is null terminated */
+ len += prefix_len;
+ buf[len] = '\0';
+ len++;
+
+ return wl_cfgvendor_send_async_event(wiphy,
+ bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
+}
#endif /* WL_SUPP_EVENT */
/*
@@ -161,7 +210,7 @@
int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
struct net_device *dev, int event_id, const void *data, int len)
{
- u16 kflags;
+ gfp_t kflags;
struct sk_buff *skb;
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
@@ -248,11 +297,21 @@
goto exit;
}
- nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+ MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
for (i = 0; i < MAX_FEATURE_SET_CONCURRRENT_GROUPS; i++) {
reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), i);
if (reply != WIFI_FEATURE_INVALID) {
- nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply);
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+ reply);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
}
}
@@ -266,28 +325,34 @@
}
static int
-wl_cfgvendor_set_rand_mac_oui(struct wiphy *wiphy,
+wl_cfgvendor_set_pno_rand_mac_oui(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = 0;
+ int err = -EINVAL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ goto exit;
+ }
+
type = nla_type(data);
- if (type == ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI) {
+ if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) {
if (nla_len(data) != DOT11_OUI_LEN) {
WL_ERR(("nla_len not matched.\n"));
- err = -EINVAL;
goto exit;
}
err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), nla_data(data));
if (unlikely(err))
WL_ERR(("Bad OUI, could not set:%d \n", err));
-
- } else {
- err = -EINVAL;
}
exit:
return err;
@@ -297,18 +362,27 @@
wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = 0;
+ int err = -EINVAL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
u32 nodfs;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
type = nla_type(data);
if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
nodfs = nla_get_u32(data);
err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
- } else {
- err = -1;
}
+
return err;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
@@ -327,17 +401,20 @@
type = nla_type(iter);
switch (type) {
case ANDR_WIFI_ATTRIBUTE_COUNTRY:
- memcpy(country_code, nla_data(iter),
- MIN(nla_len(iter), WLC_CNTRY_BUF_SZ));
+ err = memcpy_s(country_code, WLC_CNTRY_BUF_SZ,
+ nla_data(iter), nla_len(iter));
+ if (err) {
+ WL_ERR(("Failed to copy country code: %d\n", err));
+ return err;
+ }
break;
default:
WL_ERR(("Unknown type: %d\n", type));
return err;
}
}
-
/* country code is unique for dongle..hence using primary interface. */
- err = wldev_set_country(primary_ndev, country_code, true, true, -1);
+ err = wl_cfg80211_set_country_code(primary_ndev, country_code, true, true, -1);
if (err < 0) {
WL_ERR(("Set country failed ret:%d\n", err));
}
@@ -350,7 +427,7 @@
wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
struct net_device *dev, void *data, int len, wl_vendor_event_t event)
{
- u16 kflags;
+ gfp_t kflags;
const void *ptr;
struct sk_buff *skb;
int malloc_len, total, iter_cnt_to_send, cnt;
@@ -425,7 +502,6 @@
}
err = wl_cfgvendor_send_cmd_reply(wiphy, reply, reply_len);
-
if (unlikely(err)) {
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
}
@@ -459,6 +535,7 @@
}
results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
if (!results) {
WL_ERR(("No results to send %d\n", err));
err = wl_cfgvendor_send_cmd_reply(wiphy, results, 0);
@@ -490,6 +567,13 @@
iter = results;
complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
sizeof(is_done));
+
+ if (unlikely(!complete_flag)) {
+ WL_ERR(("complete_flag could not be reserved"));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return -ENOMEM;
+ }
mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
while (iter) {
@@ -505,30 +589,49 @@
is_done = 0;
break;
}
- nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
- nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
- nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
-
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
+ if (unlikely(err)) {
+ goto fail;
+ }
num_results_iter = iter->tot_count - iter->tot_consumed;
- nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ if (unlikely(err)) {
+ goto fail;
+ }
if (num_results_iter) {
ptr = &iter->results[iter->tot_consumed];
- iter->tot_consumed += num_results_iter;
- nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+ err = nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ iter->tot_consumed += num_results_iter;
}
nla_nest_end(skb, scan_hdr);
mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
(num_results_iter * sizeof(wifi_gscan_result_t));
iter = iter->next;
}
- MFREE(cfg->osh, results, reply_len);
- /* Returns TRUE if all result consumed */
+ /* Cleans up consumed results and returns TRUE if all results are consumed */
is_done = dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
memcpy(nla_data(complete_flag), &is_done, sizeof(is_done));
dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
return cfg80211_vendor_cmd_reply(skb);
+fail:
+ /* Free up consumed results which will now not be sent */
+ (void)dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return err;
}
static int
@@ -572,6 +675,16 @@
int type;
bool real_time = FALSE;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
@@ -1102,18 +1215,39 @@
uint16 *reply = NULL;
uint32 reply_len = 0, num_channels, mem_needed;
struct sk_buff *skb;
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = wdev->netdev;
+
+ if (!ndev) {
+ WL_ERR(("ndev null\n"));
+ return -EINVAL;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(ndev);
+ if (!dhdp) {
+ WL_ERR(("dhdp null\n"));
+ return -EINVAL;
+ }
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
type = nla_type(data);
-
if (type == GSCAN_ATTRIBUTE_BAND) {
band = nla_get_u32(data);
} else {
return -EINVAL;
}
- reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ reply = dhd_pno_get_gscan(dhdp,
DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
-
if (!reply) {
WL_ERR(("Could not get channel list\n"));
err = -EINVAL;
@@ -1176,38 +1310,6 @@
}
#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
-static int
-wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int err = BCME_OK, type;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
- uint8 enable = 0;
-
- if (len <= 0) {
- WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
- err = BCME_BADARG;
- goto exit;
- }
-
- type = nla_type(data);
- if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
- enable = (uint8)nla_get_u32(data);
- err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
- if (unlikely(err)) {
- WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
- }
- } else {
- err = BCME_BADARG;
- }
-
-exit:
- return err;
-}
-#endif /* DHDTCPACK_SUPPRESS */
-
#ifdef DHD_WAKE_STATUS
static int
wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
@@ -1219,7 +1321,7 @@
#if defined(DHD_DEBUG) && defined(DHD_WAKE_EVENT_STATUS)
int flowid;
#endif /* DHD_DEBUG && DHD_WAKE_EVENT_STATUS */
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
WL_DBG(("Recv get wake status info cmd.\n"));
@@ -1236,10 +1338,22 @@
}
#ifdef DHD_WAKE_EVENT_STATUS
WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
- nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total count of CMD event, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Max count of event used, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
pwake_count_info->rc_event);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Event wake data, ret=%d\n", ret));
+ goto exit;
+ }
#ifdef DHD_DEBUG
for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
if (pwake_count_info->rc_event[flowid] != 0) {
@@ -1251,44 +1365,253 @@
#endif /* DHD_WAKE_EVENT_STATUS */
#ifdef DHD_WAKE_RX_STATUS
WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to put Total wake due to RX data, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX unicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to put Total wake due to RX multicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX broadcast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMP pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to put Total wake due to ICMPV6 pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_RA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NS, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv4);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV4 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv6);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV6 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_other);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to Other RX Multicast, ret=%d\n", ret));
+ goto exit;
+ }
#endif /* #ifdef DHD_WAKE_RX_STATUS */
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("Vendor cmd reply for -get wake status failed:%d \n", ret));
}
+ /* On cfg80211_vendor_cmd_reply() skb is consumed and freed in case of success or failure */
+ return ret;
+
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
#endif /* DHD_WAKE_STATUS */
+#ifdef DHDTCPACK_SUPPRESS
+static int
+wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ uint8 enable = 0;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
+ enable = (uint8) nla_get_u32(data);
+ err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
+ if (unlikely(err)) {
+ WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
+ }
+ } else {
+ err = BCME_BADARG;
+ }
+
+exit:
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+static int
+wl_cfgvendor_set_latency_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK;
+ int type;
+ bool lmode;
+
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ WL_DBG(("wl_cfgvendor_set_latency_mode Enter \n"));
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
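+	/* Low-latency mode is implemented by toggling 802.11 power save:
+	 * enabling latency mode disables PM on the interface, and vice versa.
+	 */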
+ if (type == ANDR_WIFI_ATTRIBUTE_LATENCY_MODE) {
+ lmode = (bool)nla_get_u32(data);
+ err = wl_cfg80211_set_power_mgmt(wiphy, ndev, !lmode, 0);
+ }
+ return err;
+}
+
+#ifndef CONFIG_SOC_S5E5515
+static int
+wl_cfgvendor_set_random_mac(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK;
+
+ WL_ERR(("ANDR_WIFI_ATTRIBUTE_RANDOM_MAC is not available\n"));
+ err = BCME_UNSUPPORTED;
+
+ return err;
+}
+#endif /* CONFIG_SOC_S5E5515 */
+
+static int
+wl_cfgvendor_set_tx_power_scenario(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK;
+ int type;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_TX_POWER_SCENARIO) {
+ //Handling to be implemented
+ //err = wl_cfg80211_set_tx_power(wiphy,wdev,type,0);
+ err = BCME_OK;
+ }
+ return err;
+}
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+static int
+wl_cfgvendor_notify_dump_completion(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ unsigned long flags = 0;
+
+ WL_INFORM(("%s, [DUMP] received file dump notification from HAL\n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ /* call wmb() to synchronize with the previous memory operations */
+ OSL_SMP_WMB();
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhd_pub);
+ /* Call another wmb() to make sure wait_for_dump_completion value
+ * gets updated before waking up waiting context.
+ */
+ OSL_SMP_WMB();
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#if defined(WL_CFG80211)
+static int
+wl_cfgvendor_set_hal_started(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ WL_INFORM(("%s,[DUMP] HAL STARTED\n", __FUNCTION__));
+
+ cfg->hal_started = true;
+ return BCME_OK;
+}
+
+static int
+wl_cfgvendor_stop_hal(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ WL_INFORM(("%s,[DUMP] HAL STOPPED\n", __FUNCTION__));
+
+ cfg->hal_started = false;
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 */
+
#ifdef RTT_SUPPORT
void
wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
{
struct wireless_dev *wdev = (struct wireless_dev *)ctx;
struct wiphy *wiphy;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
uint32 evt_complete = 0;
gfp_t kflags;
rtt_result_t *rtt_result;
rtt_results_header_t *rtt_header;
struct list_head *rtt_cache_list;
struct nlattr *rtt_nl_hdr;
+ int ret = BCME_OK;
wiphy = wdev->wiphy;
WL_DBG(("In\n"));
@@ -1313,14 +1636,15 @@
return;
}
evt_complete = 1;
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
cfg80211_vendor_event(skb, kflags);
return;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(rtt_header, rtt_cache_list, list) {
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
@@ -1339,24 +1663,55 @@
if (list_is_last(&rtt_header->list, rtt_cache_list)) {
evt_complete = 1;
}
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
rtt_nl_hdr = nla_nest_start(skb, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
if (!rtt_nl_hdr) {
WL_ERR(("rtt_nl_hdr is NULL\n"));
+ dev_kfree_skb_any(skb);
break;
}
- nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &rtt_header->peer_mac);
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+ ret = nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN,
+ &rtt_header->peer_mac);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT, ret:%d\n", ret));
+ goto free_mem;
+ }
list_for_each_entry(rtt_result, &rtt_header->result_list, list) {
- nla_put(skb, RTT_ATTRIBUTE_RESULT,
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT,
rtt_result->report_len, &rtt_result->report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT_DETAIL,
+ rtt_result->detail_len, &rtt_result->rtt_detail);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_DETAIL, ret:%d\n",
+ ret));
+ goto free_mem;
+ }
}
nla_nest_end(skb, rtt_nl_hdr);
cfg80211_vendor_event(skb, kflags);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
+ return;
+
+free_mem:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
}
static int
@@ -1372,7 +1727,7 @@
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
rtt_capabilities_t capability;
- memset(&rtt_param, 0, sizeof(rtt_param));
+ bzero(&rtt_param, sizeof(rtt_param));
WL_DBG(("In\n"));
err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
@@ -1395,6 +1750,11 @@
type = nla_type(iter);
switch (type) {
case RTT_ATTRIBUTE_TARGET_CNT:
+ if (target_cnt != 0) {
+ WL_ERR(("attempt to overwrite target_cnt"));
+ err = -EINVAL;
+ goto exit;
+ }
target_cnt = nla_get_u8(iter);
if ((target_cnt <= 0) || (target_cnt > RTT_MAX_TARGET_CNT)) {
WL_ERR(("target_cnt is not valid : %d\n",
@@ -1423,10 +1783,21 @@
}
rtt_target = rtt_param.target_info;
nla_for_each_nested(iter1, iter, rem1) {
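+ /* Stop parsing if more targets are supplied than TARGET_INFO_SIZE(target_cnt) can hold */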
+ if ((uint8 *)rtt_target >= ((uint8 *)rtt_param.target_info +
+ TARGET_INFO_SIZE(target_cnt))) {
+ WL_ERR(("rtt_target increased over its max size"));
+ err = -EINVAL;
+ goto exit;
+ }
nla_for_each_nested(iter2, iter1, rem2) {
type = nla_type(iter2);
switch (type) {
case RTT_ATTRIBUTE_TARGET_MAC:
+ if (nla_len(iter2) != ETHER_ADDR_LEN) {
+ WL_ERR(("mac_addr length not match\n"));
+ err = -EINVAL;
+ goto exit;
+ }
memcpy(&rtt_target->addr, nla_data(iter2),
ETHER_ADDR_LEN);
break;
@@ -1546,7 +1917,6 @@
if (rtt_param.target_info) {
MFREE(cfg->osh, rtt_param.target_info,
TARGET_INFO_SIZE(target_cnt));
- rtt_param.target_info = NULL;
}
return err;
}
@@ -1621,7 +1991,8 @@
}
}
cancel:
- if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+ if (mac_list && dhd_dev_rtt_cancel_cfg(
+ bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
WL_ERR(("Could not cancel RTT configuration\n"));
err = -EINVAL;
}
@@ -1688,7 +2059,7 @@
WL_DBG(("Recv -get_avail_ch command \n"));
- memset(&responder_info, 0, sizeof(responder_info));
+ bzero(&responder_info, sizeof(responder_info));
err = get_responder_info(cfg, &responder_info);
if (unlikely(err)) {
WL_ERR(("Failed to get responder info:%d \n", err));
@@ -1712,7 +2083,7 @@
WL_DBG(("Recv rtt -enable_resp cmd.\n"));
- memset(&responder_info, 0, sizeof(responder_info));
+ bzero(&responder_info, sizeof(responder_info));
/*
*Passing channel as NULL until implementation
@@ -1763,6 +2134,16 @@
int type;
uint32 lazy_roam_enable_flag;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invaild len %d\n", len));
+ return -EINVAL;
+ }
+
type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE) {
@@ -1773,6 +2154,7 @@
if (unlikely(err))
WL_ERR(("Could not enable lazy roam:%d \n", err));
}
+
return err;
}
@@ -1784,7 +2166,7 @@
wlc_roam_exp_params_t roam_param;
const struct nlattr *iter;
- memset(&roam_param, 0, sizeof(roam_param));
+ bzero(&roam_param, sizeof(roam_param));
nla_for_each_attr(iter, data, len, tmp) {
type = nla_type(iter);
@@ -2193,6 +2575,7 @@
WL_ERR(("not matching ssid count:%d to expected:%d\n",
ssid_whitelist->ssid_count, num));
err = -EINVAL;
+ goto exit;
}
err = dhd_dev_set_whitelist_ssid(bcmcfg_to_prmry_ndev(cfg),
ssid_whitelist, mem_needed, flush);
@@ -2218,6 +2601,16 @@
int type;
int err = 0;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
/* Get the requested fw roaming state */
type = nla_type(data);
if (type != GSCAN_ATTRIBUTE_ROAM_STATE_SET) {
@@ -2274,11 +2667,7 @@
struct sk_buff *reply;
#if defined(OEM_ANDROID)
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
-#endif /* (OEM_ANDROID) */
- WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
-
-#if defined(OEM_ANDROID)
/* send to dongle only if we are not waiting for reload already */
if (dhdp && dhdp->hang_was_sent) {
WL_INFORM(("Bus down. HANG was sent up earlier\n"));
@@ -2288,30 +2677,43 @@
}
#endif /* (OEM_ANDROID) */
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return BCME_BADARG;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return BCME_BADARG;
+ }
+
+ WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
+
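+ /* Sanity-check the netlink ioctl header: the payload must start
+ * right after bcm_nlmsg_hdr and the total length must cover it
+ */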
+ if (nlioc->offset != sizeof(struct bcm_nlmsg_hdr) ||
+ len <= sizeof(struct bcm_nlmsg_hdr)) {
+ WL_ERR(("invalid offset %d\n", nlioc->offset));
+ return BCME_BADARG;
+ }
len -= sizeof(struct bcm_nlmsg_hdr);
ret_len = nlioc->len;
if (ret_len > 0 || len > 0) {
- if (len > DHD_IOCTL_MAXLEN) {
+ if (len >= DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize input buffer %d\n", len));
- len = DHD_IOCTL_MAXLEN;
+ len = DHD_IOCTL_MAXLEN - 1;
}
- if (ret_len > DHD_IOCTL_MAXLEN) {
+ if (ret_len >= DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize return buffer %d\n", ret_len));
- ret_len = DHD_IOCTL_MAXLEN;
+ ret_len = DHD_IOCTL_MAXLEN - 1;
}
+
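+ /* Allocate one extra byte so the copied ioctl payload can be NUL-terminated below */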
payload = max(ret_len, len) + 1;
buf = vzalloc(payload);
if (!buf) {
return -ENOMEM;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
memcpy(buf, (void *)((char *)nlioc + nlioc->offset), len);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
*((char *)buf + len) = '\0';
}
@@ -2323,7 +2725,7 @@
}
cur = buf;
while (ret_len > 0) {
- msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len;
+ msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
ret_len -= msglen;
payload = msglen + sizeof(msglen);
reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
@@ -2353,12 +2755,13 @@
struct net_device *
wl_cfgvendor_get_ndev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
- const void *data, unsigned long int *out_addr)
+ const char *data, unsigned long int *out_addr)
{
char *pos, *pos1;
char ifname[IFNAMSIZ + 1] = {0};
struct net_info *iter, *next;
struct net_device *ndev = NULL;
+ ulong ifname_len;
*out_addr = (unsigned long int) data; /* point to command str by default */
/* check whether ifname=<ifname> is provided in the command */
@@ -2370,11 +2773,13 @@
WL_ERR(("command format error \n"));
return NULL;
}
- memcpy(ifname, pos, (pos1 - pos));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ ifname_len = pos1 - pos;
+ if (memcpy_s(ifname, (sizeof(ifname) - 1), pos, ifname_len) != BCME_OK) {
+ WL_ERR(("Failed to copy data. len: %ld\n", ifname_len));
+ return NULL;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname,
@@ -2388,9 +2793,7 @@
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
WL_ERR(("Couldn't find ifname:%s in the netinfo list \n",
ifname));
return NULL;
@@ -2402,6 +2805,53 @@
return ndev;
}
+#ifdef WL_SAE
+static int
+wl_cfgvendor_set_sae_password(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK;
+ struct net_device *net = wdev->netdev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ wsec_pmk_t pmk;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, net->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", net->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (len < WSEC_MIN_PSK_LEN || len >= WSEC_MAX_PSK_LEN) {
+ WL_ERR(("Invalid passphrase length %d..should be >=8 and <=63\n",
+ len));
+ err = BCME_BADLEN;
+ goto done;
+ }
+ /* Set AUTH to SAE */
+ err = wldev_iovar_setint_bsscfg(net, "wpa_auth", WPA3_AUTH_SAE_PSK, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (0x%x)\n", err));
+ goto done;
+ }
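+ /* WSEC_PASSPHRASE marks pmk.key as a passphrase rather than a raw PMK */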
+ pmk.key_len = htod16(len);
+ bcopy((const u8*)data, pmk.key, len);
+ pmk.flags = htod16(WSEC_PASSPHRASE);
+
+ err = wldev_ioctl_set(net, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (err) {
+ WL_ERR(("\n failed to set pmk %d\n", err));
+ goto done;
+ } else {
+ WL_MEM(("sae passphrase set successfully\n"));
+ }
+done:
+ return err;
+}
+#endif /* WL_SAE */
+
+#ifdef BCM_PRIV_CMD_SUPPORT
+/* strlen("ifname=") + IFNAMESIZE + strlen(" ") + '\0' */
+#define ANDROID_PRIV_CMD_IF_PREFIX_LEN (7 + IFNAMSIZ + 2)
/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
* would be a formatted string and reply buf would be the size of the
* string.
@@ -2415,15 +2865,18 @@
int err = 0;
int data_len = 0, cmd_len = 0, tmp = 0, type = 0;
struct net_device *ndev = wdev->netdev;
- char *reply_buf = NULL;
char *cmd = NULL;
-#ifdef OEM_ANDROID
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int bytes_written;
struct net_device *net = NULL;
unsigned long int cmd_out = 0;
- u32 reply_len = WL_DRIVER_PRIV_CMD_LEN;
-#endif /* OEM_ANDROID */
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
+ u32 cmd_buf_len = WL_DRIVER_PRIV_CMD_LEN;
+ char cmd_prefix[ANDROID_PRIV_CMD_IF_PREFIX_LEN + 1] = {0};
+ char *cmd_buf = NULL;
+ char *current_pos;
+ u32 cmd_offset;
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
WL_DBG(("%s: Enter \n", __func__));
@@ -2442,66 +2895,107 @@
goto exit;
}
-#ifdef OEM_ANDROID
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
if (type == BRCM_ATTR_DRIVER_CMD) {
- if (cmd_len >= WL_DRIVER_PRIV_CMD_LEN) {
- WL_ERR(("Unexpected command length. Ignore the command\n"));
+ if ((cmd_len >= WL_DRIVER_PRIV_CMD_LEN) ||
+ (cmd_len < ANDROID_PRIV_CMD_IF_PREFIX_LEN)) {
+ WL_ERR(("Unexpected command length (%u)."
+ "Ignore the command\n", cmd_len));
err = -EINVAL;
goto exit;
}
- net = wl_cfgvendor_get_ndev(cfg, wdev, cmd, &cmd_out);
- if (!cmd_out || !net) {
- err = -ENODEV;
- goto exit;
- }
- cmd = (char *)cmd_out;
- reply_buf = (char *)MALLOCZ(cfg->osh, reply_len);
- if (!reply_buf) {
- WL_ERR(("memory alloc failed for %u \n", cmd_len));
+
+ /* check whether there is any ifname prefix provided */
+ if (memcpy_s(cmd_prefix, (sizeof(cmd_prefix) - 1),
+ cmd, ANDROID_PRIV_CMD_IF_PREFIX_LEN) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
err = -ENOMEM;
goto exit;
}
- memcpy(reply_buf, cmd, cmd_len);
- WL_DBG(("vendor_command: %s len: %u \n", cmd, cmd_len));
- bytes_written = wl_handle_private_cmd(net, reply_buf, reply_len);
+
+ net = wl_cfgvendor_get_ndev(cfg, wdev, cmd_prefix, &cmd_out);
+ if (!cmd_out || !net) {
+ WL_ERR(("ndev not found\n"));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* find offset of the command */
+ current_pos = (char *)cmd_out;
+ cmd_offset = current_pos - cmd_prefix;
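+ /* cmd_out points just past the parsed "ifname=<name> " prefix in
+ * cmd_prefix; the same offset locates the command body in cmd
+ */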
+
+ if (!current_pos || (cmd_offset) > ANDROID_PRIV_CMD_IF_PREFIX_LEN) {
+ WL_ERR(("Invalid len cmd_offset: %u \n", cmd_offset));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Private command data is expected to be in str format. To ensure that
+ * the data is null terminated, copy to a local buffer before use
+ */
+ cmd_buf = (char *)MALLOCZ(cfg->osh, cmd_buf_len);
+ if (!cmd_buf) {
+ WL_ERR(("memory alloc failed for %u \n", cmd_buf_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Point to the start of command */
+ if (memcpy_s(cmd_buf, (WL_DRIVER_PRIV_CMD_LEN - 1),
+ (const void *)(cmd + cmd_offset),
+ (cmd_len - cmd_offset - 1)) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+ cmd_buf[WL_DRIVER_PRIV_CMD_LEN - 1] = '\0';
+
+ WL_DBG(("vendor_command: %s len: %u \n", cmd_buf, cmd_buf_len));
+ bytes_written = wl_handle_private_cmd(net, cmd_buf, cmd_buf_len);
WL_DBG(("bytes_written: %d \n", bytes_written));
if (bytes_written == 0) {
- snprintf(reply_buf, reply_len, "%s", "OK");
- data_len = strlen("OK");
+ snprintf(cmd_buf, cmd_buf_len, "%s", "OK");
+ data_len = sizeof("OK");
} else if (bytes_written > 0) {
- data_len = bytes_written > reply_len ?
- reply_len : bytes_written;
+ if (bytes_written >= (cmd_buf_len - 1)) {
+ /* Not expected */
+ ASSERT(0);
+ err = -EINVAL;
+ goto exit;
+ }
+ data_len = bytes_written;
} else {
/* -ve return value. Propagate the error back */
err = bytes_written;
goto exit;
}
+ if ((data_len > 0) && (data_len < (cmd_buf_len - 1)) && cmd_buf) {
+ err = wl_cfgvendor_send_cmd_reply(wiphy, cmd_buf, data_len);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ } else {
+ WL_DBG(("Vendor Command reply sent successfully!\n"));
+ }
+ } else {
+ /* No data to be sent back as reply */
+ WL_ERR(("Vendor_cmd: No reply expected. data_len:%u cmd_buf %p \n",
+ data_len, cmd_buf));
+ }
break;
}
-#endif /* OEM_ANDROID */
- }
-
- if ((data_len > 0) && reply_buf) {
- err = wl_cfgvendor_send_cmd_reply(wiphy, reply_buf, data_len+1);
- if (unlikely(err))
- WL_ERR(("Vendor Command reply failed ret:%d \n", err));
- else
- WL_DBG(("Vendor Command reply sent successfully!\n"));
- } else {
- /* No data to be sent back as reply */
- WL_ERR(("Vendor_cmd: No reply expected. data_len:%u reply_buf %p \n",
- data_len, reply_buf));
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
}
exit:
-#ifdef OEM_ANDROID
- if (reply_buf) {
- MFREE(cfg->osh, reply_buf, reply_len);
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
+ if (cmd_buf) {
+ MFREE(cfg->osh, cmd_buf, cmd_buf_len);
}
-#endif /* OEM_ANDRIOD */
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
net_os_wake_unlock(ndev);
return err;
}
+#endif /* BCM_PRIV_CMD_SUPPORT */
#ifdef WL_NAN
static const char *nan_attr_to_str(u16 cmd)
@@ -2780,8 +3274,12 @@
switch (attr_type) {
case NAN_ATTRIBUTE_MAC_ADDR:
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_PUBLISH_ID:
cmd_data->pub_id = nla_get_u16(iter);
@@ -2869,32 +3367,48 @@
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->peer_disc_mac_addr,
- (char*)nla_data(iter), ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->peer_disc_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_disc_mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->peer_ndi_mac_addr,
- (char*)nla_data(iter), ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->peer_ndi_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_ndi_mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_MAC_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_IF_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->if_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->if_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy if_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_ENTRY_CONTROL:
if (nla_len(iter) != sizeof(uint8)) {
@@ -2979,8 +3493,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_hash.data, nla_data(iter),
- cmd_data->svc_hash.dlen);
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
if (nla_len(iter) != sizeof(uint16)) {
@@ -2994,7 +3512,7 @@
}
cmd_data->svc_info.dlen = nla_get_u16(iter);
if (cmd_data->svc_info.dlen > MAX_APP_INFO_LEN) {
- WL_ERR(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
ret = -EINVAL;
goto exit;
}
@@ -3019,8 +3537,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_info.data,
- nla_data(iter), cmd_data->svc_info.dlen);
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_PUBLISH_ID:
if (nla_len(iter) != sizeof(uint32)) {
@@ -3084,8 +3606,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->key.data, nla_data(iter),
- MIN(cmd_data->key.dlen, NAN_MAX_PMK_LEN));
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ goto exit;
+ }
break;
default:
@@ -3144,8 +3670,12 @@
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
if (nla_len(iter) != sizeof(uint16)) {
@@ -3159,8 +3689,8 @@
}
cmd_data->svc_info.dlen = nla_get_u16(iter);
if (cmd_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
- WL_ERR(("Not allowed beyond :%d\n",
- NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond :%d\n",
+ NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
ret = -EINVAL;
goto exit;
}
@@ -3186,8 +3716,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_info.data,
- nla_data(iter), cmd_data->svc_info.dlen);
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SUBSCRIBE_ID:
if (nla_len(iter) != sizeof(uint16)) {
@@ -3291,8 +3825,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_hash.data, nla_data(iter),
- cmd_data->svc_hash.dlen);
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_PEER_ID:
if (nla_len(iter) != sizeof(uint32)) {
@@ -3384,7 +3922,7 @@
cmd_data->rx_match.dlen = nla_get_u16(iter);
if (cmd_data->rx_match.dlen > MAX_MATCH_FILTER_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
goto exit;
}
break;
@@ -3409,8 +3947,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->rx_match.data, nla_data(iter),
- cmd_data->rx_match.dlen);
+ ret = memcpy_s(cmd_data->rx_match.data, cmd_data->rx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy rx match data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN:
if (nla_len(iter) != sizeof(uint16)) {
@@ -3425,7 +3967,7 @@
cmd_data->tx_match.dlen = nla_get_u16(iter);
if (cmd_data->tx_match.dlen > MAX_MATCH_FILTER_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
goto exit;
}
break;
@@ -3450,8 +3992,12 @@
ret = -EINVAL;
goto exit;
}
- memcpy(cmd_data->tx_match.data, nla_data(iter),
- cmd_data->tx_match.dlen);
+ ret = memcpy_s(cmd_data->tx_match.data, cmd_data->tx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES:
if (nla_len(iter) != sizeof(uint16)) {
@@ -3487,8 +4033,13 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->mac_list.list, nla_data(iter),
- (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN));
+ ret = memcpy_s(cmd_data->mac_list.list,
+ (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN),
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy list of mac addresses\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_TX_TYPE:
if (nla_len(iter) != sizeof(uint8)) {
@@ -3517,6 +4068,7 @@
ret = -EINVAL;
goto exit;
}
+ cmd_data->sde_control_config = TRUE;
if (nla_get_u8(iter) == 1) {
cmd_data->sde_control_flag
|= NAN_SDE_CF_RANGING_REQUIRED;
@@ -3607,8 +4159,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->key.data, nla_data(iter),
- MIN(cmd_data->key.dlen, NAN_MAX_PMK_LEN));
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG:
if (nla_len(iter) != sizeof(uint8)) {
@@ -3641,7 +4197,7 @@
cmd_data->sde_svc_info.dlen = nla_get_u16(iter);
if (cmd_data->sde_svc_info.dlen > MAX_SDEA_SVC_INFO_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
goto exit;
}
break;
@@ -3666,8 +4222,13 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->sde_svc_info.data,
- nla_data(iter), cmd_data->sde_svc_info.dlen);
+ ret = memcpy_s(cmd_data->sde_svc_info.data,
+ cmd_data->sde_svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to sdea info data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SECURITY:
if (nla_len(iter) != sizeof(uint8)) {
@@ -3732,7 +4293,7 @@
int len, nan_config_cmd_data_t *cmd_data, uint32 *nan_attr_mask)
{
int ret = BCME_OK;
- int attr_type;
+ int attr_type = 0;
int rem = len;
const struct nlattr *iter;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
@@ -4072,7 +4633,7 @@
cmd_data->scid.dlen = nla_get_u32(iter);
if (cmd_data->scid.dlen > MAX_SCID_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_SCID_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SCID_LEN));
goto exit;
}
WL_TRACE(("valid scid length = %u\n", cmd_data->scid.dlen));
@@ -4089,6 +4650,7 @@
ret = -EINVAL;
goto exit;
}
+
cmd_data->scid.data = MALLOCZ(cfg->osh, cmd_data->scid.dlen);
if (cmd_data->scid.data == NULL) {
WL_ERR(("failed to allocate scid, len=%d\n",
@@ -4096,7 +4658,12 @@
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->scid.data, nla_data(iter), cmd_data->scid.dlen);
+ ret = memcpy_s(cmd_data->scid.data, cmd_data->scid.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to scid data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_2G_AWAKE_DW:
if (nla_len(iter) != sizeof(uint32)) {
@@ -4144,8 +4711,12 @@
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL:
if (nla_len(iter) != sizeof(uint32)) {
@@ -4572,13 +5143,105 @@
return ret;
}
+#ifdef RTT_SUPPORT
+s32
+wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy, struct net_device *dev,
+ wl_nan_ev_rng_rpt_ind_t *range_res, uint32 status)
+{
+ s32 ret = BCME_OK;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ rtt_report_t *report = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct sk_buff *msg = NULL;
+ struct nlattr *rtt_nl_hdr;
+
+ NAN_DBG_ENTER();
+
+ report = MALLOCZ(cfg->osh, sizeof(*report));
+ if (!report) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ if (range_res) {
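+ /* dist_mm is in millimetres; divide by 10 to report centimetres */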
+ report->distance = range_res->dist_mm/10;
+ ret = memcpy_s(&report->addr, ETHER_ADDR_LEN,
+ &range_res->peer_m_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_m_addr\n"));
+ goto exit;
+ }
+ }
+ report->status = (rtt_reason_t)status;
+ report->type = RTT_TWO_WAY;
+
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ msg = cfg80211_vendor_event_alloc(wiphy, NULL, 100,
+ GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+ msg = cfg80211_vendor_event_alloc(wiphy, 100, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!msg) {
+ WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULTS_COMPLETE, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto exit;
+ }
+ rtt_nl_hdr = nla_nest_start(msg, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
+ if (!rtt_nl_hdr) {
+ WL_ERR(("rtt_nl_hdr is NULL\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = nla_put(msg, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &report->addr);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC\n"));
+ goto exit;
+ }
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULT_CNT, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT\n"));
+ goto exit;
+ }
+ ret = nla_put(msg, RTT_ATTRIBUTE_RESULT,
+ sizeof(*report), report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS\n"));
+ goto exit;
+ }
+ nla_nest_end(msg, rtt_nl_hdr);
+ cfg80211_vendor_event(msg, kflags);
+ if (report) {
+ MFREE(cfg->osh, report, sizeof(*report));
+ }
+
+ return ret;
+exit:
+ if (msg)
+ dev_kfree_skb_any(msg);
+ WL_ERR(("Failed to send event GOOGLE_RTT_COMPLETE_EVENT,"
+ " -- Free skb, ret = %d\n", ret));
+ if (report)
+ MFREE(cfg->osh, report, sizeof(*report));
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+
int
wl_cfgvendor_send_nan_event(struct wiphy *wiphy, struct net_device *dev,
int event_id, nan_event_data_t *event_data)
{
int ret = BCME_OK;
int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct sk_buff *msg;
@@ -4755,7 +5418,7 @@
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
@@ -4818,7 +5481,7 @@
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
@@ -4876,19 +5539,12 @@
}
NAN_DBG_ENTER();
- if (!cfg->nan_init_state) {
- WL_ERR(("nan is not initialized\n"));
- ret = BCME_ERROR;
- goto exit;
- }
-
if (cfg->nan_enable) {
WL_ERR(("nan is already enabled\n"));
ret = BCME_OK;
goto exit;
}
-
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
@@ -4905,7 +5561,7 @@
goto exit;
}
/* Initializing Instance Id List */
- memset(cfg->nan_inst_ctrl, 0, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
+ bzero(cfg->nan_inst_ctrl, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_ENABLE,
&nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
@@ -4927,9 +5583,6 @@
int ret = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nan_hal_resp_t nan_req_resp;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
- int i;
-
NAN_DBG_ENTER();
if (!cfg->nan_init_state) {
@@ -4938,29 +5591,15 @@
goto exit;
}
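+ /* Hold if_sync so interface changes cannot race with the NAN disable */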
+ mutex_lock(&cfg->if_sync);
if (cfg->nan_enable) {
- cfg->nancfg.disable_pending = TRUE;
- ret = wl_cfgnan_stop_handler(wdev->netdev, cfg, dhdp->up, false);
+ ret = wl_cfgnan_disable(cfg, NAN_USER_INITIATED);
if (ret) {
- WL_ERR(("failed to stop nan error[%d]\n", ret));
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
}
}
-
- /* We have to remove NDIs so that P2P/Softap can work */
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created) {
- WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i));
- ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev, cfg,
- (char*)cfg->nancfg.ndi[i].ifname,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
- if (ret) {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- }
- cfg->nancfg.ndi[i].created = false;
- }
- }
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
-
+ mutex_unlock(&cfg->if_sync);
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DISABLE,
&nan_req_resp, ret, BCME_OK);
@@ -4985,7 +5624,8 @@
goto exit;
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
cmd_data->avail_params.duration = NAN_BAND_INVALID; /* Setting to some default */
cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
@@ -5039,7 +5679,7 @@
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
@@ -5086,7 +5726,7 @@
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
@@ -5133,7 +5773,7 @@
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
@@ -5164,8 +5804,7 @@
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
-
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgnan_get_capablities_handler(wdev->netdev, cfg, &nan_req_resp.capabilities);
if (ret) {
WL_ERR(("Could not get capabilities\n"));
@@ -5186,7 +5825,6 @@
struct wireless_dev *wdev, const void * data, int len)
{
int ret = 0;
- s32 idx;
nan_datapath_cmd_data_t *cmd_data = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nan_hal_resp_t nan_req_resp;
@@ -5206,7 +5844,7 @@
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
@@ -5214,19 +5852,14 @@
goto exit;
}
- /* Store the iface name to pub data so that it can be used
- * during NAN enable
- */
- if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
- WL_ERR(("No free idx for NAN NDI\n"));
- goto exit;
- }
- wl_cfgnan_add_ndi_data(cfg, idx, (char*)cmd_data->ndp_iface);
if (cfg->nan_enable) { /* new framework Impl, iface create called after nan enab */
- wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
+ ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
cfg, cmd_data->ndp_iface,
NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
- cfg->nancfg.ndi[idx].created = true;
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to create iface, ret = %d\n", ret));
+ goto exit;
+ }
}
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE,
@@ -5246,7 +5879,7 @@
nan_hal_resp_t nan_req_resp;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
- if (!cfg->nan_init_state) {
+ if (cfg->nan_init_state == false) {
WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
/* Deinit has taken care of cleaning the virtual iface */
ret = BCME_OK;
@@ -5260,7 +5893,7 @@
ret = BCME_NOMEM;
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
@@ -5271,26 +5904,10 @@
(char*)cmd_data->ndp_iface,
NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
if (ret) {
- if (ret == -ENODEV) {
- if (wl_cfgnan_get_ndi_data(cfg, (char*)cmd_data->ndp_iface) != NULL) {
- /* NDIs have been removed by the NAN disable command */
- WL_DBG(("NDI removed by nan_disable\n"));
- ret = BCME_OK;
- }
- } else {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- goto exit;
- }
+ WL_ERR(("failed to delete ndp iface [%d]\n", ret));
+ goto exit;
}
exit:
- if (cfg->nan_init_state) {
- /* After successful delete of interface, clear up the ndi data */
- if (wl_cfgnan_del_ndi_data(cfg, (char*)cmd_data->ndp_iface) < 0) {
- WL_ERR(("Failed to find matching data for ndi:%s\n",
- (char*)cmd_data->ndp_iface));
- }
- }
-
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE,
&nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
@@ -5322,7 +5939,7 @@
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
@@ -5371,7 +5988,7 @@
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
@@ -5412,7 +6029,7 @@
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
@@ -5460,7 +6077,8 @@
WL_ERR(("failed to parse sec info args\n"));
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgnan_sec_info_handler(cfg, cmd_data, &nan_req_resp);
if (ret) {
WL_ERR(("failed to retrieve svc hash/pub nmi error[%d]\n", ret));
@@ -5549,7 +6167,7 @@
RETURN_EIO_IF_NOT_UP(cfg);
/* Get the device rev info */
- memset(&revinfo, 0, sizeof(revinfo));
+ bzero(&revinfo, sizeof(revinfo));
err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
sizeof(revinfo));
if (err != BCME_OK) {
@@ -5562,8 +6180,8 @@
return -ENOMEM;
}
- memset(&scbval, 0, sizeof(scb_val_t));
- memset(outdata, 0, WLC_IOCTL_MAXLEN);
+ bzero(&scbval, sizeof(scb_val_t));
+ bzero(outdata, WLC_IOCTL_MAXLEN);
output = outdata;
err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
@@ -5574,7 +6192,7 @@
}
radio = (wifi_radio_stat *)iovar_buf;
- memset(&radio_h, 0, sizeof(wifi_radio_stat_h));
+ bzero(&radio_h, sizeof(wifi_radio_stat_h));
radio_h.on_time = radio->on_time;
radio_h.tx_time = radio->tx_time;
radio_h.rx_time = radio->rx_time;
@@ -5724,75 +6342,211 @@
}
#endif /* LINKSTAT_SUPPORT */
-#ifdef DEBUGABILITY
-static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+#ifdef DHD_LOG_DUMP
+static int
+wl_cfgvendor_get_buf_data(const struct nlattr *iter, struct buf_data **buf)
{
- int ret = BCME_OK, rem, type;
- char ring_name[DBGRING_NAME_MAX] = {0};
- int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+ int ret = BCME_OK;
+
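+ /* The attribute payload is expected to carry a struct buf_data;
+ * validate its size and the embedded buffer pointer/length before use
+ */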
+ if (nla_len(iter) != sizeof(struct buf_data)) {
+ WL_ERR(("Invalid len : %d\n", nla_len(iter)));
+ ret = BCME_BADLEN;
+ }
+ (*buf) = (struct buf_data *)nla_data(iter);
+ if (!(*buf) || (((*buf)->len) <= 0) || !((*buf)->data_buf[0])) {
+ WL_ERR(("Invalid buffer\n"));
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
+static int
+wl_cfgvendor_dbg_file_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type = 0;
const struct nlattr *iter;
+ char *mem_buf = NULL;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
+ struct buf_data *buf;
+ int pos = 0;
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ if (!skb) {
+ WL_ERR(("skb allocation is failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ WL_ERR(("%s\n", __FUNCTION__));
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
+ ret = wl_cfgvendor_get_buf_data(iter, &buf);
+ if (ret)
+ goto exit;
switch (type) {
- case DEBUG_ATTRIBUTE_RING_NAME:
- strncpy(ring_name, nla_data(iter),
- MIN(sizeof(ring_name) -1, nla_len(iter)));
+ case DUMP_BUF_ATTR_MEMDUMP:
+ ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf,
+ (uint32 *)(&(buf->len)));
+ if (ret) {
+ WL_ERR(("failed to get_socram_dump : %d\n", ret));
+ goto exit;
+ }
+ ret = dhd_export_debug_data(mem_buf, NULL, buf->data_buf[0],
+ (int)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_LEVEL:
- log_level = nla_get_u32(iter);
+
+ case DUMP_BUF_ATTR_TIMESTAMP :
+ ret = dhd_print_time_str(buf->data_buf[0], NULL,
+ (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_RING_FLAGS:
- flags = nla_get_u32(iter);
+#ifdef EWP_ECNTRS_LOGGING
+ case DUMP_BUF_ATTR_ECNTRS :
+ ret = dhd_print_ecntrs_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
- time_intval = nla_get_u32(iter);
+#endif /* EWP_ECNTRS_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+ case DUMP_BUF_ATTR_STATUS_LOG :
+ ret = dhd_print_status_log_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
- threshold = nla_get_u32(iter);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ case DUMP_BUF_ATTR_RTT_LOG :
+ ret = dhd_print_rtt_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
break;
+#endif /* EWP_RTT_LOGGING */
+ case DUMP_BUF_ATTR_DHD_DUMP :
+ ret = dhd_print_dump_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#if defined(BCMPCIE)
+ case DUMP_BUF_ATTR_EXT_TRAP :
+ ret = dhd_print_ext_trap_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* BCMPCIE */
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ case DUMP_BUF_ATTR_HEALTH_CHK :
+ ret = dhd_print_health_chk_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif // endif
+ case DUMP_BUF_ATTR_COOKIE :
+ ret = dhd_print_cookie_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#ifdef DHD_DUMP_PCIE_RINGS
+ case DUMP_BUF_ATTR_FLOWRING_DUMP :
+ ret = dhd_print_flowring_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif // endif
+ case DUMP_BUF_ATTR_GENERAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_GENERAL, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_PRESERVE_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_PRESERVE, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_SPECIAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_SPECIAL, &pos);
+ break;
+#ifdef DHD_SSSR_DUMP
+ case DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_C0_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_C1_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_DIG_BEFORE :
+ ret = dhd_sssr_dump_dig_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_DIG_AFTER :
+ ret = dhd_sssr_dump_dig_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_PKT_LOGGING
+ case DUMP_BUF_ATTR_PKTLOG:
+ ret = dhd_os_get_pktlog_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ case DUMP_BUF_ATTR_AXI_ERROR:
+ ret = dhd_os_get_axi_error_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DNGL_AXI_ERROR_LOGGING */
default:
WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_BADADDR;
+ ret = BCME_ERROR;
goto exit;
}
}
- ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ if (ret)
+ goto exit;
+
+ ret = nla_put_u32(skb, type, (uint32)(ret));
if (ret < 0) {
- WL_ERR(("start_logging is failed ret: %d\n", ret));
+ WL_ERR(("Failed to put type, ret:%d\n", ret));
+ goto exit;
}
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ }
+ return ret;
exit:
- return ret;
-}
-
-static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
-
- ret = dhd_os_reset_logging(dhd_pub);
- if (ret < 0) {
- WL_ERR(("reset logging is failed ret: %d\n", ret));
+ if (skb) {
+ /* Free skb memory */
+ kfree_skb(skb);
}
-
return ret;
}
+#endif /* DHD_LOG_DUMP */
+#ifdef DEBUGABILITY
static int
wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
int ret = BCME_OK;
uint32 alloc_len;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ WL_ERR(("wl_cfgvendor_dbg_trigger_mem_dump %d\n", __LINE__));
+
dhdp->memdump_type = DUMP_TYPE_CFG_VENDOR_TRIGGERED;
ret = dhd_os_socram_dump(bcmcfg_to_prmry_ndev(cfg), &alloc_len);
if (ret) {
@@ -5800,21 +6554,31 @@
goto exit;
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put fw dump length, ret=%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ goto exit;
}
-
+ return ret;
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
@@ -5827,7 +6591,7 @@
uintptr_t user_buf = (uintptr_t)NULL;
const struct nlattr *iter;
char *mem_buf = NULL;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nla_for_each_attr(iter, data, len, rem) {
@@ -5887,28 +6651,95 @@
}
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto free_mem;
}
/* Indicate the memdump is successfully copied */
- nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+ ret = nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_FW_DUMP_DATA, ret:%d\n", ret));
+ goto free_mem;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
}
+ skb = NULL;
}
free_mem:
vfree(mem_buf);
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
exit:
return ret;
}
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ char ring_name[DBGRING_NAME_MAX] = {0};
+ int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_RING_NAME:
+ strncpy(ring_name, nla_data(iter),
+ MIN(sizeof(ring_name) -1, nla_len(iter)));
+ break;
+ case DEBUG_ATTRIBUTE_LOG_LEVEL:
+ log_level = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_RING_FLAGS:
+ flags = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
+ time_intval = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+ threshold = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADADDR;
+ goto exit;
+ }
+ }
+
+ ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ if (ret < 0) {
+ WL_ERR(("start_logging is failed ret: %d\n", ret));
+ }
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ ret = dhd_os_reset_logging(dhd_pub);
+ if (ret < 0) {
+ WL_ERR(("reset logging is failed ret: %d\n", ret));
+ }
+
+ return ret;
+}
+
static int wl_cfgvendor_dbg_get_ring_status(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
@@ -5920,7 +6751,7 @@
dhd_dbg_ring_status_t ring_status;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhd_pub = cfg->pub;
- memset(dbg_ring_status, 0, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
+ bzero(dbg_ring_status, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
ring_cnt = 0;
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
ret = dhd_os_get_ring_status(dhd_pub, ring_id, &ring_status);
@@ -5932,16 +6763,19 @@
}
/* Alloc the SKB for vendor_event */
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
- (DBG_RING_STATUS_SIZE * ring_cnt) + 100);
+ nla_total_size(DBG_RING_STATUS_SIZE) * ring_cnt + nla_total_size(sizeof(ring_cnt)));
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
+ /* Ignore return of nla_put_u32 and nla_put since the skb allocated
+ * above has a requested size for all payload
+ */
+ (void)nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
for (i = 0; i < ring_cnt; i++) {
- nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
+ (void)nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
&dbg_ring_status[i]);
}
ret = cfg80211_vendor_cmd_reply(skb);
@@ -5966,8 +6800,7 @@
type = nla_type(iter);
switch (type) {
case DEBUG_ATTRIBUTE_RING_NAME:
- strncpy(ring_name, nla_data(iter),
- MIN(sizeof(ring_name) -1, nla_len(iter)));
+ strlcpy(ring_name, nla_data(iter), sizeof(ring_name));
break;
default:
WL_ERR(("Unknown type: %d\n", type));
@@ -5982,6 +6815,44 @@
return ret;
}
+#else
+static int
+wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int
+wl_cfgvendor_dbg_get_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int wl_cfgvendor_dbg_get_ring_status(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int wl_cfgvendor_dbg_get_ring_data(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
#endif /* DEBUGABILITY */
static int wl_cfgvendor_dbg_get_feature(struct wiphy *wiphy,
@@ -6036,39 +6907,450 @@
nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
cfg80211_vendor_event(skb, kflags);
}
+#endif /* DEBUGABILITY */
-static void wl_cfgvendor_dbg_send_urgent_evt(void *ctx, const void *data,
+#ifdef DHD_LOG_DUMP
+static int wl_cfgvendor_nla_put_sssr_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+#ifdef DHD_SSSR_DUMP
+ uint32 arr_len[DUMP_SSSR_ATTR_COUNT];
+ int i = 0, j = 0;
+#endif /* DHD_SSSR_DUMP */
+ char memdump_path[MEMDUMP_PATH_LEN];
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 0 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+#ifdef DHD_SSSR_DUMP
+ memset(arr_len, 0, sizeof(arr_len));
+ dhd_nla_put_sssr_dump_len(ndev, arr_len);
+
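+ /* Length attributes are numbered consecutively from DUMP_SSSR_ATTR_START,
+ * so arr_len[i] maps to attribute id DUMP_SSSR_ATTR_START + i; zero
+ * lengths are skipped
+ */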
+ for (i = 0, j = DUMP_SSSR_ATTR_START; i < DUMP_SSSR_ATTR_COUNT; i++, j++) {
+ if (arr_len[i]) {
+ ret = nla_put_u32(skb, j, arr_len[i]);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dump len, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+ }
+#endif /* DHD_SSSR_DUMP */
+
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_nla_put_debug_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+ uint32 len = 0;
+ char dump_path[128];
+
+ ret = dhd_get_debug_dump_file_name(ndev, NULL, dump_path, sizeof(dump_path));
+ if (ret < 0) {
+ WL_ERR(("%s: Failed to get debug dump filename\n", __FUNCTION__));
+ goto exit;
+ }
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_DEBUG_DUMP, dump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put debug dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ WL_ERR(("debug_dump path = %s%s\n", dump_path, FILE_NAME_HAL_TAG));
+ wl_print_verinfo(wl_get_cfg(ndev));
+
+ len = dhd_get_time_str_len();
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_TIMESTAMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put time stamp length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_GENERAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_GENERAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put general log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef EWP_ECNTRS_LOGGING
+ len = dhd_get_ecntrs_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_ECNTRS, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ecntrs length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+ len = dhd_get_dld_len(DLD_BUF_TYPE_SPECIAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_SPECIAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put special log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+ len = dhd_get_dhd_dump_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_DHD_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put dhd dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+#if defined(BCMPCIE)
+ len = dhd_get_ext_trap_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_EXT_TRAP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ext trap length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ len = dhd_get_health_chk_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_HEALTH_CHK, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put health check length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif // endif
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_PRESERVE);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PRESERVE_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put preserve log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_cookie_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_COOKIE, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put cookie length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_FLOWRING_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put flowring dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif // endif
+#ifdef DHD_STATUS_LOGGING
+ len = dhd_get_status_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_STATUS_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put status log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ len = dhd_get_rtt_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_RTT_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put rtt log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_RTT_LOGGING */
+exit:
+ return ret;
+}
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void wl_cfgvendor_nla_put_axi_error_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = 0;
+ char axierrordump_path[MEMDUMP_PATH_LEN];
+ int dumpsize = dhd_os_get_axi_error_dump_size(ndev);
+ if (dumpsize <= 0) {
+ WL_ERR(("Failed to calcuate axi error dump len\n"));
+ return;
+ }
+ dhd_os_get_axi_error_filename(ndev, axierrordump_path, MEMDUMP_PATH_LEN);
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_AXI_ERROR_DUMP, axierrordump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_AXI_ERROR, dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return;
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef DHD_PKT_LOGGING
+static void wl_cfgvendor_nla_put_pktlogdump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = 0;
+ char pktlogdump_path[MEMDUMP_PATH_LEN];
+ uint32 pktlog_dumpsize = dhd_os_get_pktlog_dump_size(ndev);
+ if (pktlog_dumpsize == 0) {
+ WL_ERR(("Failed to calcuate pktlog len\n"));
+ return;
+ }
+ dhd_os_get_pktlogdump_filename(ndev, pktlogdump_path, MEMDUMP_PATH_LEN);
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_PKTLOG_DUMP, pktlogdump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PKTLOG, pktlog_dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return;
+ }
+}
+#endif /* DHD_PKT_LOGGING */
+
+static int wl_cfgvendor_nla_put_memdump_data(struct sk_buff *skb,
+ struct net_device *ndev, const uint32 fw_len)
+{
+ char memdump_path[MEMDUMP_PATH_LEN];
+ int ret = BCME_OK;
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN, "mem_dump");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_MEM_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_MEMDUMP, fw_len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump length, ret=%d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void wl_cfgvendor_dbg_send_file_dump_evt(void *ctx, const void *data,
const uint32 len, const uint32 fw_len)
{
struct net_device *ndev = ctx;
struct wiphy *wiphy;
gfp_t kflags;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd_pub;
+ int ret = BCME_OK;
+
if (!ndev) {
WL_ERR(("ndev is NULL\n"));
return;
}
+
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
wiphy = ndev->ieee80211_ptr->wiphy;
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + 100,
- GOOGLE_FW_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
#else
- skb = cfg80211_vendor_event_alloc(wiphy, len + 100,
- GOOGLE_FW_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
if (!skb) {
WL_ERR(("skb alloc failed"));
return;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, fw_len);
- nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
+
+ cfg = wiphy_priv(wiphy);
+ dhd_pub = cfg->pub;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhd_pub->smmu_fault_occurred) {
+ wl_cfgvendor_nla_put_axi_error_data(skb, ndev);
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ if (dhd_pub->memdump_enabled || (dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP)) {
+ if (((ret = wl_cfgvendor_nla_put_memdump_data(skb, ndev, fw_len)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_debug_dump_data(skb, ndev)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_sssr_dump_data(skb, ndev)) < 0)) {
+ WL_ERR(("nla put failed\n"));
+ goto done;
+ }
+#ifdef DHD_PKT_LOGGING
+ wl_cfgvendor_nla_put_pktlogdump_data(skb, ndev);
+#endif /* DHD_PKT_LOGGING */
+ }
+ /* TODO: similar to the above function, add handling for debug_dump, sssr_dump, and pktlog as well. */
cfg80211_vendor_event(skb, kflags);
+ return;
+done:
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ }
}
-#endif /* DEBUGABILITY */
+#endif /* DHD_LOG_DUMP */
+
+static int wl_cfgvendor_priv_frameburst(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ int val = *(int *)data;
+ int get_frmbrst = 0;
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ if (val == 0xa) {
+ ret = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg),
+ WLC_GET_FAKEFRAG, &get_frmbrst, sizeof(int));
+ if (ret) {
+ WL_ERR(("Failed : %d\n", ret));
+ }
+ WL_DBG(("Get Frameburst : %d\n", get_frmbrst));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &get_frmbrst, sizeof(int));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+ else {
+ WL_DBG(("Set frameburst %d\n", val));
+ ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg),
+ WLC_SET_FAKEFRAG, &val, sizeof(val));
+ if (ret < 0) {
+ WL_ERR(("Failed set frameburst, ret=%d\n", ret));
+ } else {
+ WL_ERR(("frameburst is %s\n", val ? "enabled" : "disabled"));
+ }
+ }
+ return ret;
+}
+
+static int wl_cfgvendor_priv_mpc(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ int val = *(int *)data;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int get_mpc = 0;
+ int err = 0;
+
+ if (val == 0xa) {
+ ret = wldev_iovar_getint(bcmcfg_to_prmry_ndev(cfg), "mpc", &get_mpc);
+ if (ret) {
+ WL_ERR(("Failed : %d\n", ret));
+ }
+ WL_DBG(("Get MPC : %d\n", get_mpc));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &get_mpc, sizeof(int));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+ else {
+ WL_DBG(("Set mpc val %d\n", val));
+ ret = wldev_iovar_setint(bcmcfg_to_prmry_ndev(cfg), "mpc", val);
+ if (ret < 0) {
+ WL_ERR(("Failed set mpc, ret=%d\n", ret));
+ } else {
+ WL_INFORM_MEM(("mpc is %s\n", val ? "enabled" : "disabled"));
+ }
+ }
+ return ret;
+}
+
+static int wl_cfgvendor_priv_band(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ uint val = *(uint *)data;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int get_band = 0;
+ int err = 0;
+
+ if (val == 0xa) {
+ ret = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg),
+ WLC_GET_BAND, &get_band, sizeof(int));
+ if (ret) {
+ WL_ERR(("Failed : %d\n", ret));
+ }
+ WL_DBG(("Get band : %d\n", get_band));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &get_band, sizeof(int));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+ else {
+ WL_DBG(("Set band val %d\n", val));
+ if ((val == WLC_BAND_AUTO) || (val == WLC_BAND_5G) || (val == WLC_BAND_2G)) {
+ ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg),
+ WLC_SET_BAND, &val, sizeof(val));
+ if (!ret)
+ dhd_bus_band_set(bcmcfg_to_prmry_ndev(cfg), val);
+ }
+
+ }
+ return ret;
+}
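/*
 * Illustrative sketch (not part of this patch): the three private handlers
 * above share one convention -- an input value of 0xa selects the "get"
 * path and any other value is treated as a "set". A hypothetical helper
 * that factors out that pattern could look like the following; the callback
 * types and the helper name are assumptions for illustration only.
 */
typedef int (*wl_priv_get_fn)(struct net_device *ndev, int *val);
typedef int (*wl_priv_set_fn)(struct net_device *ndev, int val);

static int wl_cfgvendor_priv_get_or_set(struct wiphy *wiphy, int val,
	wl_priv_get_fn get, wl_priv_set_fn set)
{
	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
	int out = 0;
	int ret;

	if (val == 0xa) {
		/* 0xa acts as the "query current value" sentinel */
		ret = get(ndev, &out);
		if (ret)
			return ret;
		return wl_cfgvendor_send_cmd_reply(wiphy, &out, sizeof(out));
	}
	/* anything else is the new value to apply */
	return set(ndev, val);
}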
static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
@@ -6179,14 +7461,23 @@
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_PKT_FATE_NUM, ret:%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor Command reply failed ret:%d \n", ret));
}
+ return ret;
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
@@ -6342,7 +7633,7 @@
struct wireless_dev *wdev, const void *data, int len)
{
struct net_device *ndev = wdev_to_ndev(wdev);
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
int ret, ver, max_len, mem_needed;
/* APF version */
@@ -6369,14 +7660,25 @@
return -ENOMEM;
}
- nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
- nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_VERSION, ret:%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_MAX_LEN, ret:%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor command reply failed, ret=%d\n", ret));
}
-
+ return ret;
+exit:
+ /* Free skb memory */
+ kfree_skb(skb);
return ret;
}
@@ -6496,6 +7798,95 @@
}
#endif /* NDO_CONFIG_SUPPORT */
+/* For kernel >= 4.13, the NL80211 wl_cfg80211_set_pmk() path has to be used instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+static int wl_cfgvendor_set_pmk(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ const struct nlattr *iter;
+ int rem, type;
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case BRCM_ATTR_DRIVER_KEY_PMK:
+ if (nla_len(iter) > sizeof(pmk.key)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ pmk.flags = 0;
+ pmk.key_len = htod16(nla_len(iter));
+ bcopy((uint8 *)nla_data(iter), pmk.key, nla_len(iter));
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf(ndev, "okc_info_pmk", pmk.key, pmk.key_len, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(ndev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ WL_INFORM_MEM(("IOVAR set_pmk ret:%d", ret));
+exit:
+ return ret;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+static int wl_cfgvendor_get_driver_feature(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ u8 supported[(BRCM_WLAN_VENDOR_FEATURES_MAX / 8) + 1] = {0};
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ struct sk_buff *skb;
+ int32 mem_needed;
+
+ mem_needed = VENDOR_REPLY_OVERHEAD + NLA_HDRLEN + sizeof(supported);
+
+ BCM_REFERENCE(dhd_pub);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ if (FW_SUPPORTED(dhd_pub, idsup)) {
+ ret = wl_features_set(supported, sizeof(supported),
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD);
+ }
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = nla_put(skb, BRCM_ATTR_DRIVER_FEATURE_FLAGS, sizeof(supported), supported);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit;
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+exit:
+ return ret;
+}
+
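/*
 * Illustrative sketch (not part of this patch): BRCM_ATTR_DRIVER_FEATURE_FLAGS
 * above carries a byte array used as a bitmap, one bit per
 * brcm_wlan_vendor_features value. Assuming the usual "bit N lives in byte
 * N / 8" layout (an assumption here, consistent with how wl_features_set()
 * is called), minimal helpers to set and test a feature bit could look like:
 */
static inline void feature_bitmap_set(u8 *map, u32 map_len, u32 bit)
{
	if ((bit / 8) < map_len)
		map[bit / 8] |= (u8)(1u << (bit % 8));
}

static inline bool feature_bitmap_test(const u8 *map, u32 map_len, u32 bit)
{
	return ((bit / 8) < map_len) && (map[bit / 8] & (1u << (bit % 8)));
}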
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
#define WL_VENDOR_POLICY_RAW_DATA .policy = VENDOR_CMD_RAW_DATA
#else
@@ -6506,12 +7897,40 @@
{
{
.vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_FRAMEBURST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_frameburst,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_MPC
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_mpc,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_BAND
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_band,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_BRCM,
.subcmd = BRCM_VENDOR_SCMD_PRIV_STR
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_priv_string_handler,
WL_VENDOR_POLICY_RAW_DATA
},
+#ifdef BCM_PRIV_CMD_SUPPORT
{
{
.vendor_id = OUI_BRCM,
@@ -6521,6 +7940,18 @@
.doit = wl_cfgvendor_priv_bcm_handler,
WL_VENDOR_POLICY_RAW_DATA
},
+#endif /* BCM_PRIV_CMD_SUPPORT */
+#ifdef WL_SAE
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_BCM_PSK
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_sae_password,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#endif /* WL_SAE */
#ifdef GSCAN_SUPPORT
{
{
@@ -6674,10 +8105,10 @@
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_RANDOM_MAC_OUI
+ .subcmd = ANDR_WIFI_SUBCMD_SET_PNO_RANDOM_MAC_OUI
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_set_rand_mac_oui,
+ .doit = wl_cfgvendor_set_pno_rand_mac_oui,
WL_VENDOR_POLICY_RAW_DATA
},
#ifdef CUSTOM_FORCE_NODFS_FLAG
@@ -6721,7 +8152,6 @@
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_epno_cfg,
WL_VENDOR_POLICY_RAW_DATA
-
},
{
{
@@ -6731,7 +8161,6 @@
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_set_lazy_roam_cfg,
WL_VENDOR_POLICY_RAW_DATA
-
},
{
{
@@ -6804,25 +8233,17 @@
.doit = wl_cfgvendor_dbg_get_version,
WL_VENDOR_POLICY_RAW_DATA
},
-#ifdef DEBUGABILITY
+#ifdef DHD_LOG_DUMP
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_START_LOGGING
+ .subcmd = DEBUG_GET_FILE_DUMP_BUF
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_start_logging,
+ .doit = wl_cfgvendor_dbg_file_dump,
WL_VENDOR_POLICY_RAW_DATA
},
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_RESET_LOGGING
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_reset_logging,
- WL_VENDOR_POLICY_RAW_DATA
- },
+#endif /* DHD_LOG_DUMP */
{
{
.vendor_id = OUI_GOOGLE,
@@ -6844,6 +8265,24 @@
{
{
.vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_START_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_start_logging,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_RESET_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_reset_logging,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
.subcmd = DEBUG_GET_RING_STATUS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
@@ -6859,7 +8298,6 @@
.doit = wl_cfgvendor_dbg_get_ring_data,
WL_VENDOR_POLICY_RAW_DATA
},
-#endif /* DEBUGABILITY */
{
{
.vendor_id = OUI_GOOGLE,
@@ -7110,6 +8548,17 @@
WL_VENDOR_POLICY_RAW_DATA
},
#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef DHD_WAKE_STATUS
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_WAKE_REASON_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_wake_reason_stats,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#endif /* DHD_WAKE_STATUS */
#ifdef DHDTCPACK_SUPPRESS
{
{
@@ -7121,17 +8570,76 @@
WL_VENDOR_POLICY_RAW_DATA
},
#endif /* DHDTCPACK_SUPPRESS */
-#ifdef DHD_WAKE_STATUS
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_PMK
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_pmk,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_GET_FEATURES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_driver_feature,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_WAKE_REASON_STATS
+ .subcmd = DEBUG_FILE_DUMP_DONE_IND
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_get_wake_reason_stats,
+ .doit = wl_cfgvendor_notify_dump_completion,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+#if defined(WL_CFG80211)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_START
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_hal_started,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_STOP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_stop_hal,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+#endif /* WL_CFG80211 */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_LATENCY_MODE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_latency_mode,
+ WL_VENDOR_POLICY_RAW_DATA
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_TX_POWER_SCENARIO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_tx_power_scenario,
WL_VENDOR_POLICY_RAW_DATA
}
-#endif /* DHD_WAKE_STATUS */
+
};
static const struct nl80211_vendor_cmd_info wl_vendor_events [] = {
@@ -7168,7 +8676,13 @@
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_UNKNOWN},
{ OUI_GOOGLE, GOOGLE_ROAM_EVENT_START},
- { OUI_BRCM, BRCM_VENDOR_EVENT_HANGED}
+ { OUI_BRCM, BRCM_VENDOR_EVENT_HANGED},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_SAE_KEY},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_BEACON_RECV},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_PORT_AUTHORIZED},
+ { OUI_GOOGLE, GOOGLE_FILE_DUMP_EVENT },
+ { OUI_BRCM, BRCM_VENDOR_EVENT_CU},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_WIPS}
};
int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd)
@@ -7184,11 +8698,11 @@
#ifdef DEBUGABILITY
dhd_os_dbg_register_callback(FW_VERBOSE_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
- dhd_os_dbg_register_callback(FW_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
dhd_os_dbg_register_callback(DHD_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
- dhd_os_dbg_register_callback(NAN_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
- dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_urgent_evt);
#endif /* DEBUGABILITY */
+#ifdef DHD_LOG_DUMP
+ dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_file_dump_evt);
+#endif /* DHD_LOG_DUMP */
return 0;
}
@@ -7205,3 +8719,179 @@
return 0;
}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void
+wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason, char *string, int hang_info_cnt)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wiphy *wiphy;
+ char *hang_info;
+ int len = 0;
+ int bytes_written;
+ uint32 dummy_data = 0;
+ int reason_hang_info = 0;
+ int cnt = 0;
+ dhd_pub_t *dhd;
+ int hang_reason_mismatch = FALSE;
+
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("cfg=%p wdev=%p\n", cfg, (cfg ? cfg->wdev : NULL)));
+ return;
+ }
+
+ wiphy = cfg->wdev->wiphy;
+
+ if (!wiphy) {
+ WL_ERR(("wiphy is NULL\n"));
+ return;
+ }
+
+ hang_info = MALLOCZ(cfg->osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ if (hang_info == NULL) {
+ WL_ERR(("alloc hang_info failed\n"));
+ return;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then stop scan */
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
+ wl_android_bcnrecv_stop(dev, WL_BCNRECV_HANG);
+ }
+#endif /* WL_BCNRECV */
+ sscanf(string, "%d", &reason_hang_info);
+ bytes_written = 0;
+ len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ if (strlen(string) == 0 || (reason_hang_info != reason)) {
+ WL_ERR(("hang reason mismatch: string len %d reason_hang_info %d\n",
+ (int)strlen(string), reason_hang_info));
+ hang_reason_mismatch = TRUE;
+ if (dhd) {
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str,
+ dhd->debug_dump_time_hang_str);
+ }
+ bytes_written += scnprintf(&hang_info[bytes_written], len,
+ "%d %d %s %08x %08x %08x %08x %08x %08x %08x",
+ reason, VENDOR_SEND_HANG_EXT_INFO_VER,
+ dhd->debug_dump_time_hang_str,
+ 0, 0, 0, 0, 0, 0, 0);
+ if (dhd) {
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+ }
+ } else {
+ bytes_written += scnprintf(&hang_info[bytes_written], len, "%s", string);
+ }
+
+ WL_ERR(("hang reason: %d info cnt: %d\n", reason, hang_info_cnt));
+
+ if (hang_reason_mismatch == FALSE) {
+ cnt = hang_info_cnt;
+ } else {
+ cnt = HANG_FIELD_MISMATCH_CNT;
+ }
+
+ while (cnt < HANG_FIELD_CNT_MAX) {
+ len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ if (len <= 0) {
+ break;
+ }
+ bytes_written += scnprintf(&hang_info[bytes_written], len,
+ "%c%08x", HANG_RAW_DEL, dummy_data);
+ cnt++;
+ }
+
+ WL_ERR(("hang info cnt: %d len: %d\n", cnt, (int)strlen(hang_info)));
+ WL_ERR(("hang info data: %s\n", hang_info));
+
+ wl_cfgvendor_send_async_event(wiphy,
+ bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_HANGED,
+ hang_info, (int)strlen(hang_info));
+
+ memset(string, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
+
+ if (hang_info) {
+ MFREE(cfg->osh, hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ }
+
+#ifdef DHD_LOG_DUMP
+ dhd_logdump_cookie_save(dhd, dhd->debug_dump_time_hang_str, "HANG");
+#endif /* DHD_LOG_DUMP */
+
+ if (dhd) {
+ clear_debug_dump_time(dhd->debug_dump_time_str);
+ }
+}
+
+void
+wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 ret)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ dhd_pub_t *dhd;
+ s32 err = 0;
+ char ioctl_buf[WLC_IOCTL_SMLEN];
+ memuse_info_t mu;
+ int bytes_written = 0;
+ int remain_len = 0;
+
+ if (!dev) {
+ WL_ERR(("dev is null"));
+ return;
+
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ WL_ERR(("dev=%p cfg=%p\n", dev, cfg));
+ return;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhd || !dhd->hang_info) {
+ WL_ERR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+ dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "memuse",
+ NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, 0, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return;
+ }
+
+ memcpy(&mu, ioctl_buf, sizeof(memuse_info_t));
+
+ if (mu.len >= sizeof(memuse_info_t)) {
+ WL_ERR(("Heap Total: %d(%dK)\n", mu.arena_size, KB(mu.arena_size)));
+ WL_ERR(("Free: %d(%dK), LWM: %d(%dK)\n",
+ mu.arena_free, KB(mu.arena_free),
+ mu.free_lwm, KB(mu.free_lwm)));
+ WL_ERR(("In use: %d(%dK), HWM: %d(%dK)\n",
+ mu.inuse_size, KB(mu.inuse_size),
+ mu.inuse_hwm, KB(mu.inuse_hwm)));
+ WL_ERR(("Malloc failure count: %d\n", mu.mf_count));
+ }
+
+ memset(dhd->hang_info, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len,
+ "%d %d %s %d %d %d %d %d %08x %08x",
+ reason, VENDOR_SEND_HANG_EXT_INFO_VER,
+ dhd->debug_dump_time_hang_str,
+ ret, mu.arena_size, mu.arena_free, mu.inuse_size, mu.mf_count, 0, 0);
+
+ dhd->hang_info_cnt = HANG_FIELD_IF_FAILURE_CNT;
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+ return;
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
index 64795de..233c6b1 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
@@ -1,7 +1,9 @@
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,27 +26,30 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.h 697181 2017-05-02 12:15:03Z $
+ * $Id: wl_cfgvendor.h 814814 2019-04-15 03:31:10Z $
*/
#ifndef _wl_cfgvendor_h_
#define _wl_cfgvendor_h_
-#if ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
- defined(CONFIG_BCMDHD_VENDOR_EXT)) && !defined(WL_VENDOR_EXT_SUPPORT)
-/* defined CONFIG_BCMDHD_VENDOR_EXT in brix kernel to enable GSCAN testing */
-#define WL_VENDOR_EXT_SUPPORT
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && CONFIG_BCMDHD_VENDOR_EXT */
#define OUI_BRCM 0x001018
#define OUI_GOOGLE 0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR 1
#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN
#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN
#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
enum brcm_vendor_attr {
- BRCM_ATTR_DRIVER_CMD,
- BRCM_ATTR_DRIVER_MAX
+ BRCM_ATTR_DRIVER_CMD = 0,
+ BRCM_ATTR_DRIVER_KEY_PMK = 1,
+ BRCM_ATTR_DRIVER_FEATURE_FLAGS = 2,
+ BRCM_ATTR_DRIVER_MAX = 3
+};
+
+enum brcm_wlan_vendor_features {
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0,
+ BRCM_WLAN_VENDOR_FEATURES_MAX = 1
};
#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
@@ -84,6 +89,9 @@
#define NAN_SID_BEACON_COUNT_INVALID 0xff
#define WL_NAN_DW_INTERVAL 512
+#define CFG80211_VENDOR_CMD_REPLY_SKB_SZ 100
+#define CFG80211_VENDOR_EVT_SKB_SZ 2048
+
typedef enum {
/* don't use 0 as a valid subcommand */
VENDOR_NL80211_SUBCMD_UNSPECIFIED,
@@ -125,6 +133,10 @@
ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800,
ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END = 0x18FF,
+ /* define all tx power related commands between 0x1900 and 0x1910 */
+ ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_START = 0x1900,
+ ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_END = 0x1910,
+
/* This is reserved for future usage */
} ANDROID_VENDOR_SUB_COMMAND;
@@ -143,7 +155,7 @@
/* ANDR_WIFI_XXX although not related to gscan are defined here */
ANDR_WIFI_SUBCMD_GET_FEATURE_SET,
ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX,
- ANDR_WIFI_RANDOM_MAC_OUI,
+ ANDR_WIFI_SUBCMD_SET_PNO_RANDOM_MAC_OUI, /*0x100C*/
ANDR_WIFI_NODFS_CHANNELS,
ANDR_WIFI_SET_COUNTRY,
GSCAN_SUBCMD_SET_EPNO_SSID,
@@ -158,7 +170,7 @@
WIFI_SUBCMD_CONFIG_TCPACK_SUP,
WIFI_SUBCMD_FW_ROAM_POLICY,
WIFI_SUBCMD_ROAM_CAPABILITY,
-
+ WIFI_SUBCMD_SET_LATENCY_MODE, /*0x101b*/
RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
RTT_SUBCMD_CANCEL_CONFIG,
RTT_SUBCMD_GETCAPABILITY,
@@ -182,6 +194,10 @@
DEBUG_GET_TX_PKT_FATES,
DEBUG_GET_RX_PKT_FATES,
DEBUG_GET_WAKE_REASON_STATS,
+ DEBUG_GET_FILE_DUMP_BUF,
+ DEBUG_FILE_DUMP_DONE_IND,
+ DEBUG_SET_HAL_START,
+ DEBUG_SET_HAL_STOP,
WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
@@ -206,6 +222,7 @@
NAN_WIFI_SUBCMD_VERSION_INFO, /* 0x1711 */
APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
APF_SUBCMD_SET_FILTER,
+ WIFI_SUBCMD_TX_POWER_SCENARIO = ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_START, /*0x1900*/
/* Add more sub commands here */
VENDOR_SUBCMD_MAX
};
@@ -367,7 +384,8 @@
RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
RTT_ATTRIBUTE_RESULTS_PER_TARGET,
RTT_ATTRIBUTE_RESULT_CNT,
- RTT_ATTRIBUTE_RESULT
+ RTT_ATTRIBUTE_RESULT,
+ RTT_ATTRIBUTE_RESULT_DETAIL
};
enum wifi_rssi_monitor_attr {
@@ -376,6 +394,12 @@
RSSI_MONITOR_ATTRIBUTE_START
};
+enum wifi_sae_key_attr {
+ BRCM_SAE_KEY_ATTR_PEER_MAC,
+ BRCM_SAE_KEY_ATTR_PMK,
+ BRCM_SAE_KEY_ATTR_PMKID
+};
+
enum debug_attributes {
DEBUG_ATTRIBUTE_GET_DRIVER,
DEBUG_ATTRIBUTE_GET_FW,
@@ -385,6 +409,7 @@
DEBUG_ATTRIBUTE_LOG_LEVEL,
DEBUG_ATTRIBUTE_LOG_TIME_INTVAL,
DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE,
+ DEBUG_ATTRIBUTE_DUMP_FILENAME,
DEBUG_ATTRIBUTE_FW_DUMP_LEN,
DEBUG_ATTRIBUTE_FW_DUMP_DATA,
DEBUG_ATTRIBUTE_RING_DATA,
@@ -396,6 +421,67 @@
DEBUG_ATTRIBUTE_PKT_FATE_DATA
};
+typedef enum {
+ DUMP_LEN_ATTR_INVALID,
+ DUMP_LEN_ATTR_MEMDUMP,
+ DUMP_LEN_ATTR_SSSR_C0_D11_BEFORE,
+ DUMP_LEN_ATTR_SSSR_C0_D11_AFTER,
+ DUMP_LEN_ATTR_SSSR_C1_D11_BEFORE,
+ DUMP_LEN_ATTR_SSSR_C1_D11_AFTER,
+ DUMP_LEN_ATTR_SSSR_DIG_BEFORE,
+ DUMP_LEN_ATTR_SSSR_DIG_AFTER,
+ DUMP_LEN_ATTR_TIMESTAMP,
+ DUMP_LEN_ATTR_GENERAL_LOG,
+ DUMP_LEN_ATTR_ECNTRS,
+ DUMP_LEN_ATTR_SPECIAL_LOG,
+ DUMP_LEN_ATTR_DHD_DUMP,
+ DUMP_LEN_ATTR_EXT_TRAP,
+ DUMP_LEN_ATTR_HEALTH_CHK,
+ DUMP_LEN_ATTR_PRESERVE_LOG,
+ DUMP_LEN_ATTR_COOKIE,
+ DUMP_LEN_ATTR_FLOWRING_DUMP,
+ DUMP_LEN_ATTR_PKTLOG,
+ DUMP_FILENAME_ATTR_DEBUG_DUMP,
+ DUMP_FILENAME_ATTR_MEM_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_PKTLOG_DUMP,
+ DUMP_LEN_ATTR_STATUS_LOG,
+ DUMP_LEN_ATTR_AXI_ERROR,
+ DUMP_FILENAME_ATTR_AXI_ERROR_DUMP,
+ DUMP_LEN_ATTR_RTT_LOG
+} EWP_DUMP_EVENT_ATTRIBUTE;
+
+/* Attributes associated with DEBUG_GET_DUMP_BUF */
+typedef enum {
+ DUMP_BUF_ATTR_INVALID,
+ DUMP_BUF_ATTR_MEMDUMP,
+ DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE,
+ DUMP_BUF_ATTR_SSSR_C0_D11_AFTER,
+ DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE,
+ DUMP_BUF_ATTR_SSSR_C1_D11_AFTER,
+ DUMP_BUF_ATTR_SSSR_DIG_BEFORE,
+ DUMP_BUF_ATTR_SSSR_DIG_AFTER,
+ DUMP_BUF_ATTR_TIMESTAMP,
+ DUMP_BUF_ATTR_GENERAL_LOG,
+ DUMP_BUF_ATTR_ECNTRS,
+ DUMP_BUF_ATTR_SPECIAL_LOG,
+ DUMP_BUF_ATTR_DHD_DUMP,
+ DUMP_BUF_ATTR_EXT_TRAP,
+ DUMP_BUF_ATTR_HEALTH_CHK,
+ DUMP_BUF_ATTR_PRESERVE_LOG,
+ DUMP_BUF_ATTR_COOKIE,
+ DUMP_BUF_ATTR_FLOWRING_DUMP,
+ DUMP_BUF_ATTR_PKTLOG,
+ DUMP_BUF_ATTR_STATUS_LOG,
+ DUMP_BUF_ATTR_AXI_ERROR,
+ DUMP_BUF_ATTR_RTT_LOG
+} EWP_DUMP_CMD_ATTRIBUTE;
+
enum mkeep_alive_attributes {
MKEEP_ALIVE_ATTRIBUTE_ID,
MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
@@ -444,20 +530,28 @@
GOOGLE_NAN_EVENT_TCA = 29,
GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH = 30,
GOOGLE_NAN_EVENT_UNKNOWN = 31,
-
GOOGLE_ROAM_EVENT_START = 32,
-
- BRCM_VENDOR_EVENT_HANGED = 33
+ BRCM_VENDOR_EVENT_HANGED = 33,
+ BRCM_VENDOR_EVENT_SAE_KEY = 34,
+ BRCM_VENDOR_EVENT_BEACON_RECV = 35,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED = 36,
+ GOOGLE_FILE_DUMP_EVENT = 37,
+ BRCM_VENDOR_EVENT_CU = 38,
+ BRCM_VENDOR_EVENT_WIPS = 39
} wl_vendor_event_t;
enum andr_wifi_attr {
ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
- ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI,
+ ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI,
ANDR_WIFI_ATTRIBUTE_NODFS_SET,
ANDR_WIFI_ATTRIBUTE_COUNTRY,
ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE,
- ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE
+ ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE,
+ ANDR_WIFI_ATTRIBUTE_LATENCY_MODE,
+ ANDR_WIFI_ATTRIBUTE_RANDOM_MAC,
+ ANDR_WIFI_ATTRIBUTE_TX_POWER_SCENARIO
+ /* Add more attributes here */
};
enum apf_attributes {
APF_ATTRIBUTE_VERSION,
@@ -556,14 +650,39 @@
} WLAN_DRIVER_WAKE_REASON_CNT;
#endif /* DHD_WAKE_STATUS */
+#ifdef WL_WIPSEVT
+#define BRCM_VENDOR_WIPS_EVENT_BUF_LEN 128
+typedef enum wl_vendor_wips_attr_type {
+ WIPS_ATTR_DEAUTH_CNT = 1,
+ WPPS_ATTR_DEAUTH_BSSID
+} wl_vendor_wips_attr_type_t;
+#endif /* WL_WIPSEVT */
+
/* Chipset roaming capabilities */
typedef struct wifi_roaming_capabilities {
u32 max_blacklist_size;
u32 max_whitelist_size;
} wifi_roaming_capabilities_t;
+/* sync-up return code with wifi_hal.h in wifi_hal layer. */
+typedef enum {
+ WIFI_SUCCESS = 0,
+ WIFI_ERROR_NONE = 0,
+ WIFI_ERROR_UNKNOWN = -1,
+ WIFI_ERROR_UNINITIALIZED = -2,
+ WIFI_ERROR_NOT_SUPPORTED = -3,
+ WIFI_ERROR_NOT_AVAILABLE = -4, /* Not available right now, but try later */
+ WIFI_ERROR_INVALID_ARGS = -5,
+ WIFI_ERROR_INVALID_REQUEST_ID = -6,
+ WIFI_ERROR_TIMED_OUT = -7,
+ WIFI_ERROR_TOO_MANY_REQUESTS = -8, /* Too many instances of this request */
+ WIFI_ERROR_OUT_OF_MEMORY = -9,
+ WIFI_ERROR_BUSY = -10
+} wifi_error;
+
/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
#define BRCM_VENDOR_SCMD_CAPA "cap"
+#define MEMDUMP_PATH_LEN 128
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
extern int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd);
@@ -576,20 +695,46 @@
static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy,
dhd_pub_t *dhd) { UNUSED_PARAMETER(wiphy); UNUSED_PARAMETER(dhd); return 0; }
static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { UNUSED_PARAMETER(wiphy); return 0; }
+static INLINE int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len)
+{ return 0; }
+static INLINE int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event)
+{ return 0; }
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
#if defined(WL_SUPP_EVENT) && ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
defined(WL_VENDOR_EXT_SUPPORT))
extern int wl_cfgvendor_send_supp_eventstring(const char *func, const char *fmt, ...);
+int wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...);
#define SUPP_LOG_LEN 256
#define PRINT_SUPP_LOG(fmt, ...) \
wl_cfgvendor_send_supp_eventstring(__func__, fmt, ##__VA_ARGS__);
-#define SUPP_LOG(args) PRINT_SUPP_LOG args;
+#define SUPP_LOG(args) PRINT_SUPP_LOG args;
+#define SUPP_EVT_LOG(evt_name, fmt, ...) \
+ wl_cfgvendor_notify_supp_event_str(evt_name, fmt, ##__VA_ARGS__);
+#define SUPP_EVENT(args) SUPP_EVT_LOG args
#else
#define SUPP_LOG(x)
-#endif /* WL_SUPP_EVENT && ((kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT) */
+#define SUPP_EVENT(x)
+#endif /* WL_SUPP_EVENT && (kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT */
#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) \
normal_structure.member = value;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, wdev, len, type, kflags);
+#else
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, len, type, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason,
+ char *string, int hang_info_cnt);
+void wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 ret);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#endif /* _wl_cfgvendor_h_ */
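/*
 * Illustrative sketch (not part of this patch): with the
 * CFG80211_VENDOR_EVENT_ALLOC() wrapper defined above, the open-coded
 * #if/#else around cfg80211_vendor_event_alloc() in
 * wl_cfgvendor_dbg_send_file_dump_evt() could be collapsed to one call:
 */
	skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, NULL,
		len + CFG80211_VENDOR_EVT_SKB_SZ,
		GOOGLE_FILE_DUMP_EVENT, kflags);
	if (!skb) {
		WL_ERR(("skb alloc failed"));
		return;
	}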
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_dbg.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_dbg.h
index e8ea769..56fa7ad 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_dbg.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -2,7 +2,9 @@
* Minimal debug/trace/assert driver definitions for
* Broadcom 802.11 Networking Adapter.
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_iw.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_iw.c
index 89d9b97..769eaa6 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_iw.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -1,7 +1,9 @@
/*
* Linux Wireless Extensions support
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -40,6 +42,10 @@
#include <linux/if_arp.h>
#include <asm/uaccess.h>
+#include <linux/signal.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/signal.h>
+#endif // endif
#include <wlioctl.h>
#include <wlioctl_utils.h>
@@ -1362,7 +1368,7 @@
/* look for wpa/rsn ies in the ie list... */
bcm_tlv_t *ie;
uint8 *ptr = ((uint8 *)bi) + bi->ie_offset;
- int ptr_len = bi->ie_length;
+ uint ptr_len = bi->ie_length;
/* OSEN IE */
if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_linux_mon.c
index 8b76caf..643bfe4 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_linux_mon.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -1,7 +1,9 @@
/*
* Broadcom Dongle Host Driver (DHD), Linux monitor network interface
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_roam.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_roam.c
index a295f94..f8d72d1 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_roam.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wl_roam.c
@@ -1,7 +1,9 @@
/*
* Linux roam cache
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -24,7 +26,7 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_roam.c 695669 2017-04-21 13:04:52Z $
+ * $Id: wl_roam.c 798173 2019-01-07 09:23:21Z $
*/
#include <typedefs.h>
@@ -36,6 +38,7 @@
#include <wl_cfg80211.h>
#endif // endif
#include <wldev_common.h>
+#include <bcmstdlib_s.h>
#ifdef ESCAN_CHANNEL_CACHE
#define MAX_ROAM_CACHE 200
@@ -172,13 +175,23 @@
/* need to set ROAMSCAN_MODE_NORMAL to update roamscan_channels,
* otherwise, it won't be updated
*/
- wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
+ error = wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
+ if (error) {
+ WL_ERR(("Failed to set roamscan mode to %d, error = %d\n",
+ ROAMSCAN_MODE_NORMAL, error));
+ return error;
+ }
error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
if (error) {
- WL_DBG(("Failed to set roamscan channels, error = %d\n", error));
+ WL_ERR(("Failed to set roamscan channels, error = %d\n", error));
+ return error;
}
- wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);
+ error = wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);
+ if (error) {
+ WL_ERR(("Failed to set roamscan mode to %d, error = %d\n",
+ ROAMSCAN_MODE_WES, error));
+ }
return error;
}
@@ -236,7 +249,7 @@
WL_DBG(("CHSPEC = %s, CTL %d\n", wf_chspec_ntoa_ex(bi->chanspec, chanbuf), channel));
roam_cache[n_roam_cache].chanspec =
(channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
- memcpy(roam_cache[n_roam_cache].ssid, bi->SSID, bi->SSID_len);
+ (void)memcpy_s(roam_cache[n_roam_cache].ssid, sizeof(roam_cache[n_roam_cache].ssid), bi->SSID, bi->SSID_len);
n_roam_cache++;
}
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.c b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.c
index 5c6d0ba..f25adf1 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.c
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -1,7 +1,9 @@
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -36,6 +38,7 @@
#include <bcmutils.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
#endif /* WL_CFG80211 */
#define htod32(i) (i)
@@ -342,7 +345,7 @@
if (!scb_val)
return -ENOMEM;
- memset(scb_val, 0, sizeof(scb_val_t));
+
error = wldev_ioctl_get(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t));
if (unlikely(error))
return error;
@@ -508,8 +511,8 @@
wl_cfg80211_scan_abort(cfg);
cspec.rev = revinfo;
- memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
- memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
smbuf, sizeof(smbuf), NULL);
diff --git a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.h b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.h
index 0428728..98996fc 100644
--- a/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.h
+++ b/src/kernel/linux/v4.19/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -1,7 +1,9 @@
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2020, Broadcom.
+ * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -111,7 +113,11 @@
extern int net_os_set_dtim_skip(struct net_device *dev, int val);
extern int net_os_set_suspend_disable(struct net_device *dev, int val);
extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
+#ifdef DISABLE_DTIM_IN_SUSPEND
+extern int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val);
+#endif /* DISABLE_DTIM_IN_SUSPEND */
#if defined(OEM_ANDROID)
extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
int max, int *bytes_left);
diff --git a/src/kernel/linux/v4.19/drivers/spi/spidev.c b/src/kernel/linux/v4.19/drivers/spi/spidev.c
index 1428aae..bf189d1 100644
--- a/src/kernel/linux/v4.19/drivers/spi/spidev.c
+++ b/src/kernel/linux/v4.19/drivers/spi/spidev.c
@@ -668,11 +668,13 @@
#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
- { .compatible = "rohm,dh2228fv" },
- { .compatible = "lineartechnology,ltc2488" },
- { .compatible = "ge,achc" },
- { .compatible = "semtech,sx1301" },
- { .compatible = "siliconlabs,si3210" },
+ //{ .compatible = "rohm,dh2228fv" },
+ //{ .compatible = "lineartechnology,ltc2488" },
+ //{ .compatible = "ge,achc" },
+ //{ .compatible = "semtech,sx1301" },
+ //{ .compatible = "siliconlabs,si3210" },
+ { .compatible = "mediatek,spi-mt65xx-dev" },
+ { .compatible = "spi-slave-mt27xx-test"},
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
@@ -722,7 +724,7 @@
struct spidev_data *spidev;
int status;
unsigned long minor;
-
+printk("GLENN\n");
/*
* spidev should never be referenced in DT without a specific
* compatible string, it is a Linux implementation thing
@@ -804,7 +806,7 @@
.driver = {
.name = "spidev",
.of_match_table = of_match_ptr(spidev_dt_ids),
- .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
+ //.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
},
.probe = spidev_probe,
.remove = spidev_remove,
diff --git a/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.c b/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.c
old mode 100644
new mode 100755
index 30e96a3..184fcb9
--- a/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.c
+++ b/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.c
@@ -18,7 +18,9 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
+#if defined(CONFIG_HW_NAT)
#include <net/ra_nat.h>
+#endif
#include <linux/ip.h>
#include <linux/ktime.h>
@@ -598,6 +600,7 @@
spin_unlock_irqrestore(&dev->reqrx_lock, flags);
}
+#if defined(CONFIG_HW_NAT)
int eth_hw_nat(bool to_usb, struct sk_buff *skb, struct net_device *net)
{
U_ETHER_DBG("%s: to_usb=%d, net->name=%s\n",
@@ -619,6 +622,7 @@
return 1;
}
}
+#endif
static void process_rx_w(struct work_struct *work)
{
@@ -665,8 +669,10 @@
pr_emerg("RNDIS rtt_rx_duration=%llu (WORK)\n", rtt_rx_duration);
}
+#if defined(CONFIG_HW_NAT)
if (eth_hw_nat(false, skb, dev->net) == 0)
continue;
+#endif
status = netif_rx_ni(skb);
}
@@ -904,6 +910,8 @@
struct Qdisc *qdisc;
ktime_t rtt_tx_start_time, rtt_tx_queue_time, rtt_tx_delta_time;
unsigned long long rtt_tx_duration;
+ struct sk_buff *skb2 = NULL;
+ int offset = 0;
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb) {
@@ -914,17 +922,18 @@
}
spin_unlock_irqrestore(&dev->lock, flags);
+#if defined(CONFIG_HW_NAT)
if (eth_hw_nat(true, skb, net) == 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+#endif
if (skb && !in) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- pinfo = skb_shinfo(skb);
iph = skb->encapsulation ? inner_ip_hdr(skb): ip_hdr(skb);
if (uether_tx_rtt_profile) {
@@ -993,6 +1002,9 @@
dev->net->stats.tx_dropped++;
return NETDEV_TX_OK;
}
+
+ pinfo = skb_shinfo(skb);
+
spin_lock_irqsave(&dev->req_lock, flags);
if (multi_pkt_xfer && !dev->tx_req_bufsize) {
retval = alloc_tx_buffer(dev);
@@ -1154,9 +1166,35 @@
}
spin_unlock_irqrestore(&dev->req_lock, flags);
} else {
+ frag_cnt = 0;
+ if (net->features & NETIF_F_GSO)
+ frag_cnt = pinfo->nr_frags;
length = skb->len;
- req->buf = skb->data;
- req->context = skb;
+ if (frag_cnt == 0) {
+ req->buf = skb->data;
+ req->context = skb;
+ } else {
+ skb2 = alloc_skb(skb->len + NET_IP_ALIGN, GFP_ATOMIC);
+ if (skb2 == NULL) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ skb_put(skb2, skb->len);
+ memcpy(skb2->data, skb->data, skb->len - skb->data_len);
+ offset += skb->len - skb->data_len;
+ for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++) {
+ frag = pinfo->frags + frag_idx;
+ frag_data_len = skb_frag_size(frag);
+ frag_data_addr = skb_frag_address(frag);
+
+ memcpy(skb2->data + offset, frag_data_addr,
+ frag_data_len);
+ offset += frag_data_len;
+ }
+ dev_kfree_skb_any(skb);
+ req->buf = skb2->data;
+ req->context = skb2;
+ }
}
/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -1210,7 +1248,10 @@
if (retval) {
if (!multi_pkt_xfer)
- dev_kfree_skb_any(skb);
+ if (skb2 == NULL)
+ dev_kfree_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb2);
else
req->length = 0;
@@ -1426,8 +1467,10 @@
free_netdev(net);
dev = ERR_PTR(status);
} else {
+#if defined(CONFIG_HW_NAT)
if (ppe_dev_register_hook != NULL)
ppe_dev_register_hook(net);
+#endif
INFO(dev, "MAC %pM\n", net->dev_addr);
INFO(dev, "HOST MAC %pM\n", dev->host_mac);
@@ -1530,8 +1573,10 @@
else
INFO(dev, "MAC %pM\n", dev->dev_mac);
+#if defined(CONFIG_HW_NAT)
if (ppe_dev_register_hook != NULL)
ppe_dev_register_hook(net);
+#endif
return status;
}
@@ -1665,8 +1710,10 @@
if (!dev)
return;
+#if defined(CONFIG_HW_NAT)
if (ppe_dev_unregister_hook != NULL)
ppe_dev_unregister_hook(dev->net);
+#endif
unregister_netdev(dev->net);
flush_work(&dev->work);
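/*
 * Illustrative sketch (not part of this patch): the frag-copy loop added to
 * eth_start_xmit() above hand-builds a flat skb when GSO leaves paged data
 * behind, because req->buf needs one contiguous buffer. The kernel helper
 * skb_linearize() does the same job in place; a minimal equivalent, under
 * that same "one flat buffer" assumption, would be:
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *flatten_for_req_buf(struct sk_buff *skb)
{
	/* Pulls all page frags into the linear data area; returns 0 on success. */
	if (skb_linearize(skb) == 0)
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}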
diff --git a/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.h b/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.h
old mode 100644
new mode 100755
index 4eb1be6..56529b5
--- a/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.h
+++ b/src/kernel/linux/v4.19/drivers/usb/gadget/function/u_ether.h
@@ -316,8 +316,10 @@
extern unsigned long rndis_test_tx_usb_out;
extern unsigned long rndis_test_tx_complete;
extern void rx_fill(struct eth_dev *dev, gfp_t gfp_flags);
+#if defined(CONFIG_HW_NAT)
extern int (*ppe_hook_tx_rndis)(struct sk_buff *skb);
extern int (*ppe_hook_rx_rndis)(struct sk_buff *skb);
extern void (*ppe_dev_register_hook)(struct net_device *dev);
extern void (*ppe_dev_unregister_hook)(struct net_device *dev);
+#endif
#endif /* __U_ETHER_H */
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3.h b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3.h
old mode 100644
new mode 100755
index 120637a..dc334b1
--- a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3.h
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3.h
@@ -422,6 +422,8 @@
#endif
struct delayed_work forward_to_driver_work;
struct mtu3_md_sync_data *md_sync_data;
+ int detect_gpio;
+ int detect_irq;
};
static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g)
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core.c
old mode 100644
new mode 100755
index 80b90a1..e0ce587
--- a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core.c
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core.c
@@ -14,6 +14,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
#include "mtu3.h"
#include "mtu3_debug.h"
@@ -324,6 +326,7 @@
{
void __iomem *mbase = mtu->mac_base;
+ dev_info(mtu->dev, "%s\n", __func__);
/* be sure interrupts are disabled before registration of ISR */
mtu3_intr_disable(mtu);
@@ -356,11 +359,91 @@
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
ssusb_set_force_vbus(mtu->ssusb, true);
+ mtu3_writel(mbase, U3D_POWER_MANAGEMENT, (mtu3_readl(mbase, U3D_POWER_MANAGEMENT)| LPM_MODE(1)));
+ dev_info(mtu->dev, "%s, U3D_POWER_MANAGEMENT:0x%x\n", __func__, mtu3_readl(mbase, U3D_POWER_MANAGEMENT));
/* use new QMU format when HW version >= 0x1003 */
if (mtu->gen2cp)
mtu3_writel(mbase, U3D_QFCR, ~0x0);
}
+static irqreturn_t mtu3_vbus_detect_eint_isr(int irq, void *data)
+{
+ struct mtu3 *mtu = data;
+
+ disable_irq_nosync(irq);
+
+ if (!gpio_get_value(mtu->detect_gpio)) {
+ dev_info(mtu->dev, "low-level, vbus plug out\n");
+ ssusb_set_force_vbus(mtu->ssusb, false);
+ irq_set_irq_type(irq, IRQF_TRIGGER_HIGH);
+ } else {
+ dev_info(mtu->dev, "high-level, vbus plug in\n");
+ ssusb_set_force_vbus(mtu->ssusb, true);
+ irq_set_irq_type(irq, IRQF_TRIGGER_LOW);
+ }
+ enable_irq(irq);
+ return IRQ_HANDLED;
+}
+
+static int mtu3_usb_vbus_detect_init(struct mtu3 *mtu)
+{
+ struct device_node *node;
+ int ret;
+
+ node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mtu3");
+ if (!node) {
+ dev_err(mtu->dev, "map node failed\n");
+ return -ENODEV;
+ }
+
+ ret = of_get_named_gpio(node, "vbus_detect", 0);
+ if (ret < 0) {
+ dev_err(mtu->dev, "no vbus_detect gpio info\n");
+ return ret;
+ }
+ mtu->detect_gpio = ret;
+
+ ret = devm_gpio_request(mtu->dev, mtu->detect_gpio,
+ "mtu_vbus_detect");
+ if (ret < 0) {
+ dev_err(mtu->dev, "fail to request gpio%d (%d)\n",
+ mtu->detect_gpio, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_input(mtu->detect_gpio);
+ if (ret < 0) {
+ dev_err(mtu->dev, "fail to set gpio%d as input pin (%d)\n",
+ mtu->detect_gpio, ret);
+ return ret;
+ }
+
+ /* 20 ms debounce; not fatal if the GPIO controller cannot do it */
+ ret = gpio_set_debounce(mtu->detect_gpio, 20 * 1000);
+ if (ret < 0) {
+ dev_warn(mtu->dev, "fail to set gpio%d debounce (%d), ignored\n",
+ mtu->detect_gpio, ret);
+ ret = 0;
+ }
+
+ mtu->detect_irq = gpio_to_irq(mtu->detect_gpio);
+ if (mtu->detect_irq <= 0) {
+ dev_err(mtu->dev, "gpio%d to irq fail, mtu_vbus_detect(%d)\n",
+ mtu->detect_gpio, mtu->detect_irq);
+ return mtu->detect_irq ? mtu->detect_irq : -EINVAL;
+ }
+
+ ret = request_irq(mtu->detect_irq, mtu3_vbus_detect_eint_isr,
+ IRQF_TRIGGER_LOW, "mtu_detect_eint", mtu);
+ if (ret) {
+ dev_err(mtu->dev, "request eint(%d) fail (%d)\n",
+ mtu->detect_irq, ret);
+ return ret;
+ }
+ return 0;
+}
+
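/*
 * Illustrative sketch (not part of this patch): mtu3_usb_vbus_detect_init()
 * above uses the legacy integer GPIO API. On v4.19 the same VBUS-detect pin
 * could also be handled with the gpiod descriptor API, which folds the
 * request/direction steps together. This assumes a "vbus-detect" GPIO bound
 * to the controller's own device node and reuses the ISR above (which would
 * then read the pin via gpiod_get_value() instead of gpio_get_value()).
 */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int mtu3_vbus_detect_init_gpiod(struct mtu3 *mtu)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(mtu->dev, "vbus-detect", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* best effort; not every GPIO controller supports debounce */
	gpiod_set_debounce(gpiod, 20 * 1000);

	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(mtu->dev, irq, NULL,
			mtu3_vbus_detect_eint_isr,
			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			"mtu_detect_eint", mtu);
}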
void mtu3_start(struct mtu3 *mtu)
{
void __iomem *mbase = mtu->mac_base;
@@ -1026,7 +1109,7 @@
dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
goto gadget_err;
}
-
+ mtu3_usb_vbus_detect_init(mtu);
/* init as host mode, power down device IP for power saving */
if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
mtu3_stop(mtu);
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core1.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core1.c
new file mode 100755
index 0000000..0f2cb9b
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_core1.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_core1.c - hardware access layer and gadget init/exit of
+ * MediaTek usb3 Dual-Role Controller Driver
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+
+#include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_trace.h"
+
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+static int total_isr_debug = 1;
+module_param(total_isr_debug, uint, 0644);
+MODULE_PARM_DESC(total_isr_debug, "profile total USB isr");
+extern unsigned long total_isr;
+
+static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
+ u32 start_bit;
+
+ /* ensure that @mep->fifo_seg_size is power of two */
+ num_bits = roundup_pow_of_two(num_bits);
+ if (num_bits > fifo->limit)
+ return -EINVAL;
+
+ mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
+ num_bits = num_bits * (mep->slot + 1);
+ start_bit = bitmap_find_next_zero_area(fifo->bitmap,
+ fifo->limit, 0, num_bits, 0);
+ if (start_bit >= fifo->limit)
+ return -EOVERFLOW;
+
+ bitmap_set(fifo->bitmap, start_bit, num_bits);
+ mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
+ mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;
+
+ dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+
+ return mep->fifo_addr;
+}
+#endif
+
+static void ep_fifo_free(struct mtu3_ep *mep)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 addr = mep->fifo_addr;
+ u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT;
+ u32 start_bit;
+
+ if (unlikely(addr < fifo->base || bits > fifo->limit))
+ return;
+
+ start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT;
+ bitmap_clear(fifo->bitmap, start_bit, bits);
+ mep->fifo_size = 0;
+ mep->fifo_seg_size = 0;
+
+ dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+}
+
+/* enable/disable U3D SS function */
+static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)
+{
+ /* If usb3_en==0, LTSSM will go to SS.Disable state */
+ if (enable) {
+ /*
+ * Temp fix:
+ * The A60931 FPGA DTB has a true Type-C connector.
+ * The PHY reports vbus_present and then starts the lane swap,
+ * which can take about 120 ms. The device must wait for the
+ * lane swap to finish before enabling the U3 termination.
+ */
+ if (mtu->ssusb->plat_type == PLAT_FPGA && mtu->ssusb->fpga_phy_workaround == A60931_WORKAROUND) {
+ mdelay(180);
+ }
+ mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+ }
+ else
+ mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+
+ dev_info(mtu->dev, "USB3_EN = %d\n", !!enable);
+}
+
+/* set/clear U3D HS device soft connect */
+static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)
+{
+ if (enable) {
+ mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ } else {
+ mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ }
+ dev_info(mtu->dev, "SOFTCONN = %d\n", !!enable);
+}
+
+/* only port0 of U2/U3 supports device mode */
+static int mtu3_device_enable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+ u32 check_clk = 0;
+
+ mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+ if (mtu->is_u3_ip) {
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
+ SSUSB_U3_PORT_HOST_SEL));
+ }
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
+ (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
+ SSUSB_U2_PORT_HOST_SEL));
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ if (mtu->is_u3_ip)
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_DUAL_MODE);
+ }
+
+ return ssusb_check_clocks(mtu->ssusb, check_clk);
+}
+
+static void mtu3_device_disable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ if (mtu->is_u3_ip)
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
+
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+ SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* reset U3D's device module. */
+static void mtu3_device_reset(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+ udelay(1);
+ mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+}
+
+/* disable all interrupts */
+static void mtu3_intr_disable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Disable level 1 interrupts */
+ mtu3_writel(mbase, U3D_LV1IECR, ~0x0);
+ /* Disable endpoint interrupts */
+ mtu3_writel(mbase, U3D_EPIECR, ~0x0);
+}
+
+static void mtu3_intr_status_clear(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Clear EP0 and Tx/Rx EPn interrupts status */
+ mtu3_writel(mbase, U3D_EPISR, ~0x0);
+ /* Clear U2 USB common interrupts status */
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0);
+ /* Clear U3 LTSSM interrupts status */
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0);
+ /* Clear speed change interrupt status */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0);
+}
+
+/* enable system global interrupt */
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+static void mtu3_intr_enable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ /*Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */
+ value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
+ mtu3_writel(mbase, U3D_LV1IESR, value);
+
+ /* Enable U2 common USB interrupts */
+ value = SUSPEND_INTR | RESUME_INTR | RESET_INTR | LPM_RESUME_INTR;
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
+
+ if (mtu->is_u3_ip) {
+ /* Enable U3 LTSSM interrupts */
+ value = HOT_RST_INTR | WARM_RST_INTR |
+ ENTER_U3_INTR | EXIT_U3_INTR;
+ mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
+ }
+
+ /* Enable QMU interrupts. */
+ value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
+ RXQ_LENERR_INT | RXQ_ZLPERR_INT;
+ mtu3_writel(mbase, U3D_QIESR1, value);
+
+ /* Enable speed change interrupt */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
+}
+#else
+static void mtu3_intr_enable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ /*Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */
+ value = QMU_INTR;
+ mtu3_writel(mbase, U3D_LV1IESR, value);
+
+ /* Enable QMU interrupts. */
+ value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
+ RXQ_LENERR_INT | RXQ_ZLPERR_INT;
+ mtu3_writel(mbase, U3D_QIESR1, value);
+}
+
+#endif
+
+/* reset: u2 - data toggle, u3 - SeqN, flow control status etc */
+static void mtu3_ep_reset(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ u32 rst_bit = EP_RST(mep->is_in, mep->epnum);
+
+ mtu3_setbits(mtu->mac_base, U3D_EP_RST, rst_bit);
+ mtu3_clrbits(mtu->mac_base, U3D_EP_RST, rst_bit);
+}
+
+/* set/clear the stall and toggle bits for non-ep0 */
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ u8 epnum = mep->epnum;
+ u32 csr;
+
+ if (mep->is_in) { /* TX */
+ csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS;
+ if (set)
+ csr |= TX_SENDSTALL;
+ else
+ csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr);
+ } else { /* RX */
+ csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS;
+ if (set)
+ csr |= RX_SENDSTALL;
+ else
+ csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr);
+ }
+
+ if (!set) {
+ mtu3_ep_reset(mep);
+ mep->flags &= ~MTU3_EP_STALL;
+ } else {
+ mep->flags |= MTU3_EP_STALL;
+ }
+
+ dev_dbg(mtu->dev, "%s: %s\n", mep->name,
+ set ? "SEND STALL" : "CLEAR STALL, with EP RESET");
+}
+
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
+{
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+ if (mtu->is_u3_ip && mtu->max_speed >= USB_SPEED_SUPER)
+ mtu3_ss_func_set(mtu, is_on);
+ else
+ mtu3_hs_softconn_set(mtu, is_on);
+#endif
+
+ dev_info(mtu->dev, "gadget (%s) pullup D%s\n",
+ usb_speed_string(mtu->max_speed), is_on ? "+" : "-");
+}
+
+static void mtu3_set_speed(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH))
+ mtu->max_speed = USB_SPEED_HIGH;
+
+ if (mtu->max_speed == USB_SPEED_FULL) {
+ /* disable U3 SS function */
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* disable HS function */
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_HIGH) {
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* HS/FS detected by HW */
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_SUPER) {
+ mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_SSP_SPEED);
+ }
+
+ dev_info(mtu->dev, "max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+}
+
+static void mtu3_regs_init(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ dev_info(mtu->dev, "%s\n", __func__);
+
+ /* be sure interrupts are disabled before registration of ISR */
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->is_u3_ip) {
+ /* disable LGO_U1/U2 by default */
+ mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
+ SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+ /* accept LGO_U1/U2 link commands from the host */
+ mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
+ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
+ /* the device responds to u3_exit from the host automatically */
+ mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+ /* automatically build a U2 link when U3 detection fails */
+ mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+ /* auto clear SOFT_CONN when USB3_EN is cleared while working as HS */
+ mtu3_setbits(mbase, U3D_U3U2_SWITCH_CTRL, SOFTCON_CLR_AUTO_EN);
+ }
+
+ mtu3_set_speed(mtu);
+
+ /* delay about 0.1us from detecting reset to send chirp-K */
+ mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
+ /* U2/U3 detected by HW */
+ mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
+ /* vbus detected by HW */
+ mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+ /* enable automatic hardware remote wakeup from L1 */
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
+
+ //ssusb_set_force_vbus(mtu->ssusb, true);
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_MODE(1));
+ dev_info(mtu->dev, "%s, U3D_POWER_MANAGEMENT:0x%x\n", __func__,
+ mtu3_readl(mbase, U3D_POWER_MANAGEMENT));
+ /* use new QMU format when HW version >= 0x1003 */
+ if (mtu->gen2cp)
+ mtu3_writel(mbase, U3D_QFCR, ~0x0);
+}
+
+static irqreturn_t mtu3_vbus_detect_eint_isr(int irq, void *data)
+{
+ struct mtu3 *mtu = data;
+
+ disable_irq_nosync(irq);
+
+ if (!gpio_get_value(mtu->detect_gpio)) {
+ dev_info(mtu->dev, "low-level, vbus plug out\n");
+ ssusb_set_force_vbus(mtu->ssusb, false);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ } else {
+ dev_info(mtu->dev, "high-level, vbus plug in\n");
+ ssusb_set_force_vbus(mtu->ssusb, true);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ }
+ enable_irq(irq);
+ return IRQ_HANDLED;
+}
+
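+/*
+ * VBUS cable detection below emulates edge detection with a level-triggered
+ * GPIO interrupt: the ISR masks the line, samples the GPIO, forces VBUS
+ * on/off accordingly, flips the trigger polarity to catch the opposite
+ * transition, and then re-enables the interrupt.
+ */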
+static int mtu3_usb_vbus_detect_init(struct mtu3 *mtu)
+{
+ struct device_node *node;
+ int ret;
+
+ node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mtu3");
+ if (!node) {
+ dev_err(mtu->dev, "map node failed\n");
+ return -ENODEV;
+ }
+
+ ret = of_get_named_gpio(node, "vbus_detect", 0);
+ of_node_put(node);
+ if (ret < 0) {
+ dev_err(mtu->dev, "no vbus_detect gpio info\n");
+ return ret;
+ }
+ mtu->detect_gpio = ret;
+
+ ret = devm_gpio_request(mtu->dev, mtu->detect_gpio,
+ "mtu_vbus_detect");
+ if (ret < 0) {
+ dev_err(mtu->dev, "fail to request gpio%d (%d)\n",
+ mtu->detect_gpio, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_input(mtu->detect_gpio);
+ if (ret < 0) {
+ dev_err(mtu->dev, "fail to set gpio%d as input pin (%d)\n",
+ mtu->detect_gpio, ret);
+ return ret;
+ }
+
+ /* 20 ms debounce */
+ ret = gpio_set_debounce(mtu->detect_gpio, 20 * 1000);
+ if (ret < 0) {
+ dev_err(mtu->dev, "fail to set gpio%d debounce (%d)\n",
+ mtu->detect_gpio, ret);
+ return ret;
+ }
+
+ mtu->detect_irq = gpio_to_irq(mtu->detect_gpio);
+ if (mtu->detect_irq <= 0) {
+ dev_err(mtu->dev, "gpio%d to irq fail, mtu_vbus_detect(%d)\n",
+ mtu->detect_gpio, mtu->detect_irq);
+ return mtu->detect_irq ? mtu->detect_irq : -EINVAL;
+ }
+
+ ret = request_irq(mtu->detect_irq, mtu3_vbus_detect_eint_isr,
+ IRQF_TRIGGER_LOW, "mtu_detect_eint", mtu);
+ if (ret) {
+ dev_err(mtu->dev, "request eint(%d) fail (%d)\n",
+ mtu->detect_irq, ret);
+ return ret;
+ }
+ return 0;
+}
+
+void mtu3_start(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__,
+ mtu3_readl(mbase, U3D_DEVICE_CONTROL));
+
+ mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+ /* Initialize the device capability */
+ mtu3_regs_init(mtu);
+#else
+ /*
+ * When the U2 port is disabled, the USB2_CSR registers are reset to
+ * their default values once the port is re-enabled (HS is enabled by
+ * default). So if the MAC is forced to work at FS, disable the HS
+ * function here.
+ */
+ if (mtu->max_speed == USB_SPEED_FULL)
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+#endif
+
+ /* Initialize the default interrupts */
+ mtu3_intr_enable(mtu);
+ mtu->is_active = 1;
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 1);
+}
+
+void mtu3_stop(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 0);
+
+ mtu->is_active = 0;
+ mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
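+
+/*
+ * mtu3_start()/mtu3_stop() are symmetric around SSUSB_IP_DEV_PDN: start
+ * clears the device power-down bit before touching MAC registers, while
+ * stop sets it again only after interrupts are masked and the soft
+ * connection is dropped.
+ */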
+
+/* for non-ep0 */
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult)
+{
+ void __iomem *mbase = mtu->mac_base;
+ bool gen2cp = mtu->gen2cp;
+ int epnum = mep->epnum;
+ u32 csr0, csr1, csr2;
+ int fifo_sgsz, fifo_addr;
+ int num_pkts;
+
+ fifo_addr = ep_fifo_alloc(mep, mep->maxp);
+ if (fifo_addr < 0) {
+ dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp);
+ return -ENOMEM;
+ }
+ fifo_sgsz = ilog2(mep->fifo_seg_size);
+ dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz,
+ mep->fifo_seg_size, mep->fifo_size);
+
+ if (mep->is_in) {
+ csr0 = TX_TXMAXPKTSZ(mep->maxp);
+ csr0 |= TX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
+ csr1 |= TX_MAX_PKT(gen2cp, num_pkts) | TX_MULT(gen2cp, mult);
+
+ csr2 = TX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= TX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= TX_TYPE(TYPE_ISO);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= TX_TYPE(TYPE_INT);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR2(epnum)));
+ } else {
+ csr0 = RX_RXMAXPKTSZ(mep->maxp);
+ csr0 |= RX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
+ csr1 |= RX_MAX_PKT(gen2cp, num_pkts) | RX_MULT(gen2cp, mult);
+
+ csr2 = RX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= RX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= RX_TYPE(TYPE_ISO);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= RX_TYPE(TYPE_INT);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR2(epnum)));
+ }
+
+ dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2);
+ dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n",
+ __func__, mep->name, mep->fifo_addr, mep->fifo_size,
+ fifo_sgsz, mep->fifo_seg_size);
+
+ return 0;
+}
+#else
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+
+ if (mep->is_in)
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
+ else
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
+ return 0;
+}
+
+#endif
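+
+/*
+ * num_pkts above folds burst and mult into one packet budget; for example a
+ * SuperSpeed bulk endpoint with burst = 15 and mult = 0 programs
+ * TX_MAX_PKT/RX_MAX_PKT with (15 + 1) * (0 + 1) - 1 = 15. In the
+ * CONFIG_MTU3_MD_USB_EP0_CTRL variant only the QMU done interrupt is enabled
+ * here and the EP CSR setup is presumably done elsewhere.
+ */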
+
+/* for non-ep0 */
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+
+ if (mep->is_in) {
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum));
+ } else {
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum));
+ }
+
+ mtu3_ep_reset(mep);
+ ep_fifo_free(mep);
+
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name);
+}
+
+/*
+ * Two scenarios:
+ * 1. when the device IP supports SS, the fifos of EP0, TX EPs and
+ * RX EPs are separate;
+ * 2. when it supports only HS, the fifo is shared by all EPs; the
+ * capability registers @EPNTXFFSZ and @EPNRXFFSZ indicate the total
+ * fifo size of non-ep0 endpoints, while ep0's fifo is fixed at 64B,
+ * so the total fifo size is 64B + @EPNTXFFSZ.
+ * Because the first 64B must be reserved for EP0, the non-ep0 fifo
+ * starts at offset 64 and is split into two equal halves for TX and
+ * RX EPs for simplicity.
+ */
+static void get_ep_fifo_config(struct mtu3 *mtu)
+{
+ struct mtu3_fifo_info *tx_fifo;
+ struct mtu3_fifo_info *rx_fifo;
+ u32 fifosize;
+
+ if (mtu->is_u3_ip) {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = 0;
+ tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ);
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base = 0;
+ rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U3_IP_SLOT_DEFAULT;
+ } else {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE;
+ tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base =
+ tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT;
+ rx_fifo->limit = tx_fifo->limit;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U2_IP_SLOT_DEFAULT;
+ }
+
+ dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n",
+ __func__, tx_fifo->base, tx_fifo->limit,
+ rx_fifo->base, rx_fifo->limit);
+}
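+
+/*
+ * Worked example for the U2-only branch above, assuming MTU3_EP_FIFO_UNIT is
+ * 64 bytes and MTU3_U2_IP_EP0_FIFO_SIZE is 64: an IP reporting EPNTXFFSZ =
+ * 0x2000 ends up with tx_fifo.base = 64, tx_fifo.limit = (0x2000 / 64) / 2 =
+ * 64 units, and rx_fifo.base = 64 + 64 * 64 = 4160.
+ */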
+
+void mtu3_ep0_setup(struct mtu3 *mtu)
+{
+ u32 maxpacket = mtu->g.ep0->maxpacket;
+ u32 csr;
+
+ dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket);
+
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR);
+ csr &= ~EP0_MAXPKTSZ_MSK;
+ csr |= EP0_MAXPKTSZ(maxpacket);
+ csr &= EP0_W1C_BITS;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+ /* Enable EP0 interrupt */
+ mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR | SETUPENDISR);
+}
+
+static int mtu3_mem_alloc(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_ep *ep_array;
+ int in_ep_num, out_ep_num;
+ u32 cap_epinfo;
+ int ret;
+ int i;
+
+ cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO);
+ in_ep_num = CAP_TX_EP_NUM(cap_epinfo);
+ out_ep_num = CAP_RX_EP_NUM(cap_epinfo);
+
+ dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n",
+ mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num,
+ mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num);
+
+ /* one for ep0, another is reserved */
+ mtu->num_eps = min(in_ep_num, out_ep_num) + 1;
+ ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL);
+ if (ep_array == NULL)
+ return -ENOMEM;
+
+ mtu->ep_array = ep_array;
+ mtu->in_eps = ep_array;
+ mtu->out_eps = &ep_array[mtu->num_eps];
+ /* ep0 uses in_eps[0], out_eps[0] is reserved */
+ mtu->ep0 = mtu->in_eps;
+ mtu->ep0->mtu = mtu;
+ mtu->ep0->epnum = 0;
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ struct mtu3_ep *mep = mtu->in_eps + i;
+
+ mep->fifo = &mtu->tx_fifo;
+ mep = mtu->out_eps + i;
+ mep->fifo = &mtu->rx_fifo;
+ }
+
+ get_ep_fifo_config(mtu);
+
+ ret = mtu3_qmu_init(mtu);
+ if (ret)
+ kfree(mtu->ep_array);
+
+ return ret;
+}
+
+static void mtu3_mem_free(struct mtu3 *mtu)
+{
+ mtu3_qmu_exit(mtu);
+ kfree(mtu->ep_array);
+}
+
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ enum usb_device_speed udev_speed;
+ u32 maxpkt = 64;
+ u32 link;
+ u32 speed;
+
+ link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
+ link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
+ dev_info(mtu->dev, "=== LINK[%x] ===\n", link);
+
+ if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
+ return IRQ_NONE;
+
+ speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
+
+ switch (speed) {
+ case MTU3_SPEED_FULL:
+ udev_speed = USB_SPEED_FULL;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_HIGH:
+ udev_speed = USB_SPEED_HIGH;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_SUPER:
+ udev_speed = USB_SPEED_SUPER;
+ maxpkt = 512;
+ break;
+ case MTU3_SPEED_SUPER_PLUS:
+ udev_speed = USB_SPEED_SUPER_PLUS;
+ maxpkt = 512;
+ break;
+ default:
+ udev_speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+ mtu3_dbg_trace(mtu->dev, "link speed %s",
+ usb_speed_string(udev_speed));
+
+ mtu->g.speed = udev_speed;
+ mtu->g.ep0->maxpacket = maxpkt;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+ if (udev_speed == USB_SPEED_UNKNOWN)
+ mtu3_gadget_disconnect(mtu);
+ else
+ mtu3_ep0_setup(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 ltssm;
+
+ ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
+ ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
+ dev_info(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+ trace_mtu3_u3_ltssm_isr(ltssm);
+
+ if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
+ mtu3_gadget_reset(mtu);
+
+ if (ltssm & VBUS_FALL_INTR) {
+ mtu3_ss_func_set(mtu, false);
+ mtu3_gadget_reset(mtu);
+ }
+
+ if (ltssm & VBUS_RISE_INTR)
+ mtu3_ss_func_set(mtu, true);
+
+ if (ltssm & EXIT_U3_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (ltssm & ENTER_U3_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 u2comm;
+
+ u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
+ u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
+ dev_info(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+ trace_mtu3_u2_common_isr(u2comm);
+
+ if (u2comm & SUSPEND_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ if (u2comm & RESUME_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (u2comm & RESET_INTR)
+ mtu3_gadget_reset(mtu);
+
+ if (u2comm & LPM_RESUME_INTR) {
+ if (!(mtu3_readl(mbase, U3D_POWER_MANAGEMENT) & LPM_HRWE))
+ mtu3_setbits(mbase, U3D_USB20_MISC_CONTROL, LPM_U3_ACK_EN);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_irq(int irq, void *data)
+{
+ struct mtu3 *mtu = (struct mtu3 *)data;
+ unsigned long flags;
+ u32 level1;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+ if (total_isr_debug)
+ total_isr++;
+#endif
+ /* U3D_LV1ISR is RU */
+ level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
+ level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
+//zhengzhou 0318 usb otg */
+ /*if (unlikely(!mtu->softconnect) && (level1 & MAC2_INTR)) {
+ dev_info(mtu->dev, "%s !softconnect MAC2_INTR\n", __func__);
+ u2comm = mtu3_readl(mtu->mac_base, U3D_COMMON_USB_INTR);
+ u2comm &= mtu3_readl(mtu->mac_base, U3D_COMMON_USB_INTR_ENABLE);
+ mtu3_writel(mtu->mac_base, U3D_COMMON_USB_INTR, u2comm);
+ goto done;
+ }
+
+ if (unlikely(!mtu->softconnect) && (level1 & BMU_INTR)) {
+ dev_info(mtu->dev, "%s !softconnect BMU_INTR\n", __func__);
+ int_status = mtu3_readl(mtu->mac_base, U3D_EPISR);
+ int_status &= mtu3_readl(mtu->mac_base, U3D_EPIER);
+ mtu3_writel(mtu->mac_base, U3D_EPISR, int_status);
+ goto done;
+ }
+ */
+//zhengzhou 0318 usb otg */
+ if (level1 & EP_CTRL_INTR)
+ mtu3_link_isr(mtu);
+
+ if (level1 & MAC2_INTR)
+ mtu3_u2_common_isr(mtu);
+
+ if (level1 & MAC3_INTR)
+ mtu3_u3_ltssm_isr(mtu);
+
+ if (level1 & BMU_INTR)
+ mtu3_ep0_isr(mtu);
+
+ if (level1 & QMU_INTR) {
+#ifndef CONFIG_MTU3_MD_USB_EP0_CTRL
+ if (total_isr_debug)
+ total_isr--;
+#endif
+ mtu3_qmu_isr(mtu);
+ }
+
+//done://zhengzhou 0318 usb otg */
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+#else
+static irqreturn_t mtu3_irq(int irq, void *data)
+{
+ struct mtu3 *mtu = (struct mtu3 *)data;
+ unsigned long flags;
+ u32 level1;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ /* U3D_LV1ISR is RU */
+ level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
+ level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
+
+ if (level1 & QMU_INTR)
+ mtu3_qmu_isr(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#endif
+
+static int mtu3_hw_init(struct mtu3 *mtu)
+{
+ u32 value;
+ int ret;
+
+ value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_TRUNK_VERS);
+ mtu->hw_version = IP_TRUNK_VERS(value);
+ #if 0
+ mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003);
+ #else
+ mtu->gen2cp = 0;
+ #endif
+
+ value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+ mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
+
+ dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
+ mtu->is_u3_ip ? "U3" : "U2");
+
+ mtu3_device_reset(mtu);
+
+ ret = mtu3_device_enable(mtu);
+ if (ret) {
+ dev_err(mtu->dev, "device enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = mtu3_mem_alloc(mtu);
+ if (ret)
+ return -ENOMEM;
+
+ mtu3_regs_init(mtu);
+
+ return 0;
+}
+
+static void mtu3_hw_exit(struct mtu3 *mtu)
+{
+ mtu3_device_disable(mtu);
+ mtu3_mem_free(mtu);
+}
+
+/*
+ * The 32-bit DMA mask is set by default; check whether the controller
+ * supports 36-bit DMA and, if it does, switch to the 36-bit DMA mask.
+ */
+static int mtu3_set_dma_mask(struct mtu3 *mtu)
+{
+ struct device *dev = mtu->dev;
+ bool is_36bit = false;
+ int ret = 0;
+ u32 value;
+
+ value = mtu3_readl(mtu->mac_base, U3D_MISC_CTRL);
+ if (value & DMA_ADDR_36BIT) {
+ is_36bit = true;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ /* If setting the 36-bit DMA mask fails, fall back to the 32-bit mask */
+ if (ret) {
+ is_36bit = false;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ }
+ }
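+ /*
+ * If the IP does not advertise DMA_ADDR_36BIT, the default 32-bit
+ * mask mentioned above is kept and ret stays 0.
+ */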
+ dev_info(dev, "dma mask: %s bits\n", is_36bit ? "36" : "32");
+
+ return ret;
+}
+
+int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+ struct device *dev = ssusb->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtu3 *mtu = NULL;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL);
+ if (mtu == NULL)
+ return -ENOMEM;
+
+ mtu->irq = platform_get_irq(pdev, 0);
+ if (mtu->irq < 0) {
+ dev_err(dev, "fail to get irq number\n");
+ return mtu->irq;
+ }
+ dev_info(dev, "irq %d\n", mtu->irq);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
+ mtu->mac_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtu->mac_base)) {
+ dev_err(dev, "error mapping memory for dev mac\n");
+ return PTR_ERR(mtu->mac_base);
+ }
+
+ spin_lock_init(&mtu->lock);
+ mtu->dev = dev;
+ mtu->ippc_base = ssusb->ippc_base;
+ ssusb->mac_base = mtu->mac_base;
+ ssusb->u3d = mtu;
+ mtu->ssusb = ssusb;
+ mtu->max_speed = usb_get_maximum_speed(dev);
+
+#ifdef CONFIG_MTU3_MD_USB_EP0_CTRL
+ INIT_DELAYED_WORK(&mtu->forward_to_driver_work,
+ mtu3_forward_to_driver_work);
+ mtu->md_sync_data = devm_kzalloc(dev, sizeof(*mtu->md_sync_data),
+ GFP_KERNEL);
+ if (!mtu->md_sync_data)
+ return -ENOMEM;
+#endif
+ /* check the max_speed parameter */
+ switch (mtu->max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(dev, "invalid max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default as SSP */
+ mtu->max_speed = USB_SPEED_SUPER_PLUS;
+ break;
+ }
+
+ dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n",
+ mtu->mac_base, mtu->ippc_base);
+
+ ret = mtu3_hw_init(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 hw init failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = mtu3_set_dma_mask(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 set dma_mask failed:%d\n", ret);
+ goto dma_mask_err;
+ }
+
+ ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
+ if (ret) {
+ dev_err(dev, "request irq %d failed!\n", mtu->irq);
+ goto irq_err;
+ }
+
+ device_init_wakeup(dev, true);
+
+ ret = mtu3_gadget_setup(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
+ goto gadget_err;
+ }
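+ /*
+ * VBUS cable detection is best-effort here: its return value is
+ * ignored, so a board without the "vbus_detect" gpio still finishes
+ * gadget init.
+ */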
+ mtu3_usb_vbus_detect_init(mtu);
+ /* init as host mode, power down device IP for power saving */
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_stop(mtu);
+
+ ssusb_dev_debugfs_init(ssusb);
+
+ dev_dbg(dev, " %s() done...\n", __func__);
+
+ return 0;
+
+gadget_err:
+ device_init_wakeup(dev, false);
+
+dma_mask_err:
+irq_err:
+ mtu3_hw_exit(mtu);
+ ssusb->u3d = NULL;
+ dev_err(dev, " %s() fail...\n", __func__);
+
+ return ret;
+}
+
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{
+ struct mtu3 *mtu = ssusb->u3d;
+
+ mtu3_gadget_cleanup(mtu);
+ device_init_wakeup(ssusb->dev, false);
+ mtu3_hw_exit(mtu);
+}
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr.c
old mode 100644
new mode 100755
index 50342e0..3a3ef38
--- a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr.c
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr.c
@@ -42,9 +42,12 @@
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
+ dev_info(ssusb->dev, "%s\n", __func__);
//if (!ssusb->otg_switch.is_u3_drd) {//zhengzhou 0318 usb otg */
+ if (!ssusb->otg_switch.is_u3_drd) {
mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
+ }
//}
}
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr_1.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr_1.c
new file mode 100755
index 0000000..3a3ef38
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_dr_1.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/usb/role.h>
+#include <linux/of_platform.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+#include "mtu3_debug.h"
+
+#define USB2_PORT 2
+#define USB3_PORT 3
+
+enum mtu3_vbus_id_state {
+ MTU3_ID_FLOAT = 1,
+ MTU3_ID_GROUND,
+ MTU3_VBUS_OFF,
+ MTU3_VBUS_VALID,
+};
+
+static char *mailbox_state_string(enum mtu3_vbus_id_state state)
+{
+ switch (state) {
+ case MTU3_ID_FLOAT:
+ return "ID_FLOAT";
+ case MTU3_ID_GROUND:
+ return "ID_GROUND";
+ case MTU3_VBUS_OFF:
+ return "VBUS_OFF";
+ case MTU3_VBUS_VALID:
+ return "VBUS_VALID";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void toggle_opstate(struct ssusb_mtk *ssusb)
+{
+ dev_info(ssusb->dev, "%s\n", __func__);
+ //if (!ssusb->otg_switch.is_u3_drd) {//zhengzhou 0318 usb otg */
+ if (!ssusb->otg_switch.is_u3_drd) {
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
+ }
+ //}
+}
+
+/* only port0 supports dual-role mode */
+static int ssusb_port0_switch(struct ssusb_mtk *ssusb,
+ int version, bool tohost)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ u32 value;
+
+ dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__,
+ version, tohost ? "host" : "device");
+
+ if (version == USB2_PORT) {
+ /* 1. power off and disable u2 port0 */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+
+ /* 2. power on, enable u2 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+ value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) :
+ (value & (~SSUSB_U2_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+ } else {
+ /* 1. power off and disable u3 port0 */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+
+ /* 2. power on, enable u3 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+ value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) :
+ (value & (~SSUSB_U3_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+ }
+
+ return 0;
+}
+
+static void ssusb_ip_sleep(struct ssusb_mtk *ssusb)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+
+ /* Set below sequence to avoid power leakage */
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+ SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+ udelay(50);
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+}
+
+static void switch_port_to_on(struct ssusb_mtk *ssusb, bool on)
+{
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ if (on) {
+ //ssusb_clks_enable(ssusb);//zhengzhou 0318 usb otg */
+ ssusb_phy_power_on(ssusb);
+ ssusb_ip_sw_reset(ssusb);
+ } else {
+ ssusb_ip_sleep(ssusb);
+ ssusb_phy_power_off(ssusb);
+ //ssusb_clks_disable(ssusb);//zhengzhou 0318 usb otg */
+ }
+}
+
+static void switch_port_to_host(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, true);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, true);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+
+ /* after all clocks are stable */
+ toggle_opstate(ssusb);
+}
+
+static void switch_port_to_device(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_info(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, false);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, false);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct regulator *vbus = otg_sx->vbus;
+ int ret;
+
+ /* vbus is optional */
+ if (!vbus)
+ return 0;
+
+ dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+
+ if (is_on) {
+ ret = regulator_enable(vbus);
+ if (ret) {
+ dev_err(ssusb->dev, "vbus regulator enable failed\n");
+ return ret;
+ }
+ } else {
+ regulator_disable(vbus);
+ }
+
+ return 0;
+}
+
+static void ssusb_gadget_disconnect(struct mtu3 *mtu)
+{
+ /* notify gadget driver */
+ if (mtu->g.speed == USB_SPEED_UNKNOWN)
+ return;
+
+ if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
+ mtu->gadget_driver->disconnect(&mtu->g);
+ mtu->g.speed = USB_SPEED_UNKNOWN;
+ }
+
+ usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+}
+
+/*
+ * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
+ * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
+ */
+static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
+ enum mtu3_vbus_id_state status)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct mtu3 *mtu = ssusb->u3d;
+ unsigned long flags;
+
+ dev_info(ssusb->dev, "mailbox %s\n", mailbox_state_string(status));
+ mtu3_dbg_trace(ssusb->dev, "mailbox %s", mailbox_state_string(status));
+
+ switch (status) {
+ case MTU3_ID_GROUND:
+ switch_port_to_host(ssusb);
+ ssusb_set_vbus(otg_sx, 1);
+ ssusb->is_host = true;
+ otg_sx->sw_state |= MTU3_SW_ID_GROUND;
+ break;
+ case MTU3_ID_FLOAT:
+ ssusb->is_host = false;
+ ssusb_set_vbus(otg_sx, 0);
+ switch_port_to_device(ssusb);
+ otg_sx->sw_state &= ~MTU3_SW_ID_GROUND;
+ break;
+ case MTU3_VBUS_OFF:
+ /* killing any outstanding requests */
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu3_nuke_all_ep(mtu);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ mtu3_stop(mtu);
+ pm_relax(ssusb->dev);
+ //ssusb_set_force_vbus(ssusb, false);//zhengzhou 0318 usb otg */
+ ssusb_gadget_disconnect(mtu);
+ otg_sx->sw_state &= ~MTU3_SW_VBUS_VALID;
+ break;
+ case MTU3_VBUS_VALID:
+ //ssusb_set_force_vbus(ssusb, true);//zhengzhou 0318 usb otg */
+ /* avoid suspend when works as device */
+ pm_stay_awake(ssusb->dev);
+ mtu3_start(mtu);
+ otg_sx->sw_state |= MTU3_SW_VBUS_VALID;
+ break;
+ default:
+ dev_err(ssusb->dev, "invalid state\n");
+ }
+}
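+
+/*
+ * Note: MTU3_VBUS_OFF/MTU3_VBUS_VALID also pair pm_relax()/pm_stay_awake()
+ * on ssusb->dev, so the system is kept awake for as long as the controller
+ * acts as a device with a valid VBUS.
+ */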
+
+static void ssusb_id_work(struct work_struct *work)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(work, struct otg_switch_mtk, id_work);
+ //zhengzhou 0318 usb otg start*/
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+
+ if (otg_sx->id_event) {
+ msleep(500);
+ pm_stay_awake(ssusb->dev);
+ switch_port_to_on(ssusb, true);
+ ssusb_host_enable(ssusb);
+ /* register host driver */
+ of_platform_populate(ssusb->dev->of_node,
+ NULL, NULL, ssusb->dev);
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ } else {
+ /* FIXME: debounce time for xhci finish disconnection */
+ //mdelay(200);
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
+ of_platform_depopulate(ssusb->dev);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ switch_port_to_on(ssusb, false);
+ pm_relax(ssusb->dev);
+ }
+ //zhengzhou 0318 usb otg end*/
+}
+
+static void ssusb_vbus_work(struct work_struct *work)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(work, struct otg_switch_mtk, vbus_work);
+ //zhengzhou 0318 usb otg start*/
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+
+ if (otg_sx->vbus_event) {
+ msleep(500);
+ switch_port_to_on(ssusb, true);
+ switch_port_to_device(ssusb);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ } else {
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ switch_port_to_on(ssusb, false);
+ }
+ //zhengzhou 0318 usb otg end*/
+}
+
+/*
+ * @ssusb_id_notifier is called in atomic context, but @ssusb_set_mailbox
+ * may sleep, so use work queue here
+ */
+static int ssusb_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, id_nb);
+
+ otg_sx->id_event = event;
+ schedule_work(&otg_sx->id_work);
+
+ return NOTIFY_DONE;
+}
+
+static int ssusb_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, vbus_nb);
+
+ otg_sx->vbus_event = event;
+ schedule_work(&otg_sx->vbus_work);
+
+ return NOTIFY_DONE;
+}
+
+static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct extcon_dev *edev = otg_sx->edev;
+ int ret;
+
+ /* extcon is optional */
+ if (!edev)
+ return 0;
+
+ otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
+ ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB,
+ &otg_sx->vbus_nb);
+ if (ret < 0) {
+ dev_err(ssusb->dev, "failed to register notifier for USB\n");
+ return ret;
+ }
+
+ otg_sx->id_nb.notifier_call = ssusb_id_notifier;
+ ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB_HOST,
+ &otg_sx->id_nb);
+ if (ret < 0) {
+ dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
+ return ret;
+ }
+
+ dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
+ extcon_get_state(edev, EXTCON_USB),
+ extcon_get_state(edev, EXTCON_USB_HOST));
+
+ /* default as host, switch to device mode if needed */
+ if (extcon_get_state(edev, EXTCON_USB_HOST) == false) {
+ of_platform_depopulate(ssusb->dev); //zhengzhou 0318 usb otg */
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ }
+ if (extcon_get_state(edev, EXTCON_USB) == true)
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+
+ return 0;
+}
+
+/*
+ * We provide an interface via debugfs to switch between host and device
+ * modes depending on user input.
+ * This is useful in special cases, such as a board that uses a Type-A
+ * receptacle but still wants to support dual-role mode.
+ */
+void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ if (to_host) {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ } else {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ }
+}
+
+void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
+ enum mtu3_dr_force_mode mode)
+{
+ u32 value;
+
+ value = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
+ switch (mode) {
+ case MTU3_DR_FORCE_DEVICE:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_HOST:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG;
+ value &= ~SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_NONE:
+ value &= ~(SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG);
+ break;
+ default:
+ return;
+ }
+ mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
+}
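+
+/*
+ * Dual-role forcing is done purely through the IDDIG override: FORCE_IDDIG
+ * hands the ID input to software and RG_IDDIG supplies the value (set means
+ * ID high, i.e. device; cleared means ID low, i.e. host), while FORCE_NONE
+ * returns the pin to hardware ID detection.
+ */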
+
+static int ssusb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ bool id_event, vbus_event;
+
+ dev_info(dev, "role_sw_set role %d\n", role);
+
+ otg_sx->latest_role = role;
+
+ if (otg_sx->op_mode != MTU3_DR_OPERATION_NORMAL) {
+ dev_info(dev, "op_mode %d, skip set role\n", otg_sx->op_mode);
+ return 0;
+ }
+
+ id_event = (role == USB_ROLE_HOST);
+ vbus_event = (role == USB_ROLE_DEVICE);
+
+ if (!!(otg_sx->sw_state & MTU3_SW_VBUS_VALID) ^ vbus_event) {
+ if (vbus_event) {
+ if (ssusb->clk_mgr) {
+ switch_port_to_on(ssusb, true);
+ switch_port_to_device(ssusb);
+ }
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ } else {
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ if (ssusb->clk_mgr)
+ switch_port_to_on(ssusb, false);
+ }
+ }
+
+ if (!!(otg_sx->sw_state & MTU3_SW_ID_GROUND) ^ id_event) {
+ if (id_event) {
+ if (ssusb->clk_mgr) {
+ pm_stay_awake(ssusb->dev);
+ switch_port_to_on(ssusb, true);
+ ssusb_host_enable(ssusb);
+ /* register host driver */
+ of_platform_populate(dev->of_node,
+ NULL, NULL, dev);
+ }
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ } else {
+ /* FIXME: debounce time for xhci finish disconnection */
+ mdelay(200);
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
+ if (ssusb->clk_mgr) {
+ /* unregister host driver */
+ of_platform_depopulate(dev);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ switch_port_to_on(ssusb, false);
+ pm_relax(ssusb->dev);
+ } else
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ }
+ }
+
+ return 0;
+}
+
+static enum usb_role ssusb_role_sw_get(struct device *dev)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ enum usb_role role;
+
+ role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+
+ return role;
+}
+
+static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+
+ if (!otg_sx->role_sw_used)
+ return 0;
+
+ role_sx_desc.set = ssusb_role_sw_set;
+ role_sx_desc.get = ssusb_role_sw_get;
+ otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc);
+
+ if (IS_ERR(otg_sx->role_sw))
+ return PTR_ERR(otg_sx->role_sw);
+
+ /* default to role none */
+ ssusb_role_sw_set(ssusb->dev, USB_ROLE_NONE);
+
+ return 0;
+}
+
+
+static ssize_t cmode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ enum usb_role role = otg_sx->latest_role;
+ int mode;
+
+ if (kstrtoint(buf, 10, &mode))
+ return -EINVAL;
+
+ dev_info(dev, "store cmode %d op_mode %d\n", mode, otg_sx->op_mode);
+
+ if (otg_sx->op_mode != mode) {
+ /* set switch role */
+ switch (mode) {
+ case MTU3_DR_OPERATION_NONE:
+ otg_sx->latest_role = USB_ROLE_NONE;
+ break;
+ case MTU3_DR_OPERATION_NORMAL:
+ /* switch usb role to latest role */
+ break;
+ case MTU3_DR_OPERATION_HOST:
+ otg_sx->latest_role = USB_ROLE_HOST;
+ break;
+ case MTU3_DR_OPERATION_DEVICE:
+ otg_sx->latest_role = USB_ROLE_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* switch operation mode to normal temporarily */
+ otg_sx->op_mode = MTU3_DR_OPERATION_NORMAL;
+ /* switch usb role */
+ ssusb_role_sw_set(ssusb->dev, otg_sx->latest_role);
+ /* update operation mode */
+ otg_sx->op_mode = mode;
+ /* restore role */
+ otg_sx->latest_role = role;
+ }
+
+ return count;
+}
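+
+/*
+ * Usage sketch (attribute path and enum values are assumptions): writing the
+ * integer matching MTU3_DR_OPERATION_HOST to the "cmode" sysfs attribute
+ * pins the controller to host mode and makes later role-switch events no-ops
+ * until MTU3_DR_OPERATION_NORMAL is written back, which re-applies the last
+ * role reported by the role switch.
+ */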
+
+static ssize_t cmode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ return sprintf(buf, "%d\n", otg_sx->op_mode);
+}
+static DEVICE_ATTR_RW(cmode);
+
+static struct attribute *ssusb_dr_attrs[] = {
+ &dev_attr_cmode.attr,
+ NULL
+};
+
+static const struct attribute_group ssusb_dr_group = {
+ .attrs = ssusb_dr_attrs,
+};
+
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ int ret = 0;
+
+ INIT_WORK(&otg_sx->id_work, ssusb_id_work);
+ INIT_WORK(&otg_sx->vbus_work, ssusb_vbus_work);
+
+ /* default as host, update state */
+ otg_sx->sw_state = ssusb->is_host ?
+ MTU3_SW_ID_GROUND : MTU3_SW_VBUS_VALID;
+ /* initial operation mode */
+ otg_sx->op_mode = MTU3_DR_OPERATION_NORMAL;
+
+ ret = sysfs_create_group(&ssusb->dev->kobj, &ssusb_dr_group);
+ if (ret)
+ dev_info(ssusb->dev, "error creating sysfs attributes\n");
+
+ if (otg_sx->manual_drd_enabled)
+ ssusb_dr_debugfs_init(ssusb);
+ else if (otg_sx->role_sw_used)
+ ret = ssusb_role_sw_register(otg_sx);
+ else
+ ret = ssusb_extcon_register(otg_sx);
+
+ return ret;
+}
+
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ cancel_work_sync(&otg_sx->id_work);
+ cancel_work_sync(&otg_sx->vbus_work);
+ usb_role_switch_unregister(otg_sx->role_sw);
+ sysfs_remove_group(&ssusb->dev->kobj, &ssusb_dr_group);
+}
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_host.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_host.c
old mode 100644
new mode 100755
index 512b9cc..3766856
--- a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_host.c
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_host.c
@@ -205,7 +205,7 @@
ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
/* if port0 supports dual-role, works as host mode by default */
- //ssusb_set_force_vbus(ssusb, false);//zhengzhou
+ //ssusb_set_force_vbus(ssusb, false);
ssusb_set_vbus(&ssusb->otg_switch, 1);
}
diff --git a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_plat.c b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_plat.c
old mode 100644
new mode 100755
index 00c7c6a..b3e4b08
--- a/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_plat.c
+++ b/src/kernel/linux/v4.19/drivers/usb/mtu3/mtu3_plat.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/stddef.h>
-
+#include <linux/pinctrl/consumer.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
@@ -87,8 +87,8 @@
check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
SSUSB_REF_RST_B_STS;
- //if (ssusb->u3d->max_speed > USB_SPEED_HIGH) {//zhengzhou
- //if ((ssusb->u3d) && (ssusb->u3d->max_speed > USB_SPEED_HIGH)) {//zhengzhou
+ //if (ssusb->u3d->max_speed > USB_SPEED_HIGH) {
+ //if ((ssusb->u3d) && (ssusb->u3d->max_speed > USB_SPEED_HIGH)) {
if ((ssusb->u3d) && (ssusb->u3d->max_speed > USB_SPEED_HIGH)) {
ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
(check_val == (value & check_val)), 100, 20000);
@@ -113,12 +113,12 @@
u32 u2ctl;
u32 misc;
- if (!ssusb->force_vbus)
+ if (!ssusb->force_vbus)
return;
u2ctl = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
- //misc = mtu3_readl(ssusb->mac_base, U3D_MISC_CTRL);//zhengzhou
- if (ssusb->mac_base)//zhengzhou
+ //misc = mtu3_readl(ssusb->mac_base, U3D_MISC_CTRL);
+ if (ssusb->mac_base)
misc = mtu3_readl(ssusb->mac_base, U3D_MISC_CTRL);
if (vbus_on) {
u2ctl &= ~SSUSB_U2_PORT_OTG_SEL;
@@ -128,8 +128,8 @@
misc &= ~(VBUS_FRC_EN | VBUS_ON);
}
mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), u2ctl);
- //mtu3_writel(ssusb->mac_base, U3D_MISC_CTRL, misc);//zhengzhou
- if (ssusb->mac_base)//zhengzhou
+ //mtu3_writel(ssusb->mac_base, U3D_MISC_CTRL, misc);
+ if (ssusb->mac_base)
mtu3_writel(ssusb->mac_base, U3D_MISC_CTRL, misc);
}
@@ -308,7 +308,7 @@
* power down device ip, otherwise ip-sleep will fail when working as
* host only mode
*/
- //if (ssusb->dr_mode == USB_DR_MODE_HOST)//zhengzhou
+ //if (ssusb->dr_mode == USB_DR_MODE_HOST)
mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL2,
SSUSB_IP_DEV_PDN);
}
@@ -667,7 +667,21 @@
struct device *dev = &pdev->dev;
struct ssusb_mtk *ssusb;
int ret = -ENOMEM;
-
+ struct pinctrl *mtu_pinctrl;
+ struct pinctrl_state *default_mtu3;
+ mtu_pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(mtu_pinctrl)) {
+ ret = PTR_ERR(mtu_pinctrl);
+ pr_notice("Cannot find mtu_pinctrl!\n");
+ return ret;
+ }
+ default_mtu3 = pinctrl_lookup_state(mtu_pinctrl, "default");
+ if (IS_ERR(default_mtu3)) {
+ ret = PTR_ERR(default_mtu3);
+ pr_notice("Cannot find pinctrl default %d!\n", ret);
+ } else {
+ pinctrl_select_state(mtu_pinctrl, default_mtu3);
+ }
/* all elements are set to ZERO as default value */
ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
if (!ssusb)