// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

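/*
 * Each supported firmware branch (main, 10.X, 10.2.4, 10.4) numbers its
 * WMI commands differently.  The command maps below translate the
 * driver's generic command tokens into the branch-specific IDs; entries
 * set to WMI_CMD_UNSUPPORTED mark commands that the corresponding
 * firmware interface does not provide.
 */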
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
		WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
		WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
		WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
		WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
		WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
		WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
		WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
		WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
		WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
};

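/*
 * The vdev parameter maps below do the same per-branch translation for
 * per-vdev parameters; WMI_VDEV_PARAM_UNSUPPORTED marks parameters a
 * given firmware branch does not expose.
 */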
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
		WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};

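/*
 * Per-pdev parameter map for main firmware; knobs the firmware does not
 * provide are flagged with WMI_PDEV_PARAM_UNSUPPORTED.
 */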
1054static struct wmi_pdev_param_map wmi_pdev_param_map = {
1055 .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
1056 .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
1057 .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
1058 .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
1059 .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
1060 .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
1061 .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
1062 .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1063 .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
1064 .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
1065 .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1066 .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
1067 .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
1068 .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1069 .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
1070 .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
1071 .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
1072 .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
1073 .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
1074 .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1075 .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1076 .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
1077 .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1078 .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
1079 .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
1080 .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1081 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1082 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1083 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1084 .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1085 .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1086 .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1087 .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1088 .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
1089 .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
1090 .dcs = WMI_PDEV_PARAM_DCS,
1091 .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
1092 .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
1093 .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
1094 .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
1095 .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
1096 .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
1097 .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
1098 .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1099 .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1100 .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1101 .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
1102 .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1103 .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
1104 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1105 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1106 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1107 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1108 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1109 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1110 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1111 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1112 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1113 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1114 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1115 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1116 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1117 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1118 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1119 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1120 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1121 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1122 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1123 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1124 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1125 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1126 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1127 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1128 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1129 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1130 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1131 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1132 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1133 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1134 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1135 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1136 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1137 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1138 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1139 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1140 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1141 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1142 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1143 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1144 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1145 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1146 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1147};
1148
1149static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1150 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1151 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1152 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1153 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1154 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1155 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1156 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1157 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1158 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1159 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1160 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1161 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1162 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1163 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1164 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1165 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1166 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1167 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1168 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1169 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1170 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1171 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1172 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1173 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1174 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1175 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1176 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1177 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1178 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1179 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1180 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1181 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1182 .bcnflt_stats_update_period =
1183 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1184 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1185 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1186 .dcs = WMI_10X_PDEV_PARAM_DCS,
1187 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1188 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1189 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1190 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1191 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1192 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1193 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1194 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1195 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1196 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1197 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1198 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1199 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1200 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1201 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1202 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1203 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1204 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1205 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1206 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1207 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1208 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1209 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1210 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1211 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1212 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1213 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1214 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1215 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1216 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1217 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1218 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1219 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1220 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1221 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1222 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1223 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1224 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1225 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1226 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1227 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1228 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1229 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1230 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1231 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1232 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1233 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1234 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1235 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1236 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1237 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1238 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1239 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1240 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1241 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1242 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1243};
1244
1245static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1246 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1247 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1248 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1249 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1250 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1251 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1252 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1253 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1254 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1255 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1256 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1257 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1258 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1259 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1260 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1261 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1262 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1263 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1264 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1265 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1266 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1267 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1268 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1269 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1270 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1271 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1272 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1273 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1274 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1275 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1276 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1277 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1278 .bcnflt_stats_update_period =
1279 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1280 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1281 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1282 .dcs = WMI_10X_PDEV_PARAM_DCS,
1283 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1284 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1285 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1286 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1287 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1288 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1289 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1290 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1291 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1292 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1293 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1294 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1295 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1296 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1297 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1298 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1299 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1300 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1301 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1302 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1303 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1304 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1305 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1306 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1307 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1308 .peer_sta_ps_statechg_enable =
1309 WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1310 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1311 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1312 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1313 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1314 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1315 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1316 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1317 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1318 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1319 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1320 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1321 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1322 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1323 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1324 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1325 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1326 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1327 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1328 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1329 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1330 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1331 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1332 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1333 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1334 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1335 .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1336 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1337 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1338 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1339 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1340};
1341
1342/* firmware 10.2 specific mappings */
1343static struct wmi_cmd_map wmi_10_2_cmd_map = {
1344 .init_cmdid = WMI_10_2_INIT_CMDID,
1345 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1346 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1347 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1348 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1349 .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1350 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1351 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1352 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1353 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1354 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1355 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1356 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1357 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1358 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1359 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1360 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1361 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1362 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1363 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1364 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1365 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1366 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1367 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1368 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1369 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1370 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1371 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1372 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1373 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1374 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1375 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1376 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1377 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1378 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1379 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1380 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1381 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1382 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1383 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1384 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1385 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1386 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1387 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1388 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1389 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1390 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1391 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1392 .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1393 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1394 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1395 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1396 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1397 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1398 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1399 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1400 .roam_scan_rssi_change_threshold =
1401 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1402 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1403 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1404 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1405 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1406 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1407 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1408 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1409 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1410 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1411 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1412 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1413 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1414 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1415 .wlan_profile_set_hist_intvl_cmdid =
1416 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1417 .wlan_profile_get_profile_data_cmdid =
1418 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1419 .wlan_profile_enable_profile_id_cmdid =
1420 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1421 .wlan_profile_list_profile_id_cmdid =
1422 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1423 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1424 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1425 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1426 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1427 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1428 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1429 .wow_enable_disable_wake_event_cmdid =
1430 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1431 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1432 .wow_hostwakeup_from_sleep_cmdid =
1433 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1434 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1435 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1436 .vdev_spectral_scan_configure_cmdid =
1437 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1438 .vdev_spectral_scan_enable_cmdid =
1439 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1440 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1441 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1442 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1443 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1444 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1445 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1446 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1447 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1448 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1449 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1450 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1451 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1452 .echo_cmdid = WMI_10_2_ECHO_CMDID,
1453 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1454 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1455 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1456 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1457 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1458 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1459 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1460 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1461 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1462 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1463 .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1464 .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1465 .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1466 .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1467 .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1468 .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1469 .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1470 .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1471 .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1472 .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1473 .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1474 .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1475 .nan_cmdid = WMI_CMD_UNSUPPORTED,
1476 .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1477 .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1478 .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1479 .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1480 .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1481 .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1482 .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1483 .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1484 .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1485 .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1486 .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1487 .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1488 .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1489 .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1490 .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1491 .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1492 .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1493 .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1494 .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1495 .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1496 .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1497 .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1498};
1499
1500static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1501 .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1502 .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1503 .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1504 .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1505 .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1506 .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1507 .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1508 .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1509 .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1510 .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1511 .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1512 .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1513 .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1514 .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1515 .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1516 .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1517 .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1518 .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1519 .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1520 .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1521 .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1522 .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1523 .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1524 .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1525 .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1526 .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1527 .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1528 .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1529 .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1530 .pdev_stats_update_period =
1531 WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1532 .vdev_stats_update_period =
1533 WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1534 .peer_stats_update_period =
1535 WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1536 .bcnflt_stats_update_period =
1537 WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1538 .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1539 .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1540 .dcs = WMI_10_4_PDEV_PARAM_DCS,
1541 .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1542 .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1543 .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1544 .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1545 .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1546 .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1547 .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1548 .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1549 .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1550 .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1551 .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1552 .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1553 .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1554 .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1555 .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1556 .smart_antenna_default_antenna =
1557 WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1558 .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1559 .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1560 .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1561 .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1562 .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1563 .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1564 .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1565 .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1566 .remove_mcast2ucast_buffer =
1567 WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1568 .peer_sta_ps_statechg_enable =
1569 WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1570 .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1571 .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1572 .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1573 .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1574 .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1575 .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1576 .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1577 .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1578 .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1579 .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1580 .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1581 .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1582 .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1583 .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1584 .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1585 .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1586 .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1587 .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1588 .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1589 .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1590 .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1591 .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1592 .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1593 .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1594 .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1595 .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1596 .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1597 .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1598 .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1599 .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1600};
1601
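/* Map the driver's generic WMI_CIPHER_* indices to the cipher values the
 * firmware expects: an identity mapping for main/10.x firmware and the
 * WMI_TLV_CIPHER_* values for TLV based firmware.
 */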
1602static const u8 wmi_key_cipher_suites[] = {
1603 [WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1604 [WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1605 [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1606 [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1607 [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1608 [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1609 [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1610 [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1611 [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1612};
1613
1614static const u8 wmi_tlv_key_cipher_suites[] = {
1615 [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1616 [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1617 [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1618 [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1619 [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1620 [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1621 [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1622 [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1623 [WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1624};
1625
1626static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1627 .auth = WMI_PEER_AUTH,
1628 .qos = WMI_PEER_QOS,
1629 .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1630 .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1631 .apsd = WMI_PEER_APSD,
1632 .ht = WMI_PEER_HT,
1633 .bw40 = WMI_PEER_40MHZ,
1634 .stbc = WMI_PEER_STBC,
1635 .ldbc = WMI_PEER_LDPC,
1636 .dyn_mimops = WMI_PEER_DYN_MIMOPS,
1637 .static_mimops = WMI_PEER_STATIC_MIMOPS,
1638 .spatial_mux = WMI_PEER_SPATIAL_MUX,
1639 .vht = WMI_PEER_VHT,
1640 .bw80 = WMI_PEER_80MHZ,
1641 .vht_2g = WMI_PEER_VHT_2G,
1642 .pmf = WMI_PEER_PMF,
1643 .bw160 = WMI_PEER_160MHZ,
1644};
1645
1646static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1647 .auth = WMI_10X_PEER_AUTH,
1648 .qos = WMI_10X_PEER_QOS,
1649 .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1650 .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1651 .apsd = WMI_10X_PEER_APSD,
1652 .ht = WMI_10X_PEER_HT,
1653 .bw40 = WMI_10X_PEER_40MHZ,
1654 .stbc = WMI_10X_PEER_STBC,
1655 .ldbc = WMI_10X_PEER_LDPC,
1656 .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1657 .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1658 .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1659 .vht = WMI_10X_PEER_VHT,
1660 .bw80 = WMI_10X_PEER_80MHZ,
1661 .bw160 = WMI_10X_PEER_160MHZ,
1662};
1663
1664static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1665 .auth = WMI_10_2_PEER_AUTH,
1666 .qos = WMI_10_2_PEER_QOS,
1667 .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1668 .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1669 .apsd = WMI_10_2_PEER_APSD,
1670 .ht = WMI_10_2_PEER_HT,
1671 .bw40 = WMI_10_2_PEER_40MHZ,
1672 .stbc = WMI_10_2_PEER_STBC,
1673 .ldbc = WMI_10_2_PEER_LDPC,
1674 .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1675 .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1676 .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1677 .vht = WMI_10_2_PEER_VHT,
1678 .bw80 = WMI_10_2_PEER_80MHZ,
1679 .vht_2g = WMI_10_2_PEER_VHT_2G,
1680 .pmf = WMI_10_2_PEER_PMF,
1681 .bw160 = WMI_10_2_PEER_160MHZ,
1682};
1683
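/* Translate a host channel description into the over-the-wire wmi_channel
 * layout: boolean arguments become WMI_CHAN_FLAG_* bits, frequencies are
 * converted to little endian and band_center_freq2 is only filled in for
 * VHT80+80 operation.
 */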
1684void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
1685 const struct wmi_channel_arg *arg)
1686{
1687 u32 flags = 0;
1688
1689 memset(ch, 0, sizeof(*ch));
1690
1691 if (arg->passive)
1692 flags |= WMI_CHAN_FLAG_PASSIVE;
1693 if (arg->allow_ibss)
1694 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1695 if (arg->allow_ht)
1696 flags |= WMI_CHAN_FLAG_ALLOW_HT;
1697 if (arg->allow_vht)
1698 flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1699 if (arg->ht40plus)
1700 flags |= WMI_CHAN_FLAG_HT40_PLUS;
1701 if (arg->chan_radar)
1702 flags |= WMI_CHAN_FLAG_DFS;
1703
1704 ch->mhz = __cpu_to_le32(arg->freq);
1705 ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1706 if (arg->mode == MODE_11AC_VHT80_80)
1707 ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1708 else
1709 ch->band_center_freq2 = 0;
1710 ch->min_power = arg->min_power;
1711 ch->max_power = arg->max_power;
1712 ch->reg_power = arg->max_reg_power;
1713 ch->antenna_max = arg->max_antenna_gain;
1714 ch->max_tx_power = arg->max_power;
1715
1716 /* mode & flags share storage */
1717 ch->mode = arg->mode;
1718 ch->flags |= __cpu_to_le32(flags);
1719}
1720
1721int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1722{
1723 unsigned long time_left, i;
1724
1725 time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1726 WMI_SERVICE_READY_TIMEOUT_HZ);
1727 if (!time_left) {
1728 /* Sometimes the PCI HIF doesn't receive an interrupt
1729 * for the service ready message even though the buffer
1730 * was completed. A PCIe sniffer shows that this is
1731 * because the corresponding CE ring doesn't fire it.
1732 * Work around this by polling the CE rings once.
1733 */
1734 ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1735
1736 for (i = 0; i < CE_COUNT; i++)
1737 ath10k_hif_send_complete_check(ar, i, 1);
1738
1739 time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1740 WMI_SERVICE_READY_TIMEOUT_HZ);
1741 if (!time_left) {
1742 ath10k_warn(ar, "polling timed out\n");
1743 return -ETIMEDOUT;
1744 }
1745
1746 ath10k_warn(ar, "service ready completion received, continuing normally\n");
1747 }
1748
1749 return 0;
1750}
1751
1752int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1753{
1754 unsigned long time_left;
1755
1756 time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1757 WMI_UNIFIED_READY_TIMEOUT_HZ);
1758 if (!time_left)
1759 return -ETIMEDOUT;
1760 return 0;
1761}
1762
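/* Allocate a WMI command buffer: reserve WMI_SKB_HEADROOM for the headers
 * pushed later, round the payload up to a 4-byte multiple and zero it.
 * Returns NULL on allocation failure.
 */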
1763struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1764{
1765 struct sk_buff *skb;
1766 u32 round_len = roundup(len, 4);
1767
1768 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1769 if (!skb)
1770 return NULL;
1771
1772 skb_reserve(skb, WMI_SKB_HEADROOM);
1773 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1774 ath10k_warn(ar, "Unaligned WMI skb\n");
1775
1776 skb_put(skb, round_len);
1777 memset(skb->data, 0, round_len);
1778
1779 return skb;
1780}
1781
1782static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1783{
1784 dev_kfree_skb(skb);
1785}
1786
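/* Push the WMI command header in front of the payload and hand the buffer
 * to HTC without waiting for tx credits. On failure the header is pulled
 * back off so the caller may retry or free the skb.
 */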
1787int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1788 u32 cmd_id)
1789{
1790 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1791 struct wmi_cmd_hdr *cmd_hdr;
1792 int ret;
1793 u32 cmd = 0;
1794
1795 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1796 return -ENOMEM;
1797
1798 cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1799
1800 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1801 cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1802
1803 memset(skb_cb, 0, sizeof(*skb_cb));
1804 trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1805 ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1806
1807 if (ret)
1808 goto err_pull;
1809
1810 return 0;
1811
1812err_pull:
1813 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1814 return ret;
1815}
1816
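/* Send a beacon that was scheduled for this vif using the reference based
 * (DMA address) WMI beacon command. Beacon state transitions
 * SCHEDULED -> SENDING -> SENT happen under ar->data_lock; on failure the
 * beacon is left SCHEDULED so it can be retried later.
 */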
1817static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1818{
1819 struct ath10k *ar = arvif->ar;
1820 struct ath10k_skb_cb *cb;
1821 struct sk_buff *bcn;
1822 bool dtim_zero;
1823 bool deliver_cab;
1824 int ret;
1825
1826 spin_lock_bh(&ar->data_lock);
1827
1828 bcn = arvif->beacon;
1829
1830 if (!bcn)
1831 goto unlock;
1832
1833 cb = ATH10K_SKB_CB(bcn);
1834
1835 switch (arvif->beacon_state) {
1836 case ATH10K_BEACON_SENDING:
1837 case ATH10K_BEACON_SENT:
1838 break;
1839 case ATH10K_BEACON_SCHEDULED:
1840 arvif->beacon_state = ATH10K_BEACON_SENDING;
1841 spin_unlock_bh(&ar->data_lock);
1842
1843 dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1844 deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1845 ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1846 arvif->vdev_id,
1847 bcn->data, bcn->len,
1848 cb->paddr,
1849 dtim_zero,
1850 deliver_cab);
1851
1852 spin_lock_bh(&ar->data_lock);
1853
1854 if (ret == 0)
1855 arvif->beacon_state = ATH10K_BEACON_SENT;
1856 else
1857 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1858 }
1859
1860unlock:
1861 spin_unlock_bh(&ar->data_lock);
1862}
1863
1864static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1865 struct ieee80211_vif *vif)
1866{
1867 struct ath10k_vif *arvif = (void *)vif->drv_priv;
1868
1869 ath10k_wmi_tx_beacon_nowait(arvif);
1870}
1871
1872static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1873{
1874 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1875 IEEE80211_IFACE_ITER_NORMAL,
1876 ath10k_wmi_tx_beacons_iter,
1877 NULL);
1878}
1879
1880static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1881{
1882 /* try to send pending beacons first. they take priority */
1883 ath10k_wmi_tx_beacons_nowait(ar);
1884
1885 wake_up(&ar->wmi.tx_credits_wq);
1886}
1887
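/* Blocking WMI send: rejects ids mapped to WMI_CMD_UNSUPPORTED, waits up to
 * 3 * HZ for HTC tx credits (flushing pending beacons on every wakeup),
 * returns -ESHUTDOWN once a firmware crash is being handled and schedules a
 * hardware restart if the wait times out. The skb is consumed on error.
 */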
1888int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1889{
1890 int ret = -EOPNOTSUPP;
1891
1892 might_sleep();
1893
1894 if (cmd_id == WMI_CMD_UNSUPPORTED) {
1895 ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1896 cmd_id);
1897 return ret;
1898 }
1899
1900 wait_event_timeout(ar->wmi.tx_credits_wq, ({
1901 /* try to send pending beacons first. they take priority */
1902 ath10k_wmi_tx_beacons_nowait(ar);
1903
1904 ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1905
1906 if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1907 ret = -ESHUTDOWN;
1908
1909 (ret != -EAGAIN);
1910 }), 3 * HZ);
1911
1912 if (ret)
1913 dev_kfree_skb_any(skb);
1914
1915 if (ret == -EAGAIN) {
1916 ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1917 cmd_id);
1918 queue_work(ar->workqueue, &ar->restart_work);
1919 }
1920
1921 return ret;
1922}
1923
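/* Build a WMI_MGMT_TX command carrying the management frame inline. For
 * protected action/deauth/disassoc frames extra room is reserved for the
 * CCMP MIC, since firmware is expected to encrypt these frames.
 */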
1924static struct sk_buff *
1925ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1926{
1927 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1928 struct ath10k_vif *arvif;
1929 struct wmi_mgmt_tx_cmd *cmd;
1930 struct ieee80211_hdr *hdr;
1931 struct sk_buff *skb;
1932 int len;
1933 u32 vdev_id;
1934 u32 buf_len = msdu->len;
1935 u16 fc;
1936
1937 hdr = (struct ieee80211_hdr *)msdu->data;
1938 fc = le16_to_cpu(hdr->frame_control);
1939
1940 if (cb->vif) {
1941 arvif = (void *)cb->vif->drv_priv;
1942 vdev_id = arvif->vdev_id;
1943 } else {
1944 vdev_id = 0;
1945 }
1946
1947 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1948 return ERR_PTR(-EINVAL);
1949
1950 len = sizeof(cmd->hdr) + msdu->len;
1951
1952 if ((ieee80211_is_action(hdr->frame_control) ||
1953 ieee80211_is_deauth(hdr->frame_control) ||
1954 ieee80211_is_disassoc(hdr->frame_control)) &&
1955 ieee80211_has_protected(hdr->frame_control)) {
1956 len += IEEE80211_CCMP_MIC_LEN;
1957 buf_len += IEEE80211_CCMP_MIC_LEN;
1958 }
1959
1960 len = round_up(len, 4);
1961
1962 skb = ath10k_wmi_alloc_skb(ar, len);
1963 if (!skb)
1964 return ERR_PTR(-ENOMEM);
1965
1966 cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
1967
1968 cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
1969 cmd->hdr.tx_rate = 0;
1970 cmd->hdr.tx_power = 0;
1971 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
1972
1973 ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
1974 memcpy(cmd->buf, msdu->data, msdu->len);
1975
1976 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
1977 msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
1978 fc & IEEE80211_FCTL_STYPE);
1979 trace_ath10k_tx_hdr(ar, skb->data, skb->len);
1980 trace_ath10k_tx_payload(ar, skb->data, skb->len);
1981
1982 return skb;
1983}
1984
1985static void ath10k_wmi_event_scan_started(struct ath10k *ar)
1986{
1987 lockdep_assert_held(&ar->data_lock);
1988
1989 switch (ar->scan.state) {
1990 case ATH10K_SCAN_IDLE:
1991 case ATH10K_SCAN_RUNNING:
1992 case ATH10K_SCAN_ABORTING:
1993 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
1994 ath10k_scan_state_str(ar->scan.state),
1995 ar->scan.state);
1996 break;
1997 case ATH10K_SCAN_STARTING:
1998 ar->scan.state = ATH10K_SCAN_RUNNING;
1999
2000 if (ar->scan.is_roc)
2001 ieee80211_ready_on_channel(ar->hw);
2002
2003 complete(&ar->scan.started);
2004 break;
2005 }
2006}
2007
2008static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2009{
2010 lockdep_assert_held(&ar->data_lock);
2011
2012 switch (ar->scan.state) {
2013 case ATH10K_SCAN_IDLE:
2014 case ATH10K_SCAN_RUNNING:
2015 case ATH10K_SCAN_ABORTING:
2016 ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2017 ath10k_scan_state_str(ar->scan.state),
2018 ar->scan.state);
2019 break;
2020 case ATH10K_SCAN_STARTING:
2021 complete(&ar->scan.started);
2022 __ath10k_scan_finish(ar);
2023 break;
2024 }
2025}
2026
2027static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2028{
2029 lockdep_assert_held(&ar->data_lock);
2030
2031 switch (ar->scan.state) {
2032 case ATH10K_SCAN_IDLE:
2033 case ATH10K_SCAN_STARTING:
2034 /* One suspected reason a scan can complete while still starting
2035 * is that firmware fails to deliver all scan events to the host,
2036 * e.g. when the transport pipe is full. This has been observed
2037 * with spectral scan phyerr events starving the wmi transport
2038 * pipe. In such a case the "scan completed" event should be (and
2039 * is) ignored by the host, as it may just be the firmware's scan
2040 * state machine recovering.
2041 */
2042 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2043 ath10k_scan_state_str(ar->scan.state),
2044 ar->scan.state);
2045 break;
2046 case ATH10K_SCAN_RUNNING:
2047 case ATH10K_SCAN_ABORTING:
2048 __ath10k_scan_finish(ar);
2049 break;
2050 }
2051}
2052
2053static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2054{
2055 lockdep_assert_held(&ar->data_lock);
2056
2057 switch (ar->scan.state) {
2058 case ATH10K_SCAN_IDLE:
2059 case ATH10K_SCAN_STARTING:
2060 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2061 ath10k_scan_state_str(ar->scan.state),
2062 ar->scan.state);
2063 break;
2064 case ATH10K_SCAN_RUNNING:
2065 case ATH10K_SCAN_ABORTING:
2066 ar->scan_channel = NULL;
2067 break;
2068 }
2069}
2070
2071static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2072{
2073 lockdep_assert_held(&ar->data_lock);
2074
2075 switch (ar->scan.state) {
2076 case ATH10K_SCAN_IDLE:
2077 case ATH10K_SCAN_STARTING:
2078 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2079 ath10k_scan_state_str(ar->scan.state),
2080 ar->scan.state);
2081 break;
2082 case ATH10K_SCAN_RUNNING:
2083 case ATH10K_SCAN_ABORTING:
2084 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2085
2086 if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2087 complete(&ar->scan.on_channel);
2088 break;
2089 }
2090}
2091
2092static const char *
2093ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2094 enum wmi_scan_completion_reason reason)
2095{
2096 switch (type) {
2097 case WMI_SCAN_EVENT_STARTED:
2098 return "started";
2099 case WMI_SCAN_EVENT_COMPLETED:
2100 switch (reason) {
2101 case WMI_SCAN_REASON_COMPLETED:
2102 return "completed";
2103 case WMI_SCAN_REASON_CANCELLED:
2104 return "completed [cancelled]";
2105 case WMI_SCAN_REASON_PREEMPTED:
2106 return "completed [preempted]";
2107 case WMI_SCAN_REASON_TIMEDOUT:
2108 return "completed [timedout]";
2109 case WMI_SCAN_REASON_INTERNAL_FAILURE:
2110 return "completed [internal err]";
2111 case WMI_SCAN_REASON_MAX:
2112 break;
2113 }
2114 return "completed [unknown]";
2115 case WMI_SCAN_EVENT_BSS_CHANNEL:
2116 return "bss channel";
2117 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2118 return "foreign channel";
2119 case WMI_SCAN_EVENT_DEQUEUED:
2120 return "dequeued";
2121 case WMI_SCAN_EVENT_PREEMPTED:
2122 return "preempted";
2123 case WMI_SCAN_EVENT_START_FAILED:
2124 return "start failed";
2125 case WMI_SCAN_EVENT_RESTARTED:
2126 return "restarted";
2127 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2128 return "foreign channel exit";
2129 default:
2130 return "unknown";
2131 }
2132}
2133
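/* Copy the fixed scan event fields out of the skb. The values are left in
 * little endian; callers convert them as needed.
 */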
2134static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2135 struct wmi_scan_ev_arg *arg)
2136{
2137 struct wmi_scan_event *ev = (void *)skb->data;
2138
2139 if (skb->len < sizeof(*ev))
2140 return -EPROTO;
2141
2142 skb_pull(skb, sizeof(*ev));
2143 arg->event_type = ev->event_type;
2144 arg->reason = ev->reason;
2145 arg->channel_freq = ev->channel_freq;
2146 arg->scan_req_id = ev->scan_req_id;
2147 arg->scan_id = ev->scan_id;
2148 arg->vdev_id = ev->vdev_id;
2149
2150 return 0;
2151}
2152
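/* Entry point for WMI scan events: parse the event, log it and drive the
 * host scan state machine under ar->data_lock.
 */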
2153int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2154{
2155 struct wmi_scan_ev_arg arg = {};
2156 enum wmi_scan_event_type event_type;
2157 enum wmi_scan_completion_reason reason;
2158 u32 freq;
2159 u32 req_id;
2160 u32 scan_id;
2161 u32 vdev_id;
2162 int ret;
2163
2164 ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2165 if (ret) {
2166 ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2167 return ret;
2168 }
2169
2170 event_type = __le32_to_cpu(arg.event_type);
2171 reason = __le32_to_cpu(arg.reason);
2172 freq = __le32_to_cpu(arg.channel_freq);
2173 req_id = __le32_to_cpu(arg.scan_req_id);
2174 scan_id = __le32_to_cpu(arg.scan_id);
2175 vdev_id = __le32_to_cpu(arg.vdev_id);
2176
2177 spin_lock_bh(&ar->data_lock);
2178
2179 ath10k_dbg(ar, ATH10K_DBG_WMI,
2180 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2181 ath10k_wmi_event_scan_type_str(event_type, reason),
2182 event_type, reason, freq, req_id, scan_id, vdev_id,
2183 ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2184
2185 switch (event_type) {
2186 case WMI_SCAN_EVENT_STARTED:
2187 ath10k_wmi_event_scan_started(ar);
2188 break;
2189 case WMI_SCAN_EVENT_COMPLETED:
2190 ath10k_wmi_event_scan_completed(ar);
2191 break;
2192 case WMI_SCAN_EVENT_BSS_CHANNEL:
2193 ath10k_wmi_event_scan_bss_chan(ar);
2194 break;
2195 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2196 ath10k_wmi_event_scan_foreign_chan(ar, freq);
2197 break;
2198 case WMI_SCAN_EVENT_START_FAILED:
2199 ath10k_warn(ar, "received scan start failure event\n");
2200 ath10k_wmi_event_scan_start_failed(ar);
2201 break;
2202 case WMI_SCAN_EVENT_DEQUEUED:
2203 case WMI_SCAN_EVENT_PREEMPTED:
2204 case WMI_SCAN_EVENT_RESTARTED:
2205 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2206 default:
2207 break;
2208 }
2209
2210 spin_unlock_bh(&ar->data_lock);
2211 return 0;
2212}
2213
2214/* If keys are configured, HW decrypts all frames
2215 * with protected bit set. Mark such frames as decrypted.
2216 */
2217static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2218 struct sk_buff *skb,
2219 struct ieee80211_rx_status *status)
2220{
2221 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2222 unsigned int hdrlen;
2223 bool peer_key;
2224 u8 *addr, keyidx;
2225
2226 if (!ieee80211_is_auth(hdr->frame_control) ||
2227 !ieee80211_has_protected(hdr->frame_control))
2228 return;
2229
2230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2231 if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2232 return;
2233
2234 keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2235 addr = ieee80211_get_SA(hdr);
2236
2237 spin_lock_bh(&ar->data_lock);
2238 peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2239 spin_unlock_bh(&ar->data_lock);
2240
2241 if (peer_key) {
2242 ath10k_dbg(ar, ATH10K_DBG_MAC,
2243 "mac wep key present for peer %pM\n", addr);
2244 status->flag |= RX_FLAG_DECRYPTED;
2245 }
2246}
2247
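/* Parse a management rx event. The header layout (v1 vs v2) depends on the
 * ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX firmware feature; the optional extension
 * info is copied when WMI_RX_STATUS_EXT_INFO is set and the skb is trimmed
 * to the MSDU length to drop any HTC padding.
 */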
2248static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2249 struct wmi_mgmt_rx_ev_arg *arg)
2250{
2251 struct wmi_mgmt_rx_event_v1 *ev_v1;
2252 struct wmi_mgmt_rx_event_v2 *ev_v2;
2253 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2254 struct wmi_mgmt_rx_ext_info *ext_info;
2255 size_t pull_len;
2256 u32 msdu_len;
2257 u32 len;
2258
2259 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2260 ar->running_fw->fw_file.fw_features)) {
2261 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2262 ev_hdr = &ev_v2->hdr.v1;
2263 pull_len = sizeof(*ev_v2);
2264 } else {
2265 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2266 ev_hdr = &ev_v1->hdr;
2267 pull_len = sizeof(*ev_v1);
2268 }
2269
2270 if (skb->len < pull_len)
2271 return -EPROTO;
2272
2273 skb_pull(skb, pull_len);
2274 arg->channel = ev_hdr->channel;
2275 arg->buf_len = ev_hdr->buf_len;
2276 arg->status = ev_hdr->status;
2277 arg->snr = ev_hdr->snr;
2278 arg->phy_mode = ev_hdr->phy_mode;
2279 arg->rate = ev_hdr->rate;
2280
2281 msdu_len = __le32_to_cpu(arg->buf_len);
2282 if (skb->len < msdu_len)
2283 return -EPROTO;
2284
2285 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2286 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2287 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2288 memcpy(&arg->ext_info, ext_info,
2289 sizeof(struct wmi_mgmt_rx_ext_info));
2290 }
2291 /* The WMI buffer may have been padded to a 4-byte boundary by the HTC
2292 * trailer carrying a credit update. Trim the excess garbage.
2293 */
2294 skb_trim(skb, msdu_len);
2295
2296 return 0;
2297}
2298
2299static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2300 struct sk_buff *skb,
2301 struct wmi_mgmt_rx_ev_arg *arg)
2302{
2303 struct wmi_10_4_mgmt_rx_event *ev;
2304 struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2305 size_t pull_len;
2306 u32 msdu_len;
2307 struct wmi_mgmt_rx_ext_info *ext_info;
2308 u32 len;
2309
2310 ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2311 ev_hdr = &ev->hdr;
2312 pull_len = sizeof(*ev);
2313
2314 if (skb->len < pull_len)
2315 return -EPROTO;
2316
2317 skb_pull(skb, pull_len);
2318 arg->channel = ev_hdr->channel;
2319 arg->buf_len = ev_hdr->buf_len;
2320 arg->status = ev_hdr->status;
2321 arg->snr = ev_hdr->snr;
2322 arg->phy_mode = ev_hdr->phy_mode;
2323 arg->rate = ev_hdr->rate;
2324
2325 msdu_len = __le32_to_cpu(arg->buf_len);
2326 if (skb->len < msdu_len)
2327 return -EPROTO;
2328
2329 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2330 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2331 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2332 memcpy(&arg->ext_info, ext_info,
2333 sizeof(struct wmi_mgmt_rx_ext_info));
2334 }
2335
2336 /* Make sure bytes added for padding are removed. */
2337 skb_trim(skb, msdu_len);
2338
2339 return 0;
2340}
2341
2342static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2343 struct ieee80211_hdr *hdr)
2344{
2345 if (!ieee80211_has_protected(hdr->frame_control))
2346 return false;
2347
2348 /* FW delivers WEP Shared Auth frame with Protected Bit set and
2349 * encrypted payload. However in case of PMF it delivers decrypted
2350 * frames with Protected Bit set.
2351 */
2352 if (ieee80211_is_auth(hdr->frame_control))
2353 return false;
2354
2355 /* qca99x0 based FW delivers broadcast or multicast management frames
2356 * (ex: group privacy action frames in mesh) as encrypted payload.
2357 */
2358 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2359 ar->hw_params.sw_decrypt_mcast_mgmt)
2360 return false;
2361
2362 return true;
2363}
2364
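/* Complete a management frame tx: look the frame up by descriptor id in the
 * mgmt_pending_tx IDR, unmap its DMA buffer, fill in the ACK status and ack
 * signal and report the frame to mac80211. The IDR entry is always removed
 * before returning.
 */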
2365static int
2366wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2367{
2368 struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2369 struct ath10k_wmi *wmi = &ar->wmi;
2370 struct ieee80211_tx_info *info;
2371 struct sk_buff *msdu;
2372 int ret;
2373
2374 spin_lock_bh(&ar->data_lock);
2375
2376 pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2377 if (!pkt_addr) {
2378 ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2379 param->desc_id);
2380 ret = -ENOENT;
2381 goto out;
2382 }
2383
2384 msdu = pkt_addr->vaddr;
2385 dma_unmap_single(ar->dev, pkt_addr->paddr,
2386 msdu->len, DMA_TO_DEVICE);
2387 info = IEEE80211_SKB_CB(msdu);
2388 kfree(pkt_addr);
2389
2390 if (param->status) {
2391 info->flags &= ~IEEE80211_TX_STAT_ACK;
2392 } else {
2393 info->flags |= IEEE80211_TX_STAT_ACK;
2394 info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2395 param->ack_rssi;
2396 info->status.is_valid_ack_signal = true;
2397 }
2398
2399 ieee80211_tx_status_irqsafe(ar->hw, msdu);
2400
2401 ret = 0;
2402
2403out:
2404 idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2405 spin_unlock_bh(&ar->data_lock);
2406 return ret;
2407}
2408
2409int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2410{
2411 struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2412 struct mgmt_tx_compl_params param;
2413 int ret;
2414
2415 ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2416 if (ret) {
2417 ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2418 return ret;
2419 }
2420
2421 memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2422 param.desc_id = __le32_to_cpu(arg.desc_id);
2423 param.status = __le32_to_cpu(arg.status);
2424
2425 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2426 param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2427
2428 wmi_process_mgmt_tx_comp(ar, &param);
2429
2430 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2431
2432 return 0;
2433}
2434
2435int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2436{
2437 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2438 struct mgmt_tx_compl_params param;
2439 u32 num_reports;
2440 int i, ret;
2441
2442 ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2443 if (ret) {
2444 ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2445 return ret;
2446 }
2447
2448 num_reports = __le32_to_cpu(arg.num_reports);
2449
2450 for (i = 0; i < num_reports; i++) {
2451 memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2452 param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2453 param.status = __le32_to_cpu(arg.status[i]);
2454
2455 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2456 param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2457 wmi_process_mgmt_tx_comp(ar, &param);
2458 }
2459
2460 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2461
2462 return 0;
2463}
2464
2465int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2466{
2467 struct wmi_mgmt_rx_ev_arg arg = {};
2468 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2469 struct ieee80211_hdr *hdr;
2470 struct ieee80211_supported_band *sband;
2471 u32 rx_status;
2472 u32 channel;
2473 u32 phy_mode;
2474 u32 snr;
2475 u32 rate;
2476 u16 fc;
2477 int ret;
2478
2479 ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2480 if (ret) {
2481 ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2482 dev_kfree_skb(skb);
2483 return ret;
2484 }
2485
2486 channel = __le32_to_cpu(arg.channel);
2487 rx_status = __le32_to_cpu(arg.status);
2488 snr = __le32_to_cpu(arg.snr);
2489 phy_mode = __le32_to_cpu(arg.phy_mode);
2490 rate = __le32_to_cpu(arg.rate);
2491
2492 memset(status, 0, sizeof(*status));
2493
2494 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2495 "event mgmt rx status %08x\n", rx_status);
2496
2497 if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2498 (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2499 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2500 dev_kfree_skb(skb);
2501 return 0;
2502 }
2503
2504 if (rx_status & WMI_RX_STATUS_ERR_MIC)
2505 status->flag |= RX_FLAG_MMIC_ERROR;
2506
2507 if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2508 status->mactime =
2509 __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2510 status->flag |= RX_FLAG_MACTIME_END;
2511 }
2512 /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2513 * MODE_11B. This means phy_mode is not a reliable source for the band
2514 * of mgmt rx.
2515 */
2516 if (channel >= 1 && channel <= 14) {
2517 status->band = NL80211_BAND_2GHZ;
2518 } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2519 status->band = NL80211_BAND_5GHZ;
2520 } else {
2521 /* Shouldn't happen unless list of advertised channels to
2522 * mac80211 has been changed.
2523 */
2524 WARN_ON_ONCE(1);
2525 dev_kfree_skb(skb);
2526 return 0;
2527 }
2528
2529 if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2530 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2531
2532 sband = &ar->mac.sbands[status->band];
2533
2534 status->freq = ieee80211_channel_to_frequency(channel, status->band);
2535 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2536 status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2537
2538 hdr = (struct ieee80211_hdr *)skb->data;
2539 fc = le16_to_cpu(hdr->frame_control);
2540
2541 /* Firmware is guaranteed to report all essential management frames via
2542 * WMI while it can deliver some extra via HTT. Since there can be
2543 * duplicates, split the reporting wrt monitor/sniffing.
2544 */
2545 status->flag |= RX_FLAG_SKIP_MONITOR;
2546
2547 ath10k_wmi_handle_wep_reauth(ar, skb, status);
2548
2549 if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2550 status->flag |= RX_FLAG_DECRYPTED;
2551
2552 if (!ieee80211_is_action(hdr->frame_control) &&
2553 !ieee80211_is_deauth(hdr->frame_control) &&
2554 !ieee80211_is_disassoc(hdr->frame_control)) {
2555 status->flag |= RX_FLAG_IV_STRIPPED |
2556 RX_FLAG_MMIC_STRIPPED;
2557 hdr->frame_control = __cpu_to_le16(fc &
2558 ~IEEE80211_FCTL_PROTECTED);
2559 }
2560 }
2561
2562 if (ieee80211_is_beacon(hdr->frame_control))
2563 ath10k_mac_handle_beacon(ar, skb);
2564
2565 if (ieee80211_is_beacon(hdr->frame_control) ||
2566 ieee80211_is_probe_resp(hdr->frame_control))
2567 status->boottime_ns = ktime_get_boottime_ns();
2568
2569 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2570 "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2571 skb, skb->len,
2572 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2573
2574 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2575 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2576 status->freq, status->band, status->signal,
2577 status->rate_idx);
2578
2579 ieee80211_rx_ni(ar->hw, skb);
2580
2581 return 0;
2582}
2583
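/* Map a channel center frequency to its index in ar->survey[]; the index is
 * the channel's position when walking all bands advertised to mac80211 in
 * order.
 */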
2584static int freq_to_idx(struct ath10k *ar, int freq)
2585{
2586 struct ieee80211_supported_band *sband;
2587 int band, ch, idx = 0;
2588
2589 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2590 sband = ar->hw->wiphy->bands[band];
2591 if (!sband)
2592 continue;
2593
2594 for (ch = 0; ch < sband->n_channels; ch++, idx++)
2595 if (sband->channels[ch].center_freq == freq)
2596 goto exit;
2597 }
2598
2599exit:
2600 return idx;
2601}
2602
2603static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2604 struct wmi_ch_info_ev_arg *arg)
2605{
2606 struct wmi_chan_info_event *ev = (void *)skb->data;
2607
2608 if (skb->len < sizeof(*ev))
2609 return -EPROTO;
2610
2611 skb_pull(skb, sizeof(*ev));
2612 arg->err_code = ev->err_code;
2613 arg->freq = ev->freq;
2614 arg->cmd_flags = ev->cmd_flags;
2615 arg->noise_floor = ev->noise_floor;
2616 arg->rx_clear_count = ev->rx_clear_count;
2617 arg->cycle_count = ev->cycle_count;
2618
2619 return 0;
2620}
2621
2622static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2623 struct sk_buff *skb,
2624 struct wmi_ch_info_ev_arg *arg)
2625{
2626 struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2627
2628 if (skb->len < sizeof(*ev))
2629 return -EPROTO;
2630
2631 skb_pull(skb, sizeof(*ev));
2632 arg->err_code = ev->err_code;
2633 arg->freq = ev->freq;
2634 arg->cmd_flags = ev->cmd_flags;
2635 arg->noise_floor = ev->noise_floor;
2636 arg->rx_clear_count = ev->rx_clear_count;
2637 arg->cycle_count = ev->cycle_count;
2638 arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2639 arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2640 arg->rx_frame_count = ev->rx_frame_count;
2641
2642 return 0;
2643}
2644
2645/*
2646 * Handle the channel info event for firmware which only sends one
2647 * chan_info event per scanned channel.
2648 */
2649static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2650 struct chan_info_params *params)
2651{
2652 struct survey_info *survey;
2653 int idx;
2654
2655 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2656 ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2657 return;
2658 }
2659
2660 idx = freq_to_idx(ar, params->freq);
2661 if (idx >= ARRAY_SIZE(ar->survey)) {
2662 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2663 params->freq, idx);
2664 return;
2665 }
2666
2667 survey = &ar->survey[idx];
2668
2669 if (!params->mac_clk_mhz)
2670 return;
2671
2672 memset(survey, 0, sizeof(*survey));
2673
2674 survey->noise = params->noise_floor;
2675 survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2676 survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2677 survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2678 SURVEY_INFO_TIME_BUSY;
2679}
2680
2681/*
2682 * Handle the channel info event for firmware which sends chan_info
2683 * events in pairs (start and stop) for every scanned channel.
2684 */
2685static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2686 struct chan_info_params *params)
2687{
2688 struct survey_info *survey;
2689 int idx;
2690
2691 idx = freq_to_idx(ar, params->freq);
2692 if (idx >= ARRAY_SIZE(ar->survey)) {
2693 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2694 params->freq, idx);
2695 return;
2696 }
2697
2698 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2699 if (ar->ch_info_can_report_survey) {
2700 survey = &ar->survey[idx];
2701 survey->noise = params->noise_floor;
2702 survey->filled = SURVEY_INFO_NOISE_DBM;
2703
2704 ath10k_hw_fill_survey_time(ar,
2705 survey,
2706 params->cycle_count,
2707 params->rx_clear_count,
2708 ar->survey_last_cycle_count,
2709 ar->survey_last_rx_clear_count);
2710 }
2711
2712 ar->ch_info_can_report_survey = false;
2713 } else {
2714 ar->ch_info_can_report_survey = true;
2715 }
2716
2717 if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2718 ar->survey_last_rx_clear_count = params->rx_clear_count;
2719 ar->survey_last_cycle_count = params->cycle_count;
2720 }
2721}
2722
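/* Common chan_info handler: parse the event and update the survey data,
 * using either the single-report or the paired (start/stop) scheme depending
 * on the firmware feature flags.
 */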
2723void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2724{
2725 struct chan_info_params ch_info_param;
2726 struct wmi_ch_info_ev_arg arg = {};
2727 int ret;
2728
2729 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2730 if (ret) {
2731 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2732 return;
2733 }
2734
2735 ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2736 ch_info_param.freq = __le32_to_cpu(arg.freq);
2737 ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2738 ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2739 ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2740 ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2741 ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2742
2743 ath10k_dbg(ar, ATH10K_DBG_WMI,
2744 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2745 ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2746 ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2747 ch_info_param.cycle_count);
2748
2749 spin_lock_bh(&ar->data_lock);
2750
2751 switch (ar->scan.state) {
2752 case ATH10K_SCAN_IDLE:
2753 case ATH10K_SCAN_STARTING:
2754 ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
2755 goto exit;
2756 case ATH10K_SCAN_RUNNING:
2757 case ATH10K_SCAN_ABORTING:
2758 break;
2759 }
2760
2761 if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2762 ar->running_fw->fw_file.fw_features))
2763 ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2764 else
2765 ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2766
2767exit:
2768 spin_unlock_bh(&ar->data_lock);
2769}
2770
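/* Echo events double as a command barrier: when the echoed value matches
 * ATH10K_WMI_BARRIER_ECHO_ID, the waiter blocked on ar->wmi.barrier is
 * completed.
 */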
2771void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2772{
2773 struct wmi_echo_ev_arg arg = {};
2774 int ret;
2775
2776 ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2777 if (ret) {
2778 ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2779 return;
2780 }
2781
2782 ath10k_dbg(ar, ATH10K_DBG_WMI,
2783 "wmi event echo value 0x%08x\n",
2784 le32_to_cpu(arg.value));
2785
2786 if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2787 complete(&ar->wmi.barrier);
2788}
2789
2790int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2791{
2792 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2793 skb->len);
2794
2795 trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2796
2797 return 0;
2798}
2799
2800void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2801 struct ath10k_fw_stats_pdev *dst)
2802{
2803 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2804 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2805 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2806 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2807 dst->cycle_count = __le32_to_cpu(src->cycle_count);
2808 dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2809 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2810}
2811
2812void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2813 struct ath10k_fw_stats_pdev *dst)
2814{
2815 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2816 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2817 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2818 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2819 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2820 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2821 dst->local_freed = __le32_to_cpu(src->local_freed);
2822 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2823 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2824 dst->underrun = __le32_to_cpu(src->underrun);
2825 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2826 dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
2827 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2828 dst->data_rc = __le32_to_cpu(src->data_rc);
2829 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2830 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2831 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2832 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2833 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2834 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2835 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2836 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2837}
2838
2839static void
2840ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2841 struct ath10k_fw_stats_pdev *dst)
2842{
2843 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2844 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2845 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2846 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2847 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2848 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2849 dst->local_freed = __le32_to_cpu(src->local_freed);
2850 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2851 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2852 dst->underrun = __le32_to_cpu(src->underrun);
2853 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2854 dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
2855 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2856 dst->data_rc = __le32_to_cpu(src->data_rc);
2857 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2858 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2859 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2860 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2861 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2862 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2863 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2864 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2865 dst->hw_paused = __le32_to_cpu(src->hw_paused);
2866 dst->seq_posted = __le32_to_cpu(src->seq_posted);
2867 dst->seq_failed_queueing =
2868 __le32_to_cpu(src->seq_failed_queueing);
2869 dst->seq_completed = __le32_to_cpu(src->seq_completed);
2870 dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2871 dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2872 dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2873 dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2874 dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2875 dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2877 dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2878}
2879
2880void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2881 struct ath10k_fw_stats_pdev *dst)
2882{
2883 dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2884 dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2885 dst->r0_frags = __le32_to_cpu(src->r0_frags);
2886 dst->r1_frags = __le32_to_cpu(src->r1_frags);
2887 dst->r2_frags = __le32_to_cpu(src->r2_frags);
2888 dst->r3_frags = __le32_to_cpu(src->r3_frags);
2889 dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2890 dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2891 dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2892 dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2893 dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2894 dst->phy_errs = __le32_to_cpu(src->phy_errs);
2895 dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2896 dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2897}
2898
2899void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2900 struct ath10k_fw_stats_pdev *dst)
2901{
2902 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2903 dst->rts_bad = __le32_to_cpu(src->rts_bad);
2904 dst->rts_good = __le32_to_cpu(src->rts_good);
2905 dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2906 dst->no_beacons = __le32_to_cpu(src->no_beacons);
2907 dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2908}
2909
2910void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2911 struct ath10k_fw_stats_peer *dst)
2912{
2913 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2914 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2915 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2916}
2917
2918static void
2919ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2920 struct ath10k_fw_stats_peer *dst)
2921{
2922 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2923 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2924 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2925 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2926}
2927
2928static void
2929ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
2930 struct ath10k_fw_stats_vdev_extd *dst)
2931{
2932 dst->vdev_id = __le32_to_cpu(src->vdev_id);
2933 dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
2934 dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
2935 dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
2936 dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
2937 dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
2938 dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
2939 dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
2940 dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
2941 dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
2942 dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
2943 dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
2944 dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
2945 dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
2946 dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
2947 dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
2948}
2949
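/* Main firmware stats layout: a wmi_stats_event header followed by
 * num_pdev_stats pdev entries and num_peer_stats peer entries. Each entry is
 * copied into a newly allocated node on the corresponding stats list.
 */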
2950static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
2951 struct sk_buff *skb,
2952 struct ath10k_fw_stats *stats)
2953{
2954 const struct wmi_stats_event *ev = (void *)skb->data;
2955 u32 num_pdev_stats, num_peer_stats;
2956 int i;
2957
2958 if (!skb_pull(skb, sizeof(*ev)))
2959 return -EPROTO;
2960
2961 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
2962 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
2963
2964 for (i = 0; i < num_pdev_stats; i++) {
2965 const struct wmi_pdev_stats *src;
2966 struct ath10k_fw_stats_pdev *dst;
2967
2968 src = (void *)skb->data;
2969 if (!skb_pull(skb, sizeof(*src)))
2970 return -EPROTO;
2971
2972 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2973 if (!dst)
2974 continue;
2975
2976 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
2977 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
2978 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
2979
2980 list_add_tail(&dst->list, &stats->pdevs);
2981 }
2982
2983 /* fw doesn't implement vdev stats */
2984
2985 for (i = 0; i < num_peer_stats; i++) {
2986 const struct wmi_peer_stats *src;
2987 struct ath10k_fw_stats_peer *dst;
2988
2989 src = (void *)skb->data;
2990 if (!skb_pull(skb, sizeof(*src)))
2991 return -EPROTO;
2992
2993 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2994 if (!dst)
2995 continue;
2996
2997 ath10k_wmi_pull_peer_stats(src, dst);
2998 list_add_tail(&dst->list, &stats->peers);
2999 }
3000
3001 return 0;
3002}
3003
3004static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3005 struct sk_buff *skb,
3006 struct ath10k_fw_stats *stats)
3007{
3008 const struct wmi_stats_event *ev = (void *)skb->data;
3009 u32 num_pdev_stats, num_peer_stats;
3010 int i;
3011
3012 if (!skb_pull(skb, sizeof(*ev)))
3013 return -EPROTO;
3014
3015 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3016 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3017
3018 for (i = 0; i < num_pdev_stats; i++) {
3019 const struct wmi_10x_pdev_stats *src;
3020 struct ath10k_fw_stats_pdev *dst;
3021
3022 src = (void *)skb->data;
3023 if (!skb_pull(skb, sizeof(*src)))
3024 return -EPROTO;
3025
3026 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3027 if (!dst)
3028 continue;
3029
3030 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3031 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3032 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3033 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3034
3035 list_add_tail(&dst->list, &stats->pdevs);
3036 }
3037
3038 /* fw doesn't implement vdev stats */
3039
3040 for (i = 0; i < num_peer_stats; i++) {
3041 const struct wmi_10x_peer_stats *src;
3042 struct ath10k_fw_stats_peer *dst;
3043
3044 src = (void *)skb->data;
3045 if (!skb_pull(skb, sizeof(*src)))
3046 return -EPROTO;
3047
3048 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3049 if (!dst)
3050 continue;
3051
3052 ath10k_wmi_pull_peer_stats(&src->old, dst);
3053
3054 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3055
3056 list_add_tail(&dst->list, &stats->peers);
3057 }
3058
3059 return 0;
3060}
3061
3062static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3063 struct sk_buff *skb,
3064 struct ath10k_fw_stats *stats)
3065{
3066 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3067 u32 num_pdev_stats;
3068 u32 num_pdev_ext_stats;
3069 u32 num_peer_stats;
3070 int i;
3071
3072 if (!skb_pull(skb, sizeof(*ev)))
3073 return -EPROTO;
3074
3075 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3076 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3077 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3078
3079 for (i = 0; i < num_pdev_stats; i++) {
3080 const struct wmi_10_2_pdev_stats *src;
3081 struct ath10k_fw_stats_pdev *dst;
3082
3083 src = (void *)skb->data;
3084 if (!skb_pull(skb, sizeof(*src)))
3085 return -EPROTO;
3086
3087 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3088 if (!dst)
3089 continue;
3090
3091 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3092 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3093 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3094 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3095 /* FIXME: expose 10.2 specific values */
3096
3097 list_add_tail(&dst->list, &stats->pdevs);
3098 }
3099
3100 for (i = 0; i < num_pdev_ext_stats; i++) {
3101 const struct wmi_10_2_pdev_ext_stats *src;
3102
3103 src = (void *)skb->data;
3104 if (!skb_pull(skb, sizeof(*src)))
3105 return -EPROTO;
3106
3107 /* FIXME: expose values to userspace
3108 *
3109 * Note: Even though this loop seems to do nothing it is
3110 * required to parse following sub-structures properly.
3111 */
3112 }
3113
3114 /* fw doesn't implement vdev stats */
3115
3116 for (i = 0; i < num_peer_stats; i++) {
3117 const struct wmi_10_2_peer_stats *src;
3118 struct ath10k_fw_stats_peer *dst;
3119
3120 src = (void *)skb->data;
3121 if (!skb_pull(skb, sizeof(*src)))
3122 return -EPROTO;
3123
3124 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3125 if (!dst)
3126 continue;
3127
3128 ath10k_wmi_pull_peer_stats(&src->old, dst);
3129
3130 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3131 /* FIXME: expose 10.2 specific values */
3132
3133 list_add_tail(&dst->list, &stats->peers);
3134 }
3135
3136 return 0;
3137}
3138
3139static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3140 struct sk_buff *skb,
3141 struct ath10k_fw_stats *stats)
3142{
3143 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3144 u32 num_pdev_stats;
3145 u32 num_pdev_ext_stats;
3146 u32 num_peer_stats;
3147 int i;
3148
3149 if (!skb_pull(skb, sizeof(*ev)))
3150 return -EPROTO;
3151
3152 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3153 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3154 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3155
3156 for (i = 0; i < num_pdev_stats; i++) {
3157 const struct wmi_10_2_pdev_stats *src;
3158 struct ath10k_fw_stats_pdev *dst;
3159
3160 src = (void *)skb->data;
3161 if (!skb_pull(skb, sizeof(*src)))
3162 return -EPROTO;
3163
3164 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3165 if (!dst)
3166 continue;
3167
3168 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3169 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3170 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3171 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3172 /* FIXME: expose 10.2 specific values */
3173
3174 list_add_tail(&dst->list, &stats->pdevs);
3175 }
3176
3177 for (i = 0; i < num_pdev_ext_stats; i++) {
3178 const struct wmi_10_2_pdev_ext_stats *src;
3179
3180 src = (void *)skb->data;
3181 if (!skb_pull(skb, sizeof(*src)))
3182 return -EPROTO;
3183
3184 /* FIXME: expose values to userspace
3185 *
3186 * Note: Even though this loop seems to do nothing it is
3187 * required to parse following sub-structures properly.
3188 */
3189 }
3190
3191 /* fw doesn't implement vdev stats */
3192
3193 for (i = 0; i < num_peer_stats; i++) {
3194 const struct wmi_10_2_4_ext_peer_stats *src;
3195 struct ath10k_fw_stats_peer *dst;
3196 int stats_len;
3197
3198 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3199 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3200 else
3201 stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3202
3203 src = (void *)skb->data;
3204 if (!skb_pull(skb, stats_len))
3205 return -EPROTO;
3206
3207 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3208 if (!dst)
3209 continue;
3210
3211 ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3212
3213 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3214
3215 if (ath10k_peer_stats_enabled(ar))
3216 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3217 /* FIXME: expose 10.2 specific values */
3218
3219 list_add_tail(&dst->list, &stats->peers);
3220 }
3221
3222 return 0;
3223}
3224
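/* 10.4 firmware stats layout: pdev, pdev-ext, vdev, peer and beacon-filter
 * blocks in that order, optionally followed by extended peer and vdev stats
 * when the corresponding WMI_10_4_STAT_* bits are set in stats_id.
 */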
3225static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3226 struct sk_buff *skb,
3227 struct ath10k_fw_stats *stats)
3228{
3229 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3230 u32 num_pdev_stats;
3231 u32 num_pdev_ext_stats;
3232 u32 num_vdev_stats;
3233 u32 num_peer_stats;
3234 u32 num_bcnflt_stats;
3235 u32 stats_id;
3236 int i;
3237
3238 if (!skb_pull(skb, sizeof(*ev)))
3239 return -EPROTO;
3240
3241 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3242 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3243 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3244 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3245 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3246 stats_id = __le32_to_cpu(ev->stats_id);
3247
3248 for (i = 0; i < num_pdev_stats; i++) {
3249 const struct wmi_10_4_pdev_stats *src;
3250 struct ath10k_fw_stats_pdev *dst;
3251
3252 src = (void *)skb->data;
3253 if (!skb_pull(skb, sizeof(*src)))
3254 return -EPROTO;
3255
3256 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3257 if (!dst)
3258 continue;
3259
3260 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3261 ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3262 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3263 dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3264 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3265
3266 list_add_tail(&dst->list, &stats->pdevs);
3267 }
3268
3269 for (i = 0; i < num_pdev_ext_stats; i++) {
3270 const struct wmi_10_2_pdev_ext_stats *src;
3271
3272 src = (void *)skb->data;
3273 if (!skb_pull(skb, sizeof(*src)))
3274 return -EPROTO;
3275
3276 /* FIXME: expose values to userspace
3277 *
3278 * Note: Even though this loop seems to do nothing it is
3279 * required to parse following sub-structures properly.
3280 */
3281 }
3282
3283 for (i = 0; i < num_vdev_stats; i++) {
3284 const struct wmi_vdev_stats *src;
3285
3286 /* Ignore vdev stats here as it has only vdev id. Actual vdev
3287 * stats will be retrieved from vdev extended stats.
3288 */
3289 src = (void *)skb->data;
3290 if (!skb_pull(skb, sizeof(*src)))
3291 return -EPROTO;
3292 }
3293
3294 for (i = 0; i < num_peer_stats; i++) {
3295 const struct wmi_10_4_peer_stats *src;
3296 struct ath10k_fw_stats_peer *dst;
3297
3298 src = (void *)skb->data;
3299 if (!skb_pull(skb, sizeof(*src)))
3300 return -EPROTO;
3301
3302 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3303 if (!dst)
3304 continue;
3305
3306 ath10k_wmi_10_4_pull_peer_stats(src, dst);
3307 list_add_tail(&dst->list, &stats->peers);
3308 }
3309
3310 for (i = 0; i < num_bcnflt_stats; i++) {
3311 const struct wmi_10_4_bss_bcn_filter_stats *src;
3312
3313 src = (void *)skb->data;
3314 if (!skb_pull(skb, sizeof(*src)))
3315 return -EPROTO;
3316
3317 /* FIXME: expose values to userspace
3318 *
3319 * Note: Even though this loop seems to do nothing it is
3320 * required to parse following sub-structures properly.
3321 */
3322 }
3323
3324 if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3325 stats->extended = true;
3326
3327 for (i = 0; i < num_peer_stats; i++) {
3328 const struct wmi_10_4_peer_extd_stats *src;
3329 struct ath10k_fw_extd_stats_peer *dst;
3330
3331 src = (void *)skb->data;
3332 if (!skb_pull(skb, sizeof(*src)))
3333 return -EPROTO;
3334
3335 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3336 if (!dst)
3337 continue;
3338
3339 ether_addr_copy(dst->peer_macaddr,
3340 src->peer_macaddr.addr);
3341 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3342 list_add_tail(&dst->list, &stats->peers_extd);
3343 }
3344 }
3345
3346 if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3347 for (i = 0; i < num_vdev_stats; i++) {
3348 const struct wmi_vdev_stats_extd *src;
3349 struct ath10k_fw_stats_vdev_extd *dst;
3350
3351 src = (void *)skb->data;
3352 if (!skb_pull(skb, sizeof(*src)))
3353 return -EPROTO;
3354
3355 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3356 if (!dst)
3357 continue;
3358 ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3359 list_add_tail(&dst->list, &stats->vdevs);
3360 }
3361 }
3362
3363 return 0;
3364}
3365
3366void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3367{
3368 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3369 ath10k_debug_fw_stats_process(ar, skb);
3370}
3371
3372static int
3373ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3374 struct wmi_vdev_start_ev_arg *arg)
3375{
3376 struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3377
3378 if (skb->len < sizeof(*ev))
3379 return -EPROTO;
3380
3381 skb_pull(skb, sizeof(*ev));
3382 arg->vdev_id = ev->vdev_id;
3383 arg->req_id = ev->req_id;
3384 arg->resp_type = ev->resp_type;
3385 arg->status = ev->status;
3386
3387 return 0;
3388}
3389
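/* A non-zero firmware status is recorded in last_wmi_vdev_start_status so
 * the setup path can report the failure; vdev_setup_done is completed either
 * way.
 */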
3390void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3391{
3392 struct wmi_vdev_start_ev_arg arg = {};
3393 int ret;
3394 u32 status;
3395
3396 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3397
3398 ar->last_wmi_vdev_start_status = 0;
3399
3400 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3401 if (ret) {
3402 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3403 ar->last_wmi_vdev_start_status = ret;
3404 goto out;
3405 }
3406
3407 status = __le32_to_cpu(arg.status);
3408 if (WARN_ON_ONCE(status)) {
3409 ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3410 status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3411 "chan-invalid" : "unknown");
3412 /* Setup is done one way or another though, so we should still
3413 * do the completion; don't return here.
3414 */
3415 ar->last_wmi_vdev_start_status = -EINVAL;
3416 }
3417
3418out:
3419 complete(&ar->vdev_setup_done);
3420}
3421
3422void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3423{
3424 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3425 complete(&ar->vdev_setup_done);
3426}
3427
3428static int
3429ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3430 struct wmi_peer_kick_ev_arg *arg)
3431{
3432 struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3433
3434 if (skb->len < sizeof(*ev))
3435 return -EPROTO;
3436
3437 skb_pull(skb, sizeof(*ev));
3438 arg->mac_addr = ev->peer_macaddr.addr;
3439
3440 return 0;
3441}
3442
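/* Firmware kicked a peer out; report a low ack count to mac80211 so it can
 * react (e.g. by disconnecting the station).
 */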
3443void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3444{
3445 struct wmi_peer_kick_ev_arg arg = {};
3446 struct ieee80211_sta *sta;
3447 int ret;
3448
3449 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3450 if (ret) {
3451 ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3452 ret);
3453 return;
3454 }
3455
3456 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
3457 arg.mac_addr);
3458
3459 rcu_read_lock();
3460
3461 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3462 if (!sta) {
3463 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3464 arg.mac_addr);
3465 goto exit;
3466 }
3467
3468 ieee80211_report_low_ack(sta, 10);
3469
3470exit:
3471 rcu_read_unlock();
3472}
3473
3474/*
3475 * FIXME
3476 *
3477 * We don't report to mac80211 sleep state of connected
3478 * stations. Due to this mac80211 can't fill in TIM IE
3479 * correctly.
3480 *
3481 * I know of no way of getting nullfunc frames that contain
3482 * sleep transition from connected stations - these do not
3483 * seem to be sent from the target to the host. There also
3484 * doesn't seem to be a dedicated event for that. So the
3485 * only way left to do this would be to read tim_bitmap
3486 * during SWBA.
3487 *
3488 * We could probably try using tim_bitmap from SWBA to tell
3489 * mac80211 which stations are asleep and which are not. The
3490 * problem here is calling mac80211 functions so many times
3491 * could take too long and make us miss the time to submit
3492 * the beacon to the target.
3493 *
3494 * So as a workaround we try to extend the TIM IE if there
3495 * is unicast buffered for stations with aid > 7 and fill it
3496 * in ourselves.
3497 */
3498static void ath10k_wmi_update_tim(struct ath10k *ar,
3499 struct ath10k_vif *arvif,
3500 struct sk_buff *bcn,
3501 const struct wmi_tim_info_arg *tim_info)
3502{
3503 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3504 struct ieee80211_tim_ie *tim;
3505 u8 *ies, *ie;
3506 u8 ie_len, pvm_len;
3507 __le32 t;
3508 u32 v, tim_len;
3509
3510 /* When FW reports 0 in tim_len, ensure at least the first byte
3511 * in tim_bitmap is considered for pvm calculation.
3512 */
3513 tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3514
3515 /* If the next SWBA has no tim_changed set, the tim_bitmap is garbage;
3516 * we must copy the bitmap upon change and reuse it later.
3517 */
3518 if (__le32_to_cpu(tim_info->tim_changed)) {
3519 int i;
3520
3521 if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3522 ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3523 tim_len, sizeof(arvif->u.ap.tim_bitmap));
3524 tim_len = sizeof(arvif->u.ap.tim_bitmap);
3525 }
3526
3527 for (i = 0; i < tim_len; i++) {
3528 t = tim_info->tim_bitmap[i / 4];
3529 v = __le32_to_cpu(t);
3530 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3531 }
3532
3533 /* FW reports either length 0 or a length based on the max supported
3534 * stations, so we calculate this on our own.
3535 */
3536 arvif->u.ap.tim_len = 0;
3537 for (i = 0; i < tim_len; i++)
3538 if (arvif->u.ap.tim_bitmap[i])
3539 arvif->u.ap.tim_len = i;
3540
3541 arvif->u.ap.tim_len++;
3542 }
3543
3544 ies = bcn->data;
3545 ies += ieee80211_hdrlen(hdr->frame_control);
3546 ies += 12; /* fixed parameters */
3547
3548 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3549 (u8 *)skb_tail_pointer(bcn) - ies);
3550 if (!ie) {
3551 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3552 ath10k_warn(ar, "no tim ie found\n");
3553 return;
3554 }
3555
3556 tim = (void *)ie + 2;
3557 ie_len = ie[1];
3558 pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3559
3560 if (pvm_len < arvif->u.ap.tim_len) {
3561 int expand_size = tim_len - pvm_len;
3562 int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3563 void *next_ie = ie + 2 + ie_len;
3564
3565 if (skb_put(bcn, expand_size)) {
3566 memmove(next_ie + expand_size, next_ie, move_size);
3567
3568 ie[1] += expand_size;
3569 ie_len += expand_size;
3570 pvm_len += expand_size;
3571 } else {
3572 ath10k_warn(ar, "tim expansion failed\n");
3573 }
3574 }
3575
3576 if (pvm_len > tim_len) {
3577 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3578 return;
3579 }
3580
3581 tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3582 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3583
3584 if (tim->dtim_count == 0) {
3585 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3586
3587 if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3588 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3589 }
3590
3591 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3592 tim->dtim_count, tim->dtim_period,
3593 tim->bitmap_ctrl, pvm_len);
3594}
3595
3596static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3597 struct sk_buff *bcn,
3598 const struct wmi_p2p_noa_info *noa)
3599{
3600 if (!arvif->vif->p2p)
3601 return;
3602
3603 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3604
3605 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3606 ath10k_p2p_noa_update(arvif, noa);
3607
3608 if (arvif->u.ap.noa_data)
3609 if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3610 skb_put_data(bcn, arvif->u.ap.noa_data,
3611 arvif->u.ap.noa_len);
3612}
3613
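/* Parse an SWBA (software beacon alert) event: for every vdev flagged in
 * vdev_map extract the TIM info and the P2P notice-of-absence info.
 */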
3614static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3615 struct wmi_swba_ev_arg *arg)
3616{
3617 struct wmi_host_swba_event *ev = (void *)skb->data;
3618 u32 map;
3619 size_t i;
3620
3621 if (skb->len < sizeof(*ev))
3622 return -EPROTO;
3623
3624 skb_pull(skb, sizeof(*ev));
3625 arg->vdev_map = ev->vdev_map;
3626
3627 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3628 if (!(map & BIT(0)))
3629 continue;
3630
3631 /* If this happens there were some changes in firmware and
3632 * ath10k should update the max size of tim_info array.
3633 */
3634 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3635 break;
3636
3637 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3638 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3639 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3640 return -EPROTO;
3641 }
3642
3643 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3644 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3645 arg->tim_info[i].tim_bitmap =
3646 ev->bcn_info[i].tim_info.tim_bitmap;
3647 arg->tim_info[i].tim_changed =
3648 ev->bcn_info[i].tim_info.tim_changed;
3649 arg->tim_info[i].tim_num_ps_pending =
3650 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3651
3652 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3653 i++;
3654 }
3655
3656 return 0;
3657}
3658
3659static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3660 struct sk_buff *skb,
3661 struct wmi_swba_ev_arg *arg)
3662{
3663 struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3664 u32 map;
3665 size_t i;
3666
3667 if (skb->len < sizeof(*ev))
3668 return -EPROTO;
3669
3670 skb_pull(skb, sizeof(*ev));
3671 arg->vdev_map = ev->vdev_map;
3672
3673 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3674 if (!(map & BIT(0)))
3675 continue;
3676
3677 /* If this happens there were some changes in firmware and
3678 * ath10k should update the max size of tim_info array.
3679 */
3680 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3681 break;
3682
3683 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3684 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3685 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3686 return -EPROTO;
3687 }
3688
3689 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3690 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3691 arg->tim_info[i].tim_bitmap =
3692 ev->bcn_info[i].tim_info.tim_bitmap;
3693 arg->tim_info[i].tim_changed =
3694 ev->bcn_info[i].tim_info.tim_changed;
3695 arg->tim_info[i].tim_num_ps_pending =
3696 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3697 i++;
3698 }
3699
3700 return 0;
3701}
3702
3703static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3704 struct sk_buff *skb,
3705 struct wmi_swba_ev_arg *arg)
3706{
3707 struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3708 u32 map, tim_len;
3709 size_t i;
3710
3711 if (skb->len < sizeof(*ev))
3712 return -EPROTO;
3713
3714 skb_pull(skb, sizeof(*ev));
3715 arg->vdev_map = ev->vdev_map;
3716
3717 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3718 if (!(map & BIT(0)))
3719 continue;
3720
3721 /* If this happens there were some changes in firmware and
3722 * ath10k should update the max size of tim_info array.
3723 */
3724 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3725 break;
3726
3727 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3728 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3729 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3730 return -EPROTO;
3731 }
3732
3733 tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3734 if (tim_len) {
3735 /* Exclude 4 byte guard length */
3736 tim_len -= 4;
3737 arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3738 } else {
3739 arg->tim_info[i].tim_len = 0;
3740 }
3741
3742 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3743 arg->tim_info[i].tim_bitmap =
3744 ev->bcn_info[i].tim_info.tim_bitmap;
3745 arg->tim_info[i].tim_changed =
3746 ev->bcn_info[i].tim_info.tim_changed;
3747 arg->tim_info[i].tim_num_ps_pending =
3748 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3749
3750 /* 10.4 firmware doesn't have p2p support. Notice of absence
3751 * info can be ignored for now.
3752 */
3753
3754 i++;
3755 }
3756
3757 return 0;
3758}
3759
3760static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3761{
3762 return WMI_TXBF_CONF_BEFORE_ASSOC;
3763}
3764
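/* Host SWBA handler: for every vdev in the map, fetch a fresh beacon from
 * mac80211, patch in the TIM and NoA data reported by firmware, then map (or
 * copy) the buffer for DMA and schedule it for transmission.
 */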
3765void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3766{
3767 struct wmi_swba_ev_arg arg = {};
3768 u32 map;
3769 int i = -1;
3770 const struct wmi_tim_info_arg *tim_info;
3771 const struct wmi_p2p_noa_info *noa_info;
3772 struct ath10k_vif *arvif;
3773 struct sk_buff *bcn;
3774 dma_addr_t paddr;
3775 int ret, vdev_id = 0;
3776
3777 ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3778 if (ret) {
3779 ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3780 return;
3781 }
3782
3783 map = __le32_to_cpu(arg.vdev_map);
3784
3785 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3786 map);
3787
3788 for (; map; map >>= 1, vdev_id++) {
3789 if (!(map & 0x1))
3790 continue;
3791
3792 i++;
3793
3794 if (i >= WMI_MAX_AP_VDEV) {
3795 ath10k_warn(ar, "swba has corrupted vdev map\n");
3796 break;
3797 }
3798
3799 tim_info = &arg.tim_info[i];
3800 noa_info = arg.noa_info[i];
3801
3802 ath10k_dbg(ar, ATH10K_DBG_MGMT,
3803 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3804 i,
3805 __le32_to_cpu(tim_info->tim_len),
3806 __le32_to_cpu(tim_info->tim_mcast),
3807 __le32_to_cpu(tim_info->tim_changed),
3808 __le32_to_cpu(tim_info->tim_num_ps_pending),
3809 __le32_to_cpu(tim_info->tim_bitmap[3]),
3810 __le32_to_cpu(tim_info->tim_bitmap[2]),
3811 __le32_to_cpu(tim_info->tim_bitmap[1]),
3812 __le32_to_cpu(tim_info->tim_bitmap[0]));
3813
3814 /* TODO: Only the first 4 words from tim_bitmap are dumped.
3815 * Extend debug code to dump full tim_bitmap.
3816 */
3817
3818 arvif = ath10k_get_arvif(ar, vdev_id);
3819 if (arvif == NULL) {
3820 ath10k_warn(ar, "no vif for vdev_id %d found\n",
3821 vdev_id);
3822 continue;
3823 }
3824
3825 /* mac80211 would have already asked us to stop beaconing and
3826 * bring the vdev down, so continue in that case
3827 */
3828 if (!arvif->is_up)
3829 continue;
3830
3831 /* There are no completions for beacons so wait for next SWBA
3832 * before telling mac80211 to decrement CSA counter
3833 *
3834 * Once CSA counter is completed stop sending beacons until
3835 * actual channel switch is done
3836 */
3837 if (arvif->vif->csa_active &&
3838 ieee80211_csa_is_complete(arvif->vif)) {
3839 ieee80211_csa_finish(arvif->vif);
3840 continue;
3841 }
3842
3843 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
3844 if (!bcn) {
3845 ath10k_warn(ar, "could not get mac80211 beacon\n");
3846 continue;
3847 }
3848
3849 ath10k_tx_h_seq_no(arvif->vif, bcn);
3850 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3851 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3852
3853 spin_lock_bh(&ar->data_lock);
3854
3855 if (arvif->beacon) {
3856 switch (arvif->beacon_state) {
3857 case ATH10K_BEACON_SENT:
3858 break;
3859 case ATH10K_BEACON_SCHEDULED:
3860 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3861 arvif->vdev_id);
3862 break;
3863 case ATH10K_BEACON_SENDING:
3864 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3865 arvif->vdev_id);
3866 dev_kfree_skb(bcn);
3867 goto skip;
3868 }
3869
3870 ath10k_mac_vif_beacon_free(arvif);
3871 }
3872
3873 if (!arvif->beacon_buf) {
3874 paddr = dma_map_single(arvif->ar->dev, bcn->data,
3875 bcn->len, DMA_TO_DEVICE);
3876 ret = dma_mapping_error(arvif->ar->dev, paddr);
3877 if (ret) {
3878 ath10k_warn(ar, "failed to map beacon: %d\n",
3879 ret);
3880 dev_kfree_skb_any(bcn);
3881 goto skip;
3882 }
3883
3884 ATH10K_SKB_CB(bcn)->paddr = paddr;
3885 } else {
3886 if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3887 ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3888 bcn->len, IEEE80211_MAX_FRAME_LEN);
3889 skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3890 }
3891 memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3892 ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3893 }
3894
3895 arvif->beacon = bcn;
3896 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3897
3898 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3899 trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3900
3901skip:
3902 spin_unlock_bh(&ar->data_lock);
3903 }
3904
3905 ath10k_wmi_tx_beacons_nowait(ar);
3906}
3907
3908void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3909{
3910 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3911}
3912
3913static void ath10k_radar_detected(struct ath10k *ar)
3914{
3915 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3916 ATH10K_DFS_STAT_INC(ar, radar_detected);
3917
3918 /* Control radar events reporting in debugfs file
3919 * dfs_block_radar_events
3920 */
3921 if (ar->dfs_block_radar_events)
3922 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3923 else
3924 ieee80211_radar_detected(ar->hw);
3925}
3926
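/* Host DFS confirmation: send the most recently detected radar parameters to
 * firmware and wait briefly for its verdict; if no confirmation event arrives
 * in time the pulse is treated as a real radar.
 */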
3927static void ath10k_radar_confirmation_work(struct work_struct *work)
3928{
3929 struct ath10k *ar = container_of(work, struct ath10k,
3930 radar_confirmation_work);
3931 struct ath10k_radar_found_info radar_info;
3932 int ret, time_left;
3933
3934 reinit_completion(&ar->wmi.radar_confirm);
3935
3936 spin_lock_bh(&ar->data_lock);
3937 memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
3938 spin_unlock_bh(&ar->data_lock);
3939
3940 ret = ath10k_wmi_report_radar_found(ar, &radar_info);
3941 if (ret) {
3942 ath10k_warn(ar, "failed to send radar found %d\n", ret);
3943 goto wait_complete;
3944 }
3945
3946 time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
3947 ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
3948 if (time_left) {
3949 /* DFS Confirmation status event received and
3950 * necessary action completed.
3951 */
3952 goto wait_complete;
3953 } else {
3954 /* DFS confirmation event not received from FW. Considering this
3955 * as a real radar.
3956 */
3957 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3958 "dfs confirmation not received from fw, considering as radar\n");
3959 goto radar_detected;
3960 }
3961
3962radar_detected:
3963 ath10k_radar_detected(ar);
3964
3965 /* Reset state to allow sending confirmation on consecutive radar
3966 * detections, unless radar confirmation is disabled/stopped.
3967 */
3968wait_complete:
3969 spin_lock_bh(&ar->data_lock);
3970 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
3971 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
3972 spin_unlock_bh(&ar->data_lock);
3973}
3974
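/* Feed a single radar pulse into the DFS pattern detector. Once a pattern is
 * matched, either report the radar immediately or, if host DFS confirmation
 * is supported and the regulatory region is FCC, queue the confirmation work
 * instead.
 */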
3975static void ath10k_dfs_radar_report(struct ath10k *ar,
3976 struct wmi_phyerr_ev_arg *phyerr,
3977 const struct phyerr_radar_report *rr,
3978 u64 tsf)
3979{
3980 u32 reg0, reg1, tsf32l;
3981 struct ieee80211_channel *ch;
3982 struct pulse_event pe;
3983 struct radar_detector_specs rs;
3984 u64 tsf64;
3985 u8 rssi, width;
3986 struct ath10k_radar_found_info *radar_info;
3987
3988 reg0 = __le32_to_cpu(rr->reg0);
3989 reg1 = __le32_to_cpu(rr->reg1);
3990
3991 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3992 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
3993 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
3994 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
3995 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
3996 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
3997 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
3998 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
3999 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4000 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4001 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4002 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4003 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4004 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4005 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4006 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4007 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4008
4009 if (!ar->dfs_detector)
4010 return;
4011
4012 spin_lock_bh(&ar->data_lock);
4013 ch = ar->rx_channel;
4014
4015 /* fetch target operating channel during channel change */
4016 if (!ch)
4017 ch = ar->tgt_oper_chan;
4018
4019 spin_unlock_bh(&ar->data_lock);
4020
4021 if (!ch) {
4022 ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4023 goto radar_detected;
4024 }
4025
4026 /* report event to DFS pattern detector */
4027 tsf32l = phyerr->tsf_timestamp;
4028 tsf64 = tsf & (~0xFFFFFFFFULL);
4029 tsf64 |= tsf32l;
4030
4031 width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4032 rssi = phyerr->rssi_combined;
4033
4034 /* hardware stores this as an 8 bit signed value;
4035 * set it to zero if it is negative
4036 */
4037 if (rssi & 0x80)
4038 rssi = 0;
4039
4040 pe.ts = tsf64;
4041 pe.freq = ch->center_freq;
4042 pe.width = width;
4043 pe.rssi = rssi;
4044 pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4045 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4046 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4047 pe.freq, pe.width, pe.rssi, pe.ts);
4048
4049 ATH10K_DFS_STAT_INC(ar, pulses_detected);
4050
4051 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4052 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4053 "dfs no pulse pattern detected, yet\n");
4054 return;
4055 }
4056
4057 if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4058 ar->dfs_detector->region == NL80211_DFS_FCC) {
4059 /* Consecutive radar indications need not be
4060 * sent to the firmware until we get confirmation
4061 * for the previously detected radar.
4062 */
4063 spin_lock_bh(&ar->data_lock);
4064 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4065 spin_unlock_bh(&ar->data_lock);
4066 return;
4067 }
4068 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4069 radar_info = &ar->last_radar_info;
4070
4071 radar_info->pri_min = rs.pri_min;
4072 radar_info->pri_max = rs.pri_max;
4073 radar_info->width_min = rs.width_min;
4074 radar_info->width_max = rs.width_max;
4075 /*TODO Find sidx_min and sidx_max */
4076 radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4077 radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4078
4079 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4080 "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4081 radar_info->pri_min, radar_info->pri_max,
4082 radar_info->width_min, radar_info->width_max,
4083 radar_info->sidx_min, radar_info->sidx_max);
4084 ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4085 spin_unlock_bh(&ar->data_lock);
4086 return;
4087 }
4088
4089radar_detected:
4090 ath10k_radar_detected(ar);
4091}
4092
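/* Returns -EINVAL for pulses that look like false detections (low combined
 * RSSI together with a low FFT peak magnitude) so the caller can discard the
 * surrounding radar report.
 */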
4093static int ath10k_dfs_fft_report(struct ath10k *ar,
4094 struct wmi_phyerr_ev_arg *phyerr,
4095 const struct phyerr_fft_report *fftr,
4096 u64 tsf)
4097{
4098 u32 reg0, reg1;
4099 u8 rssi, peak_mag;
4100
4101 reg0 = __le32_to_cpu(fftr->reg0);
4102 reg1 = __le32_to_cpu(fftr->reg1);
4103 rssi = phyerr->rssi_combined;
4104
4105 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4106 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4107 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4108 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4109 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4110 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4111 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4112 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4113 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4114 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4115 MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4116 MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4117
4118 peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4119
4120 /* false event detection */
4121 if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4122 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4123 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4124 ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4125 return -EINVAL;
4126 }
4127
4128 return 0;
4129}
4130
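/* Walk the TLVs inside a radar phyerr buffer, validating each length, and
 * feed radar pulse summaries and search FFT reports to the DFS pattern
 * detector.
 */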
4131void ath10k_wmi_event_dfs(struct ath10k *ar,
4132 struct wmi_phyerr_ev_arg *phyerr,
4133 u64 tsf)
4134{
4135 int buf_len, tlv_len, res, i = 0;
4136 const struct phyerr_tlv *tlv;
4137 const struct phyerr_radar_report *rr;
4138 const struct phyerr_fft_report *fftr;
4139 const u8 *tlv_buf;
4140
4141 buf_len = phyerr->buf_len;
4142 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4143 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4144 phyerr->phy_err_code, phyerr->rssi_combined,
4145 phyerr->tsf_timestamp, tsf, buf_len);
4146
4147 /* Skip event if DFS disabled */
4148 if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4149 return;
4150
4151 ATH10K_DFS_STAT_INC(ar, pulses_total);
4152
4153 while (i < buf_len) {
4154 if (i + sizeof(*tlv) > buf_len) {
4155 ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4156 i);
4157 return;
4158 }
4159
4160 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4161 tlv_len = __le16_to_cpu(tlv->len);
4162 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4163 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4164 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4165 tlv_len, tlv->tag, tlv->sig);
4166
4167 switch (tlv->tag) {
4168 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4169 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4170 ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4171 i);
4172 return;
4173 }
4174
4175 rr = (struct phyerr_radar_report *)tlv_buf;
4176 ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4177 break;
4178 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4179 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4180 ath10k_warn(ar, "too short fft report (%d)\n",
4181 i);
4182 return;
4183 }
4184
4185 fftr = (struct phyerr_fft_report *)tlv_buf;
4186 res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4187 if (res)
4188 return;
4189 break;
4190 }
4191
4192 i += sizeof(*tlv) + tlv_len;
4193 }
4194}
4195
4196void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4197 struct wmi_phyerr_ev_arg *phyerr,
4198 u64 tsf)
4199{
4200 int buf_len, tlv_len, res, i = 0;
4201 struct phyerr_tlv *tlv;
4202 const void *tlv_buf;
4203 const struct phyerr_fft_report *fftr;
4204 size_t fftr_len;
4205
4206 buf_len = phyerr->buf_len;
4207
4208 while (i < buf_len) {
4209 if (i + sizeof(*tlv) > buf_len) {
4210 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4211 i);
4212 return;
4213 }
4214
4215 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4216 tlv_len = __le16_to_cpu(tlv->len);
4217 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4218
4219 if (i + sizeof(*tlv) + tlv_len > buf_len) {
4220 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4221 i);
4222 return;
4223 }
4224
4225 switch (tlv->tag) {
4226 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4227 if (sizeof(*fftr) > tlv_len) {
4228 ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4229 i);
4230 return;
4231 }
4232
4233 fftr_len = tlv_len - sizeof(*fftr);
4234 fftr = tlv_buf;
4235 res = ath10k_spectral_process_fft(ar, phyerr,
4236 fftr, fftr_len,
4237 tsf);
4238 if (res < 0) {
4239 ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4240 res);
4241 return;
4242 }
4243 break;
4244 }
4245
4246 i += sizeof(*tlv) + tlv_len;
4247 }
4248}
4249
4250static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4251 struct sk_buff *skb,
4252 struct wmi_phyerr_hdr_arg *arg)
4253{
4254 struct wmi_phyerr_event *ev = (void *)skb->data;
4255
4256 if (skb->len < sizeof(*ev))
4257 return -EPROTO;
4258
4259 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4260 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4261 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4262 arg->buf_len = skb->len - sizeof(*ev);
4263 arg->phyerrs = ev->phyerrs;
4264
4265 return 0;
4266}
4267
4268static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4269 struct sk_buff *skb,
4270 struct wmi_phyerr_hdr_arg *arg)
4271{
4272 struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4273
4274 if (skb->len < sizeof(*ev))
4275 return -EPROTO;
4276
4277 /* 10.4 firmware always reports only one phyerr */
4278 arg->num_phyerrs = 1;
4279
4280 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4281 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4282 arg->buf_len = skb->len;
4283 arg->phyerrs = skb->data;
4284
4285 return 0;
4286}
4287
4288int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4289 const void *phyerr_buf,
4290 int left_len,
4291 struct wmi_phyerr_ev_arg *arg)
4292{
4293 const struct wmi_phyerr *phyerr = phyerr_buf;
4294 int i;
4295
4296 if (left_len < sizeof(*phyerr)) {
4297 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4298 left_len, sizeof(*phyerr));
4299 return -EINVAL;
4300 }
4301
4302 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4303 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4304 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4305 arg->rssi_combined = phyerr->rssi_combined;
4306 arg->chan_width_mhz = phyerr->chan_width_mhz;
4307 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4308 arg->buf = phyerr->buf;
4309 arg->hdr_len = sizeof(*phyerr);
4310
4311 for (i = 0; i < 4; i++)
4312 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4313
4314 switch (phyerr->phy_err_code) {
4315 case PHY_ERROR_GEN_SPECTRAL_SCAN:
4316 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4317 break;
4318 case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4319 arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4320 break;
4321 case PHY_ERROR_GEN_RADAR:
4322 arg->phy_err_code = PHY_ERROR_RADAR;
4323 break;
4324 default:
4325 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4326 break;
4327 }
4328
4329 return 0;
4330}
4331
4332static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4333 const void *phyerr_buf,
4334 int left_len,
4335 struct wmi_phyerr_ev_arg *arg)
4336{
4337 const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4338 u32 phy_err_mask;
4339 int i;
4340
4341 if (left_len < sizeof(*phyerr)) {
4342 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4343 left_len, sizeof(*phyerr));
4344 return -EINVAL;
4345 }
4346
4347 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4348 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4349 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4350 arg->rssi_combined = phyerr->rssi_combined;
4351 arg->chan_width_mhz = phyerr->chan_width_mhz;
4352 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4353 arg->buf = phyerr->buf;
4354 arg->hdr_len = sizeof(*phyerr);
4355
4356 for (i = 0; i < 4; i++)
4357 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4358
4359 phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4360
4361 if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4362 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4363 else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4364 arg->phy_err_code = PHY_ERROR_RADAR;
4365 else
4366 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4367
4368 return 0;
4369}
4370
4371void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4372{
4373 struct wmi_phyerr_hdr_arg hdr_arg = {};
4374 struct wmi_phyerr_ev_arg phyerr_arg = {};
4375 const void *phyerr;
4376 u32 count, i, buf_len, phy_err_code;
4377 u64 tsf;
4378 int left_len, ret;
4379
4380 ATH10K_DFS_STAT_INC(ar, phy_errors);
4381
4382 ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4383 if (ret) {
4384 ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4385 return;
4386 }
4387
4388 /* Check number of included events */
4389 count = hdr_arg.num_phyerrs;
4390
4391 left_len = hdr_arg.buf_len;
4392
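	/* Reassemble the 64-bit TSF from the two 32-bit halves carried in
	 * the event header.
	 */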
4393 tsf = hdr_arg.tsf_u32;
4394 tsf <<= 32;
4395 tsf |= hdr_arg.tsf_l32;
4396
4397 ath10k_dbg(ar, ATH10K_DBG_WMI,
4398 "wmi event phyerr count %d tsf64 0x%llX\n",
4399 count, tsf);
4400
4401 phyerr = hdr_arg.phyerrs;
4402 for (i = 0; i < count; i++) {
4403 ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4404 if (ret) {
4405 ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4406 i);
4407 return;
4408 }
4409
4410 left_len -= phyerr_arg.hdr_len;
4411 buf_len = phyerr_arg.buf_len;
4412 phy_err_code = phyerr_arg.phy_err_code;
4413
4414 if (left_len < buf_len) {
4415 ath10k_warn(ar, "phyerr event (%d) has wrong buf len\n", i);
4416 return;
4417 }
4418
4419 left_len -= buf_len;
4420
4421 switch (phy_err_code) {
4422 case PHY_ERROR_RADAR:
4423 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4424 break;
4425 case PHY_ERROR_SPECTRAL_SCAN:
4426 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4427 break;
4428 case PHY_ERROR_FALSE_RADAR_EXT:
4429 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4430 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4431 break;
4432 default:
4433 break;
4434 }
4435
4436 phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4437 }
4438}
4439
4440static int
4441ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4442 struct wmi_dfs_status_ev_arg *arg)
4443{
4444 struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4445
4446 if (skb->len < sizeof(*ev))
4447 return -EPROTO;
4448
4449 arg->status = ev->status;
4450
4451 return 0;
4452}
4453
4454static void
4455ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4456{
4457 struct wmi_dfs_status_ev_arg status_arg = {};
4458 int ret;
4459
4460 ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4461
4462 if (ret) {
4463 ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4464 return;
4465 }
4466
4467 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4468 "dfs status event received from fw: %d\n",
4469 status_arg.status);
4470
4471 /* Even in case of radar detection failure we follow the same
4472 * behaviour as if radar had been detected, i.e. switch to a
4473 * different channel.
4474 */
4475 if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4476 status_arg.status == WMI_RADAR_DETECTION_FAIL)
4477 ath10k_radar_detected(ar);
4478 complete(&ar->wmi.radar_confirm);
4479}
4480
4481void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4482{
4483 struct wmi_roam_ev_arg arg = {};
4484 int ret;
4485 u32 vdev_id;
4486 u32 reason;
4487 s32 rssi;
4488
4489 ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4490 if (ret) {
4491 ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4492 return;
4493 }
4494
4495 vdev_id = __le32_to_cpu(arg.vdev_id);
4496 reason = __le32_to_cpu(arg.reason);
4497 rssi = __le32_to_cpu(arg.rssi);
4498 rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4499
4500 ath10k_dbg(ar, ATH10K_DBG_WMI,
4501 "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4502 vdev_id, reason, rssi);
4503
4504 if (reason >= WMI_ROAM_REASON_MAX)
4505 ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4506 reason, vdev_id);
4507
4508 switch (reason) {
4509 case WMI_ROAM_REASON_BEACON_MISS:
4510 ath10k_mac_handle_beacon_miss(ar, vdev_id);
4511 break;
4512 case WMI_ROAM_REASON_BETTER_AP:
4513 case WMI_ROAM_REASON_LOW_RSSI:
4514 case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4515 case WMI_ROAM_REASON_HO_FAILED:
4516 ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4517 reason, vdev_id);
4518 break;
4519 }
4520}
4521
4522void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4523{
4524 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4525}
4526
4527void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4528{
4529 char buf[101], c;
4530 int i;
4531
4532 for (i = 0; i < sizeof(buf) - 1; i++) {
4533 if (i >= skb->len)
4534 break;
4535
4536 c = skb->data[i];
4537
4538 if (c == '\0')
4539 break;
4540
4541 if (isascii(c) && isprint(c))
4542 buf[i] = c;
4543 else
4544 buf[i] = '.';
4545 }
4546
4547 if (i == sizeof(buf) - 1)
4548 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4549
4550 /* for some reason the debug prints end with \n, remove that */
4551 if (skb->data[i - 1] == '\n')
4552 i--;
4553
4554 /* the last byte is always reserved for the null character */
4555 buf[i] = '\0';
4556
4557 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4558}
4559
4560void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4561{
4562 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4563}
4564
4565void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4566{
4567 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4568}
4569
4570void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4571 struct sk_buff *skb)
4572{
4573 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4574}
4575
4576void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4577 struct sk_buff *skb)
4578{
4579 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4580}
4581
4582void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4583{
4584 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4585}
4586
4587void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4588{
4589 struct wmi_wow_ev_arg ev = {};
4590 int ret;
4591
4592 complete(&ar->wow.wakeup_completed);
4593
4594 ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4595 if (ret) {
4596 ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4597 return;
4598 }
4599
4600 ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4601 wow_reason(ev.wake_reason));
4602}
4603
4604void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4605{
4606 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4607}
4608
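/* Compute the TX power for one entry of the TPC config rate table: take the
 * minimum of the per-rate power and the per-chain regulatory limit, and, when
 * more chains than spatial streams are active (and the rate is not CCK),
 * additionally cap it with the matching STBC/TXBF/CDD limit table.
 */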
4609static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4610 struct wmi_pdev_tpc_config_event *ev,
4611 u32 rate_idx, u32 num_chains,
4612 u32 rate_code, u8 type)
4613{
4614 u8 tpc, num_streams, preamble, ch, stm_idx;
4615
4616 num_streams = ATH10K_HW_NSS(rate_code);
4617 preamble = ATH10K_HW_PREAMBLE(rate_code);
4618 ch = num_chains - 1;
4619
4620 tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4621
4622 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4623 goto out;
4624
4625 if (preamble == WMI_RATE_PREAMBLE_CCK)
4626 goto out;
4627
4628 stm_idx = num_streams - 1;
4629 if (num_chains <= num_streams)
4630 goto out;
4631
4632 switch (type) {
4633 case WMI_TPC_TABLE_TYPE_STBC:
4634 tpc = min_t(u8, tpc,
4635 ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4636 break;
4637 case WMI_TPC_TABLE_TYPE_TXBF:
4638 tpc = min_t(u8, tpc,
4639 ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4640 break;
4641 case WMI_TPC_TABLE_TYPE_CDD:
4642 tpc = min_t(u8, tpc,
4643 ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4644 break;
4645 default:
4646 ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4647 tpc = 0;
4648 break;
4649 }
4650
4651out:
4652 return tpc;
4653}
4654
4655static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4656 struct wmi_pdev_tpc_config_event *ev,
4657 struct ath10k_tpc_stats *tpc_stats,
4658 u8 *rate_code, u16 *pream_table, u8 type)
4659{
4660 u32 i, j, pream_idx, flags;
4661 u8 tpc[WMI_TPC_TX_N_CHAIN];
4662 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4663 char buff[WMI_TPC_BUF_SIZE];
4664
4665 flags = __le32_to_cpu(ev->flags);
4666
4667 switch (type) {
4668 case WMI_TPC_TABLE_TYPE_CDD:
4669 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4670 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4671 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4672 return;
4673 }
4674 break;
4675 case WMI_TPC_TABLE_TYPE_STBC:
4676 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4677 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4678 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4679 return;
4680 }
4681 break;
4682 case WMI_TPC_TABLE_TYPE_TXBF:
4683 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4684 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4685 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4686 return;
4687 }
4688 break;
4689 default:
4690 ath10k_dbg(ar, ATH10K_DBG_WMI,
4691 "invalid table type in wmi tpc event: %d\n", type);
4692 return;
4693 }
4694
4695 pream_idx = 0;
4696 for (i = 0; i < tpc_stats->rate_max; i++) {
4697 memset(tpc_value, 0, sizeof(tpc_value));
4698 memset(buff, 0, sizeof(buff));
4699 if (i == pream_table[pream_idx])
4700 pream_idx++;
4701
4702 for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4703 tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4704 rate_code[i],
4705 type);
4706 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4707 strlcat(tpc_value, buff, sizeof(tpc_value));
4708 }
4709 tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4710 tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4711 memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4712 tpc_value, sizeof(tpc_value));
4713 }
4714}
4715
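/* Build the flat rate code table used by the TPC tables: 4 CCK and 8 OFDM
 * entries, then 8 HT20/HT40 and 10 VHT20/40/80 entries per TX chain, plus a
 * few trailing single-stream entries. pream_table[] records, for each
 * preamble section, the rate index right after its last entry (i.e. where
 * the next section begins) and is terminated with ATH10K_TPC_PREAM_TABLE_END.
 */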
4716void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4717 u32 num_tx_chain)
4718{
4719 u32 i, j, pream_idx;
4720 u8 rate_idx;
4721
4722 /* Create the rate code table based on the chains supported */
4723 rate_idx = 0;
4724 pream_idx = 0;
4725
4726 /* Fill CCK rate code */
4727 for (i = 0; i < 4; i++) {
4728 rate_code[rate_idx] =
4729 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4730 rate_idx++;
4731 }
4732 pream_table[pream_idx] = rate_idx;
4733 pream_idx++;
4734
4735 /* Fill OFDM rate code */
4736 for (i = 0; i < 8; i++) {
4737 rate_code[rate_idx] =
4738 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4739 rate_idx++;
4740 }
4741 pream_table[pream_idx] = rate_idx;
4742 pream_idx++;
4743
4744 /* Fill HT20 rate code */
4745 for (i = 0; i < num_tx_chain; i++) {
4746 for (j = 0; j < 8; j++) {
4747 rate_code[rate_idx] =
4748 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4749 rate_idx++;
4750 }
4751 }
4752 pream_table[pream_idx] = rate_idx;
4753 pream_idx++;
4754
4755 /* Fill HT40 rate code */
4756 for (i = 0; i < num_tx_chain; i++) {
4757 for (j = 0; j < 8; j++) {
4758 rate_code[rate_idx] =
4759 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4760 rate_idx++;
4761 }
4762 }
4763 pream_table[pream_idx] = rate_idx;
4764 pream_idx++;
4765
4766 /* Fill VHT20 rate code */
4767 for (i = 0; i < num_tx_chain; i++) {
4768 for (j = 0; j < 10; j++) {
4769 rate_code[rate_idx] =
4770 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4771 rate_idx++;
4772 }
4773 }
4774 pream_table[pream_idx] = rate_idx;
4775 pream_idx++;
4776
4777 /* Fill VHT40 rate code */
4778 for (i = 0; i < num_tx_chain; i++) {
4779 for (j = 0; j < 10; j++) {
4780 rate_code[rate_idx] =
4781 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4782 rate_idx++;
4783 }
4784 }
4785 pream_table[pream_idx] = rate_idx;
4786 pream_idx++;
4787
4788 /* Fill VHT80 rate code */
4789 for (i = 0; i < num_tx_chain; i++) {
4790 for (j = 0; j < 10; j++) {
4791 rate_code[rate_idx] =
4792 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4793 rate_idx++;
4794 }
4795 }
4796 pream_table[pream_idx] = rate_idx;
4797 pream_idx++;
4798
4799 rate_code[rate_idx++] =
4800 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4801 rate_code[rate_idx++] =
4802 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4803 rate_code[rate_idx++] =
4804 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4805 rate_code[rate_idx++] =
4806 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4807 rate_code[rate_idx++] =
4808 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4809
4810 pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4811}
4812
4813void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4814{
4815 u32 num_tx_chain, rate_max;
4816 u8 rate_code[WMI_TPC_RATE_MAX];
4817 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4818 struct wmi_pdev_tpc_config_event *ev;
4819 struct ath10k_tpc_stats *tpc_stats;
4820
4821 ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4822
4823 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4824
4825 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4826 ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
4827 num_tx_chain, WMI_TPC_TX_N_CHAIN);
4828 return;
4829 }
4830
4831 rate_max = __le32_to_cpu(ev->rate_max);
4832 if (rate_max > WMI_TPC_RATE_MAX) {
4833 ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
4834 rate_max, WMI_TPC_RATE_MAX);
4835 rate_max = WMI_TPC_RATE_MAX;
4836 }
4837
4838 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4839 if (!tpc_stats)
4840 return;
4841
4842 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4843 num_tx_chain);
4844
4845 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4846 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4847 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4848 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4849 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4850 tpc_stats->twice_antenna_reduction =
4851 __le32_to_cpu(ev->twice_antenna_reduction);
4852 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4853 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4854 tpc_stats->num_tx_chain = num_tx_chain;
4855 tpc_stats->rate_max = rate_max;
4856
4857 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4858 rate_code, pream_table,
4859 WMI_TPC_TABLE_TYPE_CDD);
4860 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4861 rate_code, pream_table,
4862 WMI_TPC_TABLE_TYPE_STBC);
4863 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4864 rate_code, pream_table,
4865 WMI_TPC_TABLE_TYPE_TXBF);
4866
4867 ath10k_debug_tpc_stats_process(ar, tpc_stats);
4868
4869 ath10k_dbg(ar, ATH10K_DBG_WMI,
4870 "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
4871 __le32_to_cpu(ev->chan_freq),
4872 __le32_to_cpu(ev->phy_mode),
4873 __le32_to_cpu(ev->ctl),
4874 __le32_to_cpu(ev->reg_domain),
4875 a_sle32_to_cpu(ev->twice_antenna_gain),
4876 __le32_to_cpu(ev->twice_antenna_reduction),
4877 __le32_to_cpu(ev->power_limit),
4878 __le32_to_cpu(ev->twice_max_rd_power) / 2,
4879 __le32_to_cpu(ev->num_tx_chain),
4880 __le32_to_cpu(ev->rate_max));
4881}
4882
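/* Like ath10k_tpc_config_get_rate(), but for the final TPC table: the power
 * is additionally capped with the CTL power table entry selected from the
 * channel frequency (2 GHz vs 5 GHz) and preamble group, except for the last
 * preamble group (pream == 4), which is not CTL-limited here.
 */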
4883static u8
4884ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4885 struct wmi_pdev_tpc_final_table_event *ev,
4886 u32 rate_idx, u32 num_chains,
4887 u32 rate_code, u8 type, u32 pream_idx)
4888{
4889 u8 tpc, num_streams, preamble, ch, stm_idx;
4890 s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4891 int pream;
4892
4893 num_streams = ATH10K_HW_NSS(rate_code);
4894 preamble = ATH10K_HW_PREAMBLE(rate_code);
4895 ch = num_chains - 1;
4896 stm_idx = num_streams - 1;
4897 pream = -1;
4898
4899 if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4900 switch (pream_idx) {
4901 case WMI_TPC_PREAM_2GHZ_CCK:
4902 pream = 0;
4903 break;
4904 case WMI_TPC_PREAM_2GHZ_OFDM:
4905 pream = 1;
4906 break;
4907 case WMI_TPC_PREAM_2GHZ_HT20:
4908 case WMI_TPC_PREAM_2GHZ_VHT20:
4909 pream = 2;
4910 break;
4911 case WMI_TPC_PREAM_2GHZ_HT40:
4912 case WMI_TPC_PREAM_2GHZ_VHT40:
4913 pream = 3;
4914 break;
4915 case WMI_TPC_PREAM_2GHZ_VHT80:
4916 pream = 4;
4917 break;
4918 default:
4919 pream = -1;
4920 break;
4921 }
4922 }
4923
4924 if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4925 switch (pream_idx) {
4926 case WMI_TPC_PREAM_5GHZ_OFDM:
4927 pream = 0;
4928 break;
4929 case WMI_TPC_PREAM_5GHZ_HT20:
4930 case WMI_TPC_PREAM_5GHZ_VHT20:
4931 pream = 1;
4932 break;
4933 case WMI_TPC_PREAM_5GHZ_HT40:
4934 case WMI_TPC_PREAM_5GHZ_VHT40:
4935 pream = 2;
4936 break;
4937 case WMI_TPC_PREAM_5GHZ_VHT80:
4938 pream = 3;
4939 break;
4940 case WMI_TPC_PREAM_5GHZ_HTCUP:
4941 pream = 4;
4942 break;
4943 default:
4944 pream = -1;
4945 break;
4946 }
4947 }
4948
4949 if (pream == -1) {
4950 ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
4951 pream_idx, __le32_to_cpu(ev->chan_freq));
4952 tpc = 0;
4953 goto out;
4954 }
4955
4956 if (pream == 4)
4957 tpc = min_t(u8, ev->rates_array[rate_idx],
4958 ev->max_reg_allow_pow[ch]);
4959 else
4960 tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
4961 ev->max_reg_allow_pow[ch]),
4962 ev->ctl_power_table[0][pream][stm_idx]);
4963
4964 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4965 goto out;
4966
4967 if (preamble == WMI_RATE_PREAMBLE_CCK)
4968 goto out;
4969
4970 if (num_chains <= num_streams)
4971 goto out;
4972
4973 switch (type) {
4974 case WMI_TPC_TABLE_TYPE_STBC:
4975 pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
4976 if (pream == 4)
4977 tpc = min_t(u8, tpc, pow_agstbc);
4978 else
4979 tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
4980 ev->ctl_power_table[0][pream][stm_idx]);
4981 break;
4982 case WMI_TPC_TABLE_TYPE_TXBF:
4983 pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
4984 if (pream == 4)
4985 tpc = min_t(u8, tpc, pow_agtxbf);
4986 else
4987 tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
4988 ev->ctl_power_table[1][pream][stm_idx]);
4989 break;
4990 case WMI_TPC_TABLE_TYPE_CDD:
4991 pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
4992 if (pream == 4)
4993 tpc = min_t(u8, tpc, pow_agcdd);
4994 else
4995 tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
4996 ev->ctl_power_table[0][pream][stm_idx]);
4997 break;
4998 default:
4999 ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5000 tpc = 0;
5001 break;
5002 }
5003
5004out:
5005 return tpc;
5006}
5007
5008static void
5009ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5010 struct wmi_pdev_tpc_final_table_event *ev,
5011 struct ath10k_tpc_stats_final *tpc_stats,
5012 u8 *rate_code, u16 *pream_table, u8 type)
5013{
5014 u32 i, j, pream_idx, flags;
5015 u8 tpc[WMI_TPC_TX_N_CHAIN];
5016 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5017 char buff[WMI_TPC_BUF_SIZE];
5018
5019 flags = __le32_to_cpu(ev->flags);
5020
5021 switch (type) {
5022 case WMI_TPC_TABLE_TYPE_CDD:
5023 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5024 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5025 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5026 return;
5027 }
5028 break;
5029 case WMI_TPC_TABLE_TYPE_STBC:
5030 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5031 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5032 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5033 return;
5034 }
5035 break;
5036 case WMI_TPC_TABLE_TYPE_TXBF:
5037 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5038 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5039 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5040 return;
5041 }
5042 break;
5043 default:
5044 ath10k_dbg(ar, ATH10K_DBG_WMI,
5045 "invalid table type in wmi tpc event: %d\n", type);
5046 return;
5047 }
5048
5049 pream_idx = 0;
5050 for (i = 0; i < tpc_stats->rate_max; i++) {
5051 memset(tpc_value, 0, sizeof(tpc_value));
5052 memset(buff, 0, sizeof(buff));
5053 if (i == pream_table[pream_idx])
5054 pream_idx++;
5055
5056 for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5057 tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5058 rate_code[i],
5059 type, pream_idx);
5060 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5061 strlcat(tpc_value, buff, sizeof(tpc_value));
5062 }
5063 tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5064 tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5065 memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5066 tpc_value, sizeof(tpc_value));
5067 }
5068}
5069
5070void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5071{
5072 u32 num_tx_chain, rate_max;
5073 u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5074 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5075 struct wmi_pdev_tpc_final_table_event *ev;
5076 struct ath10k_tpc_stats_final *tpc_stats;
5077
5078 ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5079
5080 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5081 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5082 ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
5083 num_tx_chain, WMI_TPC_TX_N_CHAIN);
5084 return;
5085 }
5086
5087 rate_max = __le32_to_cpu(ev->rate_max);
5088 if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5089 ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
5090 rate_max, WMI_TPC_FINAL_RATE_MAX);
5091 rate_max = WMI_TPC_FINAL_RATE_MAX;
5092 }
5093
5094 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5095 if (!tpc_stats)
5096 return;
5097
5098 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5099 num_tx_chain);
5100
5101 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5102 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5103 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5104 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5105 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5106 tpc_stats->twice_antenna_reduction =
5107 __le32_to_cpu(ev->twice_antenna_reduction);
5108 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5109 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5110 tpc_stats->num_tx_chain = num_tx_chain;
5111 tpc_stats->rate_max = rate_max;
5112
5113 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5114 rate_code, pream_table,
5115 WMI_TPC_TABLE_TYPE_CDD);
5116 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5117 rate_code, pream_table,
5118 WMI_TPC_TABLE_TYPE_STBC);
5119 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5120 rate_code, pream_table,
5121 WMI_TPC_TABLE_TYPE_TXBF);
5122
5123 ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5124
5125 ath10k_dbg(ar, ATH10K_DBG_WMI,
5126 "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
5127 __le32_to_cpu(ev->chan_freq),
5128 __le32_to_cpu(ev->phy_mode),
5129 __le32_to_cpu(ev->ctl),
5130 __le32_to_cpu(ev->reg_domain),
5131 a_sle32_to_cpu(ev->twice_antenna_gain),
5132 __le32_to_cpu(ev->twice_antenna_reduction),
5133 __le32_to_cpu(ev->power_limit),
5134 __le32_to_cpu(ev->twice_max_rd_power) / 2,
5135 __le32_to_cpu(ev->num_tx_chain),
5136 __le32_to_cpu(ev->rate_max));
5137}
5138
5139static void
5140ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5141{
5142 struct wmi_tdls_peer_event *ev;
5143 struct ath10k_peer *peer;
5144 struct ath10k_vif *arvif;
5145 int vdev_id;
5146 int peer_status;
5147 int peer_reason;
5148 u8 reason;
5149
5150 if (skb->len < sizeof(*ev)) {
5151 ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5152 skb->len);
5153 return;
5154 }
5155
5156 ev = (struct wmi_tdls_peer_event *)skb->data;
5157 vdev_id = __le32_to_cpu(ev->vdev_id);
5158 peer_status = __le32_to_cpu(ev->peer_status);
5159 peer_reason = __le32_to_cpu(ev->peer_reason);
5160
5161 spin_lock_bh(&ar->data_lock);
5162 peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5163 spin_unlock_bh(&ar->data_lock);
5164
5165 if (!peer) {
5166 ath10k_warn(ar, "failed to find peer entry for %pM\n",
5167 ev->peer_macaddr.addr);
5168 return;
5169 }
5170
5171 switch (peer_status) {
5172 case WMI_TDLS_SHOULD_TEARDOWN:
5173 switch (peer_reason) {
5174 case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5175 case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5176 case WMI_TDLS_TEARDOWN_REASON_RSSI:
5177 reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5178 break;
5179 default:
5180 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5181 break;
5182 }
5183
5184 arvif = ath10k_get_arvif(ar, vdev_id);
5185 if (!arvif) {
5186 ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5187 vdev_id);
5188 return;
5189 }
5190
5191 ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5192 NL80211_TDLS_TEARDOWN, reason,
5193 GFP_ATOMIC);
5194
5195 ath10k_dbg(ar, ATH10K_DBG_WMI,
5196 "received tdls teardown event for peer %pM reason %u\n",
5197 ev->peer_macaddr.addr, peer_reason);
5198 break;
5199 default:
5200 ath10k_dbg(ar, ATH10K_DBG_WMI,
5201 "received unknown tdls peer event %u\n",
5202 peer_status);
5203 break;
5204 }
5205}
5206
5207static void
5208ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5209{
5210 struct wmi_peer_sta_ps_state_chg_event *ev;
5211 struct ieee80211_sta *sta;
5212 struct ath10k_sta *arsta;
5213 u8 peer_addr[ETH_ALEN];
5214
5215 lockdep_assert_held(&ar->data_lock);
5216
5217 ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5218 ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5219
5220 rcu_read_lock();
5221
5222 sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5223
5224 if (!sta) {
5225 ath10k_warn(ar, "failed to find station entry %pM\n",
5226 peer_addr);
5227 goto exit;
5228 }
5229
5230 arsta = (struct ath10k_sta *)sta->drv_priv;
5231 arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5232
5233exit:
5234 rcu_read_unlock();
5235}
5236
5237void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5238{
5239 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5240}
5241
5242void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5243{
5244 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5245}
5246
5247void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5248{
5249 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5250}
5251
5252void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5253{
5254 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5255}
5256
5257void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5258{
5259 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5260}
5261
5262void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5263 struct sk_buff *skb)
5264{
5265 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5266}
5267
5268void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5269{
5270 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5271}
5272
5273void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5274{
5275 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5276}
5277
5278void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5279{
5280 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5281}
5282
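/* Allocate one host memory chunk requested by the firmware: a DMA-coherent
 * pool of num_units units (unit length rounded up to 4 bytes), recorded in
 * ar->wmi.mem_chunks under the firmware's request id.
 */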
5283static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5284 u32 num_units, u32 unit_len)
5285{
5286 dma_addr_t paddr;
5287 u32 pool_size;
5288 int idx = ar->wmi.num_mem_chunks;
5289 void *vaddr;
5290
5291 pool_size = num_units * round_up(unit_len, 4);
5292 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5293
5294 if (!vaddr)
5295 return -ENOMEM;
5296
5297 ar->wmi.mem_chunks[idx].vaddr = vaddr;
5298 ar->wmi.mem_chunks[idx].paddr = paddr;
5299 ar->wmi.mem_chunks[idx].len = pool_size;
5300 ar->wmi.mem_chunks[idx].req_id = req_id;
5301 ar->wmi.num_mem_chunks++;
5302
5303 return num_units;
5304}
5305
5306static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5307 u32 num_units, u32 unit_len)
5308{
5309 int ret;
5310
5311 while (num_units) {
5312 ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5313 if (ret < 0)
5314 return ret;
5315
5316 num_units -= ret;
5317 }
5318
5319 return 0;
5320}
5321
5322static bool
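/* Check whether the host memory chunks allocated for an earlier service ready
 * event still match the firmware's current requests (same request ids and
 * pool sizes), so that ath10k_wmi_event_service_ready_work() can skip
 * reallocating them.
 */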
5323ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5324 const struct wlan_host_mem_req **mem_reqs,
5325 u32 num_mem_reqs)
5326{
5327 u32 req_id, num_units, unit_size, num_unit_info;
5328 u32 pool_size;
5329 int i, j;
5330 bool found;
5331
5332 if (ar->wmi.num_mem_chunks != num_mem_reqs)
5333 return false;
5334
5335 for (i = 0; i < num_mem_reqs; ++i) {
5336 req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5337 num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5338 unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5339 num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5340
5341 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5342 if (ar->num_active_peers)
5343 num_units = ar->num_active_peers + 1;
5344 else
5345 num_units = ar->max_num_peers + 1;
5346 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5347 num_units = ar->max_num_peers + 1;
5348 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5349 num_units = ar->max_num_vdevs + 1;
5350 }
5351
5352 found = false;
5353 for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5354 if (ar->wmi.mem_chunks[j].req_id == req_id) {
5355 pool_size = num_units * round_up(unit_size, 4);
5356 if (ar->wmi.mem_chunks[j].len == pool_size) {
5357 found = true;
5358 break;
5359 }
5360 }
5361 }
5362 if (!found)
5363 return false;
5364 }
5365
5366 return true;
5367}
5368
5369static int
5370ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5371 struct wmi_svc_rdy_ev_arg *arg)
5372{
5373 struct wmi_service_ready_event *ev;
5374 size_t i, n;
5375
5376 if (skb->len < sizeof(*ev))
5377 return -EPROTO;
5378
5379 ev = (void *)skb->data;
5380 skb_pull(skb, sizeof(*ev));
5381 arg->min_tx_power = ev->hw_min_tx_power;
5382 arg->max_tx_power = ev->hw_max_tx_power;
5383 arg->ht_cap = ev->ht_cap_info;
5384 arg->vht_cap = ev->vht_cap_info;
5385 arg->sw_ver0 = ev->sw_version;
5386 arg->sw_ver1 = ev->sw_version_1;
5387 arg->phy_capab = ev->phy_capability;
5388 arg->num_rf_chains = ev->num_rf_chains;
5389 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5390 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5391 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5392 arg->num_mem_reqs = ev->num_mem_reqs;
5393 arg->service_map = ev->wmi_service_bitmap;
5394 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5395
5396 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5397 ARRAY_SIZE(arg->mem_reqs));
5398 for (i = 0; i < n; i++)
5399 arg->mem_reqs[i] = &ev->mem_reqs[i];
5400
5401 if (skb->len <
5402 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5403 return -EPROTO;
5404
5405 return 0;
5406}
5407
5408static int
5409ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5410 struct wmi_svc_rdy_ev_arg *arg)
5411{
5412 struct wmi_10x_service_ready_event *ev;
5413 int i, n;
5414
5415 if (skb->len < sizeof(*ev))
5416 return -EPROTO;
5417
5418 ev = (void *)skb->data;
5419 skb_pull(skb, sizeof(*ev));
5420 arg->min_tx_power = ev->hw_min_tx_power;
5421 arg->max_tx_power = ev->hw_max_tx_power;
5422 arg->ht_cap = ev->ht_cap_info;
5423 arg->vht_cap = ev->vht_cap_info;
5424 arg->sw_ver0 = ev->sw_version;
5425 arg->phy_capab = ev->phy_capability;
5426 arg->num_rf_chains = ev->num_rf_chains;
5427 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5428 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5429 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5430 arg->num_mem_reqs = ev->num_mem_reqs;
5431 arg->service_map = ev->wmi_service_bitmap;
5432 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5433
5434 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5435 ARRAY_SIZE(arg->mem_reqs));
5436 for (i = 0; i < n; i++)
5437 arg->mem_reqs[i] = &ev->mem_reqs[i];
5438
5439 if (skb->len <
5440 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5441 return -EPROTO;
5442
5443 return 0;
5444}
5445
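/* Deferred handler for the service ready event: parse the firmware
 * capabilities, map the service bitmap, and (re)allocate any host memory
 * chunks the firmware asked for before completing ar->wmi.service_ready.
 */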
5446static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5447{
5448 struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5449 struct sk_buff *skb = ar->svc_rdy_skb;
5450 struct wmi_svc_rdy_ev_arg arg = {};
5451 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5452 int ret;
5453 bool allocated;
5454
5455 if (!skb) {
5456 ath10k_warn(ar, "invalid service ready event skb\n");
5457 return;
5458 }
5459
5460 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5461 if (ret) {
5462 ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5463 return;
5464 }
5465
5466 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5467 arg.service_map_len);
5468
5469 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5470 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5471 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5472 ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5473 ar->fw_version_major =
5474 (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5475 ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5476 ar->fw_version_release =
5477 (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5478 ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5479 ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5480 ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5481 ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5482 ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5483 ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5484
5485 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5486 arg.service_map, arg.service_map_len);
5487
5488 if (ar->num_rf_chains > ar->max_spatial_stream) {
5489 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5490 ar->num_rf_chains, ar->max_spatial_stream);
5491 ar->num_rf_chains = ar->max_spatial_stream;
5492 }
5493
5494 if (!ar->cfg_tx_chainmask) {
5495 ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5496 ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5497 }
5498
5499 if (strlen(ar->hw->wiphy->fw_version) == 0) {
5500 snprintf(ar->hw->wiphy->fw_version,
5501 sizeof(ar->hw->wiphy->fw_version),
5502 "%u.%u.%u.%u",
5503 ar->fw_version_major,
5504 ar->fw_version_minor,
5505 ar->fw_version_release,
5506 ar->fw_version_build);
5507 }
5508
5509 num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5510 if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5511 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5512 num_mem_reqs);
5513 return;
5514 }
5515
5516 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5517 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5518 ar->running_fw->fw_file.fw_features))
5519 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5520 ar->max_num_vdevs;
5521 else
5522 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5523 ar->max_num_vdevs;
5524
5525 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5526 ar->max_num_vdevs;
5527 ar->num_tids = ar->num_active_peers * 2;
5528 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5529 }
5530
5531 /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5532 * and WMI_SERVICE_IRAM_TIDS, etc.
5533 */
5534
5535 allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5536 num_mem_reqs);
5537 if (allocated)
5538 goto skip_mem_alloc;
5539
5540 /* Either this event is received during boot time or there is a change
5541 * in memory requirement from firmware when compared to last request.
5542 * Free any old memory and do a fresh allocation based on the current
5543 * memory requirement.
5544 */
5545 ath10k_wmi_free_host_mem(ar);
5546
5547 for (i = 0; i < num_mem_reqs; ++i) {
5548 req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5549 num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5550 unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5551 num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5552
5553 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5554 if (ar->num_active_peers)
5555 num_units = ar->num_active_peers + 1;
5556 else
5557 num_units = ar->max_num_peers + 1;
5558 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5559 /* The number of units to allocate is the number of
5560 * peers, plus 1 extra for the self peer on the target.
5561 * This count needs to be kept in sync, otherwise host
5562 * and target can disagree on the pool size.
5563 */
5564 num_units = ar->max_num_peers + 1;
5565 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5566 num_units = ar->max_num_vdevs + 1;
5567 }
5568
5569 ath10k_dbg(ar, ATH10K_DBG_WMI,
5570 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5571 req_id,
5572 __le32_to_cpu(arg.mem_reqs[i]->num_units),
5573 num_unit_info,
5574 unit_size,
5575 num_units);
5576
5577 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5578 unit_size);
5579 if (ret)
5580 return;
5581 }
5582
5583skip_mem_alloc:
5584 ath10k_dbg(ar, ATH10K_DBG_WMI,
5585 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
5586 __le32_to_cpu(arg.min_tx_power),
5587 __le32_to_cpu(arg.max_tx_power),
5588 __le32_to_cpu(arg.ht_cap),
5589 __le32_to_cpu(arg.vht_cap),
5590 __le32_to_cpu(arg.sw_ver0),
5591 __le32_to_cpu(arg.sw_ver1),
5592 __le32_to_cpu(arg.fw_build),
5593 __le32_to_cpu(arg.phy_capab),
5594 __le32_to_cpu(arg.num_rf_chains),
5595 __le32_to_cpu(arg.eeprom_rd),
5596 __le32_to_cpu(arg.num_mem_reqs));
5597
5598 dev_kfree_skb(skb);
5599 ar->svc_rdy_skb = NULL;
5600 complete(&ar->wmi.service_ready);
5601}
5602
5603void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5604{
5605 ar->svc_rdy_skb = skb;
5606 queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5607}
5608
5609static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5610 struct wmi_rdy_ev_arg *arg)
5611{
5612 struct wmi_ready_event *ev = (void *)skb->data;
5613
5614 if (skb->len < sizeof(*ev))
5615 return -EPROTO;
5616
5617 skb_pull(skb, sizeof(*ev));
5618 arg->sw_version = ev->sw_version;
5619 arg->abi_version = ev->abi_version;
5620 arg->status = ev->status;
5621 arg->mac_addr = ev->mac_addr.addr;
5622
5623 return 0;
5624}
5625
5626static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5627 struct wmi_roam_ev_arg *arg)
5628{
5629 struct wmi_roam_ev *ev = (void *)skb->data;
5630
5631 if (skb->len < sizeof(*ev))
5632 return -EPROTO;
5633
5634 skb_pull(skb, sizeof(*ev));
5635 arg->vdev_id = ev->vdev_id;
5636 arg->reason = ev->reason;
5637
5638 return 0;
5639}
5640
5641static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5642 struct sk_buff *skb,
5643 struct wmi_echo_ev_arg *arg)
5644{
5645 struct wmi_echo_event *ev = (void *)skb->data;
5646
5647 arg->value = ev->value;
5648
5649 return 0;
5650}
5651
5652int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5653{
5654 struct wmi_rdy_ev_arg arg = {};
5655 int ret;
5656
5657 ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5658 if (ret) {
5659 ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5660 return ret;
5661 }
5662
5663 ath10k_dbg(ar, ATH10K_DBG_WMI,
5664 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
5665 __le32_to_cpu(arg.sw_version),
5666 __le32_to_cpu(arg.abi_version),
5667 arg.mac_addr,
5668 __le32_to_cpu(arg.status));
5669
5670 if (is_zero_ether_addr(ar->mac_addr))
5671 ether_addr_copy(ar->mac_addr, arg.mac_addr);
5672 complete(&ar->wmi.unified_ready);
5673 return 0;
5674}
5675
5676void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5677{
5678 int ret;
5679 struct wmi_svc_avail_ev_arg arg = {};
5680
5681 ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5682 if (ret) {
5683 ath10k_warn(ar, "failed to parse service available event: %d\n",
5684 ret);
5685 }
5686
5687 /*
5688 * "arg.service_map_ext_valid" must be initialized to zero (done by the
5689 * empty initializer above) so that the check below stays false if
5690 * parsing the event failed.
5690 */
5691 if (arg.service_map_ext_valid)
5692 ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5693 __le32_to_cpu(arg.service_map_ext_len));
5694}
5695
5696static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5697{
5698 const struct wmi_pdev_temperature_event *ev;
5699
5700 ev = (struct wmi_pdev_temperature_event *)skb->data;
5701 if (WARN_ON(skb->len < sizeof(*ev)))
5702 return -EPROTO;
5703
5704 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5705 return 0;
5706}
5707
5708static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5709 struct sk_buff *skb)
5710{
5711 struct wmi_pdev_bss_chan_info_event *ev;
5712 struct survey_info *survey;
5713 u64 busy, total, tx, rx, rx_bss;
5714 u32 freq, noise_floor;
5715 u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5716 int idx;
5717
5718 ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5719 if (WARN_ON(skb->len < sizeof(*ev)))
5720 return -EPROTO;
5721
5722 freq = __le32_to_cpu(ev->freq);
5723 noise_floor = __le32_to_cpu(ev->noise_floor);
5724 busy = __le64_to_cpu(ev->cycle_busy);
5725 total = __le64_to_cpu(ev->cycle_total);
5726 tx = __le64_to_cpu(ev->cycle_tx);
5727 rx = __le64_to_cpu(ev->cycle_rx);
5728 rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
5729
5730 ath10k_dbg(ar, ATH10K_DBG_WMI,
5731 "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5732 freq, noise_floor, busy, total, tx, rx, rx_bss);
5733
5734 spin_lock_bh(&ar->data_lock);
5735 idx = freq_to_idx(ar, freq);
5736 if (idx >= ARRAY_SIZE(ar->survey)) {
5737 ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5738 freq, idx);
5739 goto exit;
5740 }
5741
5742 survey = &ar->survey[idx];
5743
5744 survey->noise = noise_floor;
5745 survey->time = div_u64(total, cc_freq_hz);
5746 survey->time_busy = div_u64(busy, cc_freq_hz);
5747 survey->time_rx = div_u64(rx_bss, cc_freq_hz);
5748 survey->time_tx = div_u64(tx, cc_freq_hz);
5749 survey->filled |= (SURVEY_INFO_NOISE_DBM |
5750 SURVEY_INFO_TIME |
5751 SURVEY_INFO_TIME_BUSY |
5752 SURVEY_INFO_TIME_RX |
5753 SURVEY_INFO_TIME_TX);
5754exit:
5755 spin_unlock_bh(&ar->data_lock);
5756 complete(&ar->bss_survey_done);
5757 return 0;
5758}
5759
5760static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5761{
5762 if (ar->hw_params.hw_ops->set_coverage_class) {
5763 spin_lock_bh(&ar->data_lock);
5764
5765 /* This call only ensures that the modified coverage class
5766 * persists in case the firmware sets the registers back to
5767 * their default value. So calling it is only necessary if the
5768 * coverage class has a non-zero value.
5769 */
5770 if (ar->fw_coverage.coverage_class)
5771 queue_work(ar->workqueue, &ar->set_coverage_class_work);
5772
5773 spin_unlock_bh(&ar->data_lock);
5774 }
5775}
5776
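/* Main WMI event dispatcher: strip the WMI command header and route the event
 * to its handler. The mgmt rx and service ready paths take ownership of the
 * skb; all other events are freed here.
 */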
5777static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5778{
5779 struct wmi_cmd_hdr *cmd_hdr;
5780 enum wmi_event_id id;
5781
5782 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5783 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5784
5785 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5786 goto out;
5787
5788 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5789
5790 switch (id) {
5791 case WMI_MGMT_RX_EVENTID:
5792 ath10k_wmi_event_mgmt_rx(ar, skb);
5793 /* mgmt_rx() owns the skb now! */
5794 return;
5795 case WMI_SCAN_EVENTID:
5796 ath10k_wmi_event_scan(ar, skb);
5797 ath10k_wmi_queue_set_coverage_class_work(ar);
5798 break;
5799 case WMI_CHAN_INFO_EVENTID:
5800 ath10k_wmi_event_chan_info(ar, skb);
5801 break;
5802 case WMI_ECHO_EVENTID:
5803 ath10k_wmi_event_echo(ar, skb);
5804 break;
5805 case WMI_DEBUG_MESG_EVENTID:
5806 ath10k_wmi_event_debug_mesg(ar, skb);
5807 ath10k_wmi_queue_set_coverage_class_work(ar);
5808 break;
5809 case WMI_UPDATE_STATS_EVENTID:
5810 ath10k_wmi_event_update_stats(ar, skb);
5811 break;
5812 case WMI_VDEV_START_RESP_EVENTID:
5813 ath10k_wmi_event_vdev_start_resp(ar, skb);
5814 ath10k_wmi_queue_set_coverage_class_work(ar);
5815 break;
5816 case WMI_VDEV_STOPPED_EVENTID:
5817 ath10k_wmi_event_vdev_stopped(ar, skb);
5818 ath10k_wmi_queue_set_coverage_class_work(ar);
5819 break;
5820 case WMI_PEER_STA_KICKOUT_EVENTID:
5821 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5822 break;
5823 case WMI_HOST_SWBA_EVENTID:
5824 ath10k_wmi_event_host_swba(ar, skb);
5825 break;
5826 case WMI_TBTTOFFSET_UPDATE_EVENTID:
5827 ath10k_wmi_event_tbttoffset_update(ar, skb);
5828 break;
5829 case WMI_PHYERR_EVENTID:
5830 ath10k_wmi_event_phyerr(ar, skb);
5831 break;
5832 case WMI_ROAM_EVENTID:
5833 ath10k_wmi_event_roam(ar, skb);
5834 ath10k_wmi_queue_set_coverage_class_work(ar);
5835 break;
5836 case WMI_PROFILE_MATCH:
5837 ath10k_wmi_event_profile_match(ar, skb);
5838 break;
5839 case WMI_DEBUG_PRINT_EVENTID:
5840 ath10k_wmi_event_debug_print(ar, skb);
5841 ath10k_wmi_queue_set_coverage_class_work(ar);
5842 break;
5843 case WMI_PDEV_QVIT_EVENTID:
5844 ath10k_wmi_event_pdev_qvit(ar, skb);
5845 break;
5846 case WMI_WLAN_PROFILE_DATA_EVENTID:
5847 ath10k_wmi_event_wlan_profile_data(ar, skb);
5848 break;
5849 case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5850 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5851 break;
5852 case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5853 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5854 break;
5855 case WMI_RTT_ERROR_REPORT_EVENTID:
5856 ath10k_wmi_event_rtt_error_report(ar, skb);
5857 break;
5858 case WMI_WOW_WAKEUP_HOST_EVENTID:
5859 ath10k_wmi_event_wow_wakeup_host(ar, skb);
5860 break;
5861 case WMI_DCS_INTERFERENCE_EVENTID:
5862 ath10k_wmi_event_dcs_interference(ar, skb);
5863 break;
5864 case WMI_PDEV_TPC_CONFIG_EVENTID:
5865 ath10k_wmi_event_pdev_tpc_config(ar, skb);
5866 break;
5867 case WMI_PDEV_FTM_INTG_EVENTID:
5868 ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5869 break;
5870 case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5871 ath10k_wmi_event_gtk_offload_status(ar, skb);
5872 break;
5873 case WMI_GTK_REKEY_FAIL_EVENTID:
5874 ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5875 break;
5876 case WMI_TX_DELBA_COMPLETE_EVENTID:
5877 ath10k_wmi_event_delba_complete(ar, skb);
5878 break;
5879 case WMI_TX_ADDBA_COMPLETE_EVENTID:
5880 ath10k_wmi_event_addba_complete(ar, skb);
5881 break;
5882 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5883 ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5884 break;
5885 case WMI_SERVICE_READY_EVENTID:
5886 ath10k_wmi_event_service_ready(ar, skb);
5887 return;
5888 case WMI_READY_EVENTID:
5889 ath10k_wmi_event_ready(ar, skb);
5890 ath10k_wmi_queue_set_coverage_class_work(ar);
5891 break;
5892 case WMI_SERVICE_AVAILABLE_EVENTID:
5893 ath10k_wmi_event_service_available(ar, skb);
5894 break;
5895 default:
5896 ath10k_warn(ar, "Unknown eventid: %d\n", id);
5897 break;
5898 }
5899
5900out:
5901 dev_kfree_skb(skb);
5902}
5903
5904static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5905{
5906 struct wmi_cmd_hdr *cmd_hdr;
5907 enum wmi_10x_event_id id;
5908 bool consumed;
5909
5910 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5911 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5912
5913 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5914 goto out;
5915
5916 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5917
5918 consumed = ath10k_tm_event_wmi(ar, id, skb);
5919
5920 /* The ready event must be handled normally also in UTF mode so that
5921 * we know the UTF firmware has booted; all other WMI events are just
5922 * bypassed to testmode.
5923 */
5924 if (consumed && id != WMI_10X_READY_EVENTID) {
5925 ath10k_dbg(ar, ATH10K_DBG_WMI,
5926 "wmi testmode consumed 0x%x\n", id);
5927 goto out;
5928 }
5929
5930 switch (id) {
5931 case WMI_10X_MGMT_RX_EVENTID:
5932 ath10k_wmi_event_mgmt_rx(ar, skb);
5933 /* mgmt_rx() owns the skb now! */
5934 return;
5935 case WMI_10X_SCAN_EVENTID:
5936 ath10k_wmi_event_scan(ar, skb);
5937 ath10k_wmi_queue_set_coverage_class_work(ar);
5938 break;
5939 case WMI_10X_CHAN_INFO_EVENTID:
5940 ath10k_wmi_event_chan_info(ar, skb);
5941 break;
5942 case WMI_10X_ECHO_EVENTID:
5943 ath10k_wmi_event_echo(ar, skb);
5944 break;
5945 case WMI_10X_DEBUG_MESG_EVENTID:
5946 ath10k_wmi_event_debug_mesg(ar, skb);
5947 ath10k_wmi_queue_set_coverage_class_work(ar);
5948 break;
5949 case WMI_10X_UPDATE_STATS_EVENTID:
5950 ath10k_wmi_event_update_stats(ar, skb);
5951 break;
5952 case WMI_10X_VDEV_START_RESP_EVENTID:
5953 ath10k_wmi_event_vdev_start_resp(ar, skb);
5954 ath10k_wmi_queue_set_coverage_class_work(ar);
5955 break;
5956 case WMI_10X_VDEV_STOPPED_EVENTID:
5957 ath10k_wmi_event_vdev_stopped(ar, skb);
5958 ath10k_wmi_queue_set_coverage_class_work(ar);
5959 break;
5960 case WMI_10X_PEER_STA_KICKOUT_EVENTID:
5961 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5962 break;
5963 case WMI_10X_HOST_SWBA_EVENTID:
5964 ath10k_wmi_event_host_swba(ar, skb);
5965 break;
5966 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
5967 ath10k_wmi_event_tbttoffset_update(ar, skb);
5968 break;
5969 case WMI_10X_PHYERR_EVENTID:
5970 ath10k_wmi_event_phyerr(ar, skb);
5971 break;
5972 case WMI_10X_ROAM_EVENTID:
5973 ath10k_wmi_event_roam(ar, skb);
5974 ath10k_wmi_queue_set_coverage_class_work(ar);
5975 break;
5976 case WMI_10X_PROFILE_MATCH:
5977 ath10k_wmi_event_profile_match(ar, skb);
5978 break;
5979 case WMI_10X_DEBUG_PRINT_EVENTID:
5980 ath10k_wmi_event_debug_print(ar, skb);
5981 ath10k_wmi_queue_set_coverage_class_work(ar);
5982 break;
5983 case WMI_10X_PDEV_QVIT_EVENTID:
5984 ath10k_wmi_event_pdev_qvit(ar, skb);
5985 break;
5986 case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
5987 ath10k_wmi_event_wlan_profile_data(ar, skb);
5988 break;
5989 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
5990 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5991 break;
5992 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
5993 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5994 break;
5995 case WMI_10X_RTT_ERROR_REPORT_EVENTID:
5996 ath10k_wmi_event_rtt_error_report(ar, skb);
5997 break;
5998 case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
5999 ath10k_wmi_event_wow_wakeup_host(ar, skb);
6000 break;
6001 case WMI_10X_DCS_INTERFERENCE_EVENTID:
6002 ath10k_wmi_event_dcs_interference(ar, skb);
6003 break;
6004 case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6005 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6006 break;
6007 case WMI_10X_INST_RSSI_STATS_EVENTID:
6008 ath10k_wmi_event_inst_rssi_stats(ar, skb);
6009 break;
6010 case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6011 ath10k_wmi_event_vdev_standby_req(ar, skb);
6012 break;
6013 case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6014 ath10k_wmi_event_vdev_resume_req(ar, skb);
6015 break;
6016 case WMI_10X_SERVICE_READY_EVENTID:
6017 ath10k_wmi_event_service_ready(ar, skb);
6018 return;
6019 case WMI_10X_READY_EVENTID:
6020 ath10k_wmi_event_ready(ar, skb);
6021 ath10k_wmi_queue_set_coverage_class_work(ar);
6022 break;
6023 case WMI_10X_PDEV_UTF_EVENTID:
6024 /* ignore utf events */
6025 break;
6026 default:
6027 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6028 break;
6029 }
6030
6031out:
6032 dev_kfree_skb(skb);
6033}
6034
6035static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6036{
6037 struct wmi_cmd_hdr *cmd_hdr;
6038 enum wmi_10_2_event_id id;
6039 bool consumed;
6040
6041 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6042 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6043
6044 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6045 goto out;
6046
6047 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6048
6049 consumed = ath10k_tm_event_wmi(ar, id, skb);
6050
6051 /* The ready event must be handled normally also in UTF mode so that
6052 * we know the UTF firmware has booted; all other WMI events are just
6053 * bypassed to testmode.
6054 */
6055 if (consumed && id != WMI_10_2_READY_EVENTID) {
6056 ath10k_dbg(ar, ATH10K_DBG_WMI,
6057 "wmi testmode consumed 0x%x\n", id);
6058 goto out;
6059 }
6060
6061 switch (id) {
6062 case WMI_10_2_MGMT_RX_EVENTID:
6063 ath10k_wmi_event_mgmt_rx(ar, skb);
6064 /* mgmt_rx() owns the skb now! */
6065 return;
6066 case WMI_10_2_SCAN_EVENTID:
6067 ath10k_wmi_event_scan(ar, skb);
6068 ath10k_wmi_queue_set_coverage_class_work(ar);
6069 break;
6070 case WMI_10_2_CHAN_INFO_EVENTID:
6071 ath10k_wmi_event_chan_info(ar, skb);
6072 break;
6073 case WMI_10_2_ECHO_EVENTID:
6074 ath10k_wmi_event_echo(ar, skb);
6075 break;
6076 case WMI_10_2_DEBUG_MESG_EVENTID:
6077 ath10k_wmi_event_debug_mesg(ar, skb);
6078 ath10k_wmi_queue_set_coverage_class_work(ar);
6079 break;
6080 case WMI_10_2_UPDATE_STATS_EVENTID:
6081 ath10k_wmi_event_update_stats(ar, skb);
6082 break;
6083 case WMI_10_2_VDEV_START_RESP_EVENTID:
6084 ath10k_wmi_event_vdev_start_resp(ar, skb);
6085 ath10k_wmi_queue_set_coverage_class_work(ar);
6086 break;
6087 case WMI_10_2_VDEV_STOPPED_EVENTID:
6088 ath10k_wmi_event_vdev_stopped(ar, skb);
6089 ath10k_wmi_queue_set_coverage_class_work(ar);
6090 break;
6091 case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6092 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6093 break;
6094 case WMI_10_2_HOST_SWBA_EVENTID:
6095 ath10k_wmi_event_host_swba(ar, skb);
6096 break;
6097 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6098 ath10k_wmi_event_tbttoffset_update(ar, skb);
6099 break;
6100 case WMI_10_2_PHYERR_EVENTID:
6101 ath10k_wmi_event_phyerr(ar, skb);
6102 break;
6103 case WMI_10_2_ROAM_EVENTID:
6104 ath10k_wmi_event_roam(ar, skb);
6105 ath10k_wmi_queue_set_coverage_class_work(ar);
6106 break;
6107 case WMI_10_2_PROFILE_MATCH:
6108 ath10k_wmi_event_profile_match(ar, skb);
6109 break;
6110 case WMI_10_2_DEBUG_PRINT_EVENTID:
6111 ath10k_wmi_event_debug_print(ar, skb);
6112 ath10k_wmi_queue_set_coverage_class_work(ar);
6113 break;
6114 case WMI_10_2_PDEV_QVIT_EVENTID:
6115 ath10k_wmi_event_pdev_qvit(ar, skb);
6116 break;
6117 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6118 ath10k_wmi_event_wlan_profile_data(ar, skb);
6119 break;
6120 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6121 ath10k_wmi_event_rtt_measurement_report(ar, skb);
6122 break;
6123 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6124 ath10k_wmi_event_tsf_measurement_report(ar, skb);
6125 break;
6126 case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6127 ath10k_wmi_event_rtt_error_report(ar, skb);
6128 break;
6129 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6130 ath10k_wmi_event_wow_wakeup_host(ar, skb);
6131 break;
6132 case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6133 ath10k_wmi_event_dcs_interference(ar, skb);
6134 break;
6135 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6136 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6137 break;
6138 case WMI_10_2_INST_RSSI_STATS_EVENTID:
6139 ath10k_wmi_event_inst_rssi_stats(ar, skb);
6140 break;
6141 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6142 ath10k_wmi_event_vdev_standby_req(ar, skb);
6143 ath10k_wmi_queue_set_coverage_class_work(ar);
6144 break;
6145 case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6146 ath10k_wmi_event_vdev_resume_req(ar, skb);
6147 ath10k_wmi_queue_set_coverage_class_work(ar);
6148 break;
6149 case WMI_10_2_SERVICE_READY_EVENTID:
6150 ath10k_wmi_event_service_ready(ar, skb);
6151 return;
6152 case WMI_10_2_READY_EVENTID:
6153 ath10k_wmi_event_ready(ar, skb);
6154 ath10k_wmi_queue_set_coverage_class_work(ar);
6155 break;
6156 case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6157 ath10k_wmi_event_temperature(ar, skb);
6158 break;
6159 case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6160 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6161 break;
6162 case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6163 case WMI_10_2_GPIO_INPUT_EVENTID:
6164 case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6165 case WMI_10_2_GENERIC_BUFFER_EVENTID:
6166 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6167 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6168 case WMI_10_2_WDS_PEER_EVENTID:
6169 ath10k_dbg(ar, ATH10K_DBG_WMI,
6170 "received event id %d not implemented\n", id);
6171 break;
6172 case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6173 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6174 break;
6175 default:
6176 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6177 break;
6178 }
6179
6180out:
6181 dev_kfree_skb(skb);
6182}
6183
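/* Receive handler for 10.4 firmware WMI events: strip the WMI command
 * header, give testmode (UTF) a chance to consume the event, then
 * dispatch it to the matching handler. The skb is freed here unless a
 * handler takes ownership of it.
 */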
6184static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6185{
6186 struct wmi_cmd_hdr *cmd_hdr;
6187 enum wmi_10_4_event_id id;
6188 bool consumed;
6189
6190 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6191 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6192
6193 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6194 goto out;
6195
6196 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6197
6198 consumed = ath10k_tm_event_wmi(ar, id, skb);
6199
6200	/* The ready event must be handled normally even in UTF mode so that
6201	 * we know the UTF firmware has booted; any other event consumed by
6202	 * testmode is simply handed off and not processed further here.
6203	 */
6204 if (consumed && id != WMI_10_4_READY_EVENTID) {
6205 ath10k_dbg(ar, ATH10K_DBG_WMI,
6206 "wmi testmode consumed 0x%x\n", id);
6207 goto out;
6208 }
6209
6210 switch (id) {
6211 case WMI_10_4_MGMT_RX_EVENTID:
6212 ath10k_wmi_event_mgmt_rx(ar, skb);
6213 /* mgmt_rx() owns the skb now! */
6214 return;
6215 case WMI_10_4_ECHO_EVENTID:
6216 ath10k_wmi_event_echo(ar, skb);
6217 break;
6218 case WMI_10_4_DEBUG_MESG_EVENTID:
6219 ath10k_wmi_event_debug_mesg(ar, skb);
6220 ath10k_wmi_queue_set_coverage_class_work(ar);
6221 break;
6222 case WMI_10_4_SERVICE_READY_EVENTID:
6223 ath10k_wmi_event_service_ready(ar, skb);
6224 return;
6225 case WMI_10_4_SCAN_EVENTID:
6226 ath10k_wmi_event_scan(ar, skb);
6227 ath10k_wmi_queue_set_coverage_class_work(ar);
6228 break;
6229 case WMI_10_4_CHAN_INFO_EVENTID:
6230 ath10k_wmi_event_chan_info(ar, skb);
6231 break;
6232 case WMI_10_4_PHYERR_EVENTID:
6233 ath10k_wmi_event_phyerr(ar, skb);
6234 break;
6235 case WMI_10_4_READY_EVENTID:
6236 ath10k_wmi_event_ready(ar, skb);
6237 ath10k_wmi_queue_set_coverage_class_work(ar);
6238 break;
6239 case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6240 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6241 break;
6242 case WMI_10_4_ROAM_EVENTID:
6243 ath10k_wmi_event_roam(ar, skb);
6244 ath10k_wmi_queue_set_coverage_class_work(ar);
6245 break;
6246 case WMI_10_4_HOST_SWBA_EVENTID:
6247 ath10k_wmi_event_host_swba(ar, skb);
6248 break;
6249 case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6250 ath10k_wmi_event_tbttoffset_update(ar, skb);
6251 break;
6252 case WMI_10_4_DEBUG_PRINT_EVENTID:
6253 ath10k_wmi_event_debug_print(ar, skb);
6254 ath10k_wmi_queue_set_coverage_class_work(ar);
6255 break;
6256 case WMI_10_4_VDEV_START_RESP_EVENTID:
6257 ath10k_wmi_event_vdev_start_resp(ar, skb);
6258 ath10k_wmi_queue_set_coverage_class_work(ar);
6259 break;
6260 case WMI_10_4_VDEV_STOPPED_EVENTID:
6261 ath10k_wmi_event_vdev_stopped(ar, skb);
6262 ath10k_wmi_queue_set_coverage_class_work(ar);
6263 break;
6264 case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6265 case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6266 case WMI_10_4_WDS_PEER_EVENTID:
6267 case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6268 ath10k_dbg(ar, ATH10K_DBG_WMI,
6269 "received event id %d not implemented\n", id);
6270 break;
6271 case WMI_10_4_UPDATE_STATS_EVENTID:
6272 ath10k_wmi_event_update_stats(ar, skb);
6273 break;
6274 case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6275 ath10k_wmi_event_temperature(ar, skb);
6276 break;
6277 case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6278 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6279 break;
6280 case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6281 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6282 break;
6283 case WMI_10_4_TDLS_PEER_EVENTID:
6284 ath10k_wmi_handle_tdls_peer_event(ar, skb);
6285 break;
6286 case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6287 ath10k_wmi_event_tpc_final_table(ar, skb);
6288 break;
6289 case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6290 ath10k_wmi_event_dfs_status_check(ar, skb);
6291 break;
6292 case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6293 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6294 break;
6295 default:
6296 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6297 break;
6298 }
6299
6300out:
6301 dev_kfree_skb(skb);
6302}
6303
6304static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6305{
6306 int ret;
6307
6308 ret = ath10k_wmi_rx(ar, skb);
6309 if (ret)
6310 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6311}
6312
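/* Connect the WMI layer to the HTC control service endpoint and record
 * the endpoint id used for all subsequent WMI command transmission.
 */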
6313int ath10k_wmi_connect(struct ath10k *ar)
6314{
6315 int status;
6316 struct ath10k_htc_svc_conn_req conn_req;
6317 struct ath10k_htc_svc_conn_resp conn_resp;
6318
6319 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6320
6321 memset(&conn_req, 0, sizeof(conn_req));
6322 memset(&conn_resp, 0, sizeof(conn_resp));
6323
6324 /* these fields are the same for all service endpoints */
6325 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6326 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6327 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6328
6329 /* connect to control service */
6330 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6331
6332 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6333 if (status) {
6334 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6335 status);
6336 return status;
6337 }
6338
6339 ar->wmi.eid = conn_resp.eid;
6340 return 0;
6341}
6342
6343static struct sk_buff *
6344ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6345 const u8 macaddr[ETH_ALEN])
6346{
6347 struct wmi_pdev_set_base_macaddr_cmd *cmd;
6348 struct sk_buff *skb;
6349
6350 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6351 if (!skb)
6352 return ERR_PTR(-ENOMEM);
6353
6354 cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6355 ether_addr_copy(cmd->mac_addr.addr, macaddr);
6356
6357 ath10k_dbg(ar, ATH10K_DBG_WMI,
6358 "wmi pdev basemac %pM\n", macaddr);
6359 return skb;
6360}
6361
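/* Build a pdev set regdomain command for main firmware. Note that the
 * dfs_reg argument is not carried in this command format; the 10.x
 * variant below adds a dfs_domain field for it.
 */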
6362static struct sk_buff *
6363ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6364 u16 ctl2g, u16 ctl5g,
6365 enum wmi_dfs_region dfs_reg)
6366{
6367 struct wmi_pdev_set_regdomain_cmd *cmd;
6368 struct sk_buff *skb;
6369
6370 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6371 if (!skb)
6372 return ERR_PTR(-ENOMEM);
6373
6374 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6375 cmd->reg_domain = __cpu_to_le32(rd);
6376 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6377 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6378 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6379 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6380
6381 ath10k_dbg(ar, ATH10K_DBG_WMI,
6382 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6383 rd, rd2g, rd5g, ctl2g, ctl5g);
6384 return skb;
6385}
6386
6387static struct sk_buff *
6388ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6389 rd5g, u16 ctl2g, u16 ctl5g,
6390 enum wmi_dfs_region dfs_reg)
6391{
6392 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6393 struct sk_buff *skb;
6394
6395 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6396 if (!skb)
6397 return ERR_PTR(-ENOMEM);
6398
6399 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6400 cmd->reg_domain = __cpu_to_le32(rd);
6401 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6402 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6403 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6404 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6405 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6406
6407 ath10k_dbg(ar, ATH10K_DBG_WMI,
6408 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6409 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6410 return skb;
6411}
6412
6413static struct sk_buff *
6414ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6415{
6416 struct wmi_pdev_suspend_cmd *cmd;
6417 struct sk_buff *skb;
6418
6419 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6420 if (!skb)
6421 return ERR_PTR(-ENOMEM);
6422
6423 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6424 cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6425
6426 return skb;
6427}
6428
6429static struct sk_buff *
6430ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6431{
6432 struct sk_buff *skb;
6433
6434 skb = ath10k_wmi_alloc_skb(ar, 0);
6435 if (!skb)
6436 return ERR_PTR(-ENOMEM);
6437
6438 return skb;
6439}
6440
6441static struct sk_buff *
6442ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6443{
6444 struct wmi_pdev_set_param_cmd *cmd;
6445 struct sk_buff *skb;
6446
6447 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6448 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6449 id);
6450 return ERR_PTR(-EOPNOTSUPP);
6451 }
6452
6453 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6454 if (!skb)
6455 return ERR_PTR(-ENOMEM);
6456
6457 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6458 cmd->param_id = __cpu_to_le32(id);
6459 cmd->param_value = __cpu_to_le32(value);
6460
6461 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6462 id, value);
6463 return skb;
6464}
6465
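/* Fill the host memory chunk list of an init command from the chunks
 * previously allocated for the firmware: address, length and request id
 * per chunk, all little-endian.
 */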
6466void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6467 struct wmi_host_mem_chunks *chunks)
6468{
6469 struct host_memory_chunk *chunk;
6470 int i;
6471
6472 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6473
6474 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6475 chunk = &chunks->items[i];
6476 chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6477 chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6478 chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6479
6480 ath10k_dbg(ar, ATH10K_DBG_WMI,
6481 "wmi chunk %d len %d requested, addr 0x%llx\n",
6482 i,
6483 ar->wmi.mem_chunks[i].len,
6484 (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6485 }
6486}
6487
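/* Build the WMI init command for main firmware: a resource_config block
 * populated from the TARGET_* defaults followed by the host memory
 * chunk list.
 */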
6488static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6489{
6490 struct wmi_init_cmd *cmd;
6491 struct sk_buff *buf;
6492 struct wmi_resource_config config = {};
6493 u32 len, val;
6494
6495 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6496 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6497 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6498
6499 config.num_offload_reorder_bufs =
6500 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6501
6502 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6503 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6504 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6505 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6506 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6507 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6508 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6509 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6510 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6511 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6512 config.scan_max_pending_reqs =
6513 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6514
6515 config.bmiss_offload_max_vdev =
6516 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6517
6518 config.roam_offload_max_vdev =
6519 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6520
6521 config.roam_offload_max_ap_profiles =
6522 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6523
6524 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6525 config.num_mcast_table_elems =
6526 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6527
6528 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6529 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6530 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6531 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6532 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6533
6534 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6535 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6536
6537 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6538
6539 config.gtk_offload_max_vdev =
6540 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6541
6542 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6543 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6544
6545 len = sizeof(*cmd) +
6546 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6547
6548 buf = ath10k_wmi_alloc_skb(ar, len);
6549 if (!buf)
6550 return ERR_PTR(-ENOMEM);
6551
6552 cmd = (struct wmi_init_cmd *)buf->data;
6553
6554 memcpy(&cmd->resource_config, &config, sizeof(config));
6555 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6556
6557 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6558 return buf;
6559}
6560
6561static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6562{
6563 struct wmi_init_cmd_10x *cmd;
6564 struct sk_buff *buf;
6565 struct wmi_resource_config_10x config = {};
6566 u32 len, val;
6567
6568 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6569 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6570 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6571 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6572 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6573 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6574 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6575 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6576 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6577 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6578 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6579 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6580 config.scan_max_pending_reqs =
6581 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6582
6583 config.bmiss_offload_max_vdev =
6584 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6585
6586 config.roam_offload_max_vdev =
6587 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6588
6589 config.roam_offload_max_ap_profiles =
6590 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6591
6592 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6593 config.num_mcast_table_elems =
6594 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6595
6596 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6597 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6598 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6599 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6600 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6601
6602 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6603 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6604
6605 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6606
6607 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6608 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6609
6610 len = sizeof(*cmd) +
6611 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6612
6613 buf = ath10k_wmi_alloc_skb(ar, len);
6614 if (!buf)
6615 return ERR_PTR(-ENOMEM);
6616
6617 cmd = (struct wmi_init_cmd_10x *)buf->data;
6618
6619 memcpy(&cmd->resource_config, &config, sizeof(config));
6620 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6621
6622 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6623 return buf;
6624}
6625
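/* 10.2 init differs from 10.1 mainly in the feature_mask: RX batch mode
 * is always requested, while COEX GPIO, peer stats and BSS channel info
 * are added only when the corresponding services/flags are present.
 */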
6626static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6627{
6628 struct wmi_init_cmd_10_2 *cmd;
6629 struct sk_buff *buf;
6630 struct wmi_resource_config_10x config = {};
6631 u32 len, val, features;
6632
6633 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6634 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6635
6636 if (ath10k_peer_stats_enabled(ar)) {
6637 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6638 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6639 } else {
6640 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6641 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6642 }
6643
6644 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6645 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6646 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6647 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6648 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6649 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6650 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6651 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6652
6653 config.scan_max_pending_reqs =
6654 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6655
6656 config.bmiss_offload_max_vdev =
6657 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6658
6659 config.roam_offload_max_vdev =
6660 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6661
6662 config.roam_offload_max_ap_profiles =
6663 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6664
6665 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6666 config.num_mcast_table_elems =
6667 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6668
6669 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6670 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6671 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6672 config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6673 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6674
6675 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6676 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6677
6678 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6679
6680 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6681 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6682
6683 len = sizeof(*cmd) +
6684 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6685
6686 buf = ath10k_wmi_alloc_skb(ar, len);
6687 if (!buf)
6688 return ERR_PTR(-ENOMEM);
6689
6690 cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6691
6692 features = WMI_10_2_RX_BATCH_MODE;
6693
6694 if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6695 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6696 features |= WMI_10_2_COEX_GPIO;
6697
6698 if (ath10k_peer_stats_enabled(ar))
6699 features |= WMI_10_2_PEER_STATS;
6700
6701 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6702 features |= WMI_10_2_BSS_CHAN_INFO;
6703
6704 cmd->resource_config.feature_mask = __cpu_to_le32(features);
6705
6706 memcpy(&cmd->resource_config.common, &config, sizeof(config));
6707 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6708
6709 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6710 return buf;
6711}
6712
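/* 10.4 init sizes several resources (vdevs, peers, tids, chain masks,
 * msdu descriptors) from per-device values in struct ath10k rather than
 * compile-time TARGET_* constants.
 */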
6713static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6714{
6715 struct wmi_init_cmd_10_4 *cmd;
6716 struct sk_buff *buf;
6717 struct wmi_resource_config_10_4 config = {};
6718 u32 len;
6719
6720 config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6721 config.num_peers = __cpu_to_le32(ar->max_num_peers);
6722 config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6723 config.num_tids = __cpu_to_le32(ar->num_tids);
6724
6725 config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6726 config.num_offload_reorder_buffs =
6727 __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6728 config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6729 config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6730 config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6731 config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6732
6733 config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6734 config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6735 config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6736 config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6737
6738 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6739 config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6740 config.bmiss_offload_max_vdev =
6741 __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6742 config.roam_offload_max_vdev =
6743 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6744 config.roam_offload_max_ap_profiles =
6745 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6746 config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6747 config.num_mcast_table_elems =
6748 __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6749
6750 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6751 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6752 config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6753 config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6754 config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6755
6756 config.rx_skip_defrag_timeout_dup_detection_check =
6757 __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6758
6759 config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6760 config.gtk_offload_max_vdev =
6761 __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6762 config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6763 config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6764 config.max_peer_ext_stats =
6765 __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6766 config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6767
6768 config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6769 config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6770 config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6771 config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6772
6773 config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6774 config.tt_support =
6775 __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6776 config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6777 config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6778 config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6779
6780 len = sizeof(*cmd) +
6781 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
6782
6783 buf = ath10k_wmi_alloc_skb(ar, len);
6784 if (!buf)
6785 return ERR_PTR(-ENOMEM);
6786
6787 cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6788 memcpy(&cmd->resource_config, &config, sizeof(config));
6789 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6790
6791 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6792 return buf;
6793}
6794
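/* Sanity-check scan arguments against the limits the start scan command
 * format can express (IE length, channel, SSID and BSSID counts).
 */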
6795int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6796{
6797 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6798 return -EINVAL;
6799 if (arg->n_channels > ARRAY_SIZE(arg->channels))
6800 return -EINVAL;
6801 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6802 return -EINVAL;
6803 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6804 return -EINVAL;
6805
6806 return 0;
6807}
6808
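/* Compute the space needed for the optional TLVs (channel list, SSID
 * list, BSSID list and IE data) appended after the common start scan
 * fields; must stay in sync with ath10k_wmi_put_start_scan_tlvs().
 */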
6809static size_t
6810ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6811{
6812 int len = 0;
6813
6814 if (arg->ie_len) {
6815 len += sizeof(struct wmi_ie_data);
6816 len += roundup(arg->ie_len, 4);
6817 }
6818
6819 if (arg->n_channels) {
6820 len += sizeof(struct wmi_chan_list);
6821 len += sizeof(__le32) * arg->n_channels;
6822 }
6823
6824 if (arg->n_ssids) {
6825 len += sizeof(struct wmi_ssid_list);
6826 len += sizeof(struct wmi_ssid) * arg->n_ssids;
6827 }
6828
6829 if (arg->n_bssids) {
6830 len += sizeof(struct wmi_bssid_list);
6831 len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6832 }
6833
6834 return len;
6835}
6836
6837void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6838 const struct wmi_start_scan_arg *arg)
6839{
6840 u32 scan_id;
6841 u32 scan_req_id;
6842
6843 scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
6844 scan_id |= arg->scan_id;
6845
6846 scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6847 scan_req_id |= arg->scan_req_id;
6848
6849 cmn->scan_id = __cpu_to_le32(scan_id);
6850 cmn->scan_req_id = __cpu_to_le32(scan_req_id);
6851 cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
6852 cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
6853 cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6854 cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
6855 cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6856 cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
6857 cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
6858 cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
6859 cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6860 cmn->idle_time = __cpu_to_le32(arg->idle_time);
6861 cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
6862 cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
6863 cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
6864}
6865
6866static void
6867ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
6868 const struct wmi_start_scan_arg *arg)
6869{
6870 struct wmi_ie_data *ie;
6871 struct wmi_chan_list *channels;
6872 struct wmi_ssid_list *ssids;
6873 struct wmi_bssid_list *bssids;
6874 void *ptr = tlvs->tlvs;
6875 int i;
6876
6877 if (arg->n_channels) {
6878 channels = ptr;
6879 channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6880 channels->num_chan = __cpu_to_le32(arg->n_channels);
6881
6882 for (i = 0; i < arg->n_channels; i++)
6883 channels->channel_list[i].freq =
6884 __cpu_to_le16(arg->channels[i]);
6885
6886 ptr += sizeof(*channels);
6887 ptr += sizeof(__le32) * arg->n_channels;
6888 }
6889
6890 if (arg->n_ssids) {
6891 ssids = ptr;
6892 ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6893 ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6894
6895 for (i = 0; i < arg->n_ssids; i++) {
6896 ssids->ssids[i].ssid_len =
6897 __cpu_to_le32(arg->ssids[i].len);
6898 memcpy(&ssids->ssids[i].ssid,
6899 arg->ssids[i].ssid,
6900 arg->ssids[i].len);
6901 }
6902
6903 ptr += sizeof(*ssids);
6904 ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6905 }
6906
6907 if (arg->n_bssids) {
6908 bssids = ptr;
6909 bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6910 bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6911
6912 for (i = 0; i < arg->n_bssids; i++)
6913 ether_addr_copy(bssids->bssid_list[i].addr,
6914 arg->bssids[i].bssid);
6915
6916 ptr += sizeof(*bssids);
6917 ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6918 }
6919
6920 if (arg->ie_len) {
6921 ie = ptr;
6922 ie->tag = __cpu_to_le32(WMI_IE_TAG);
6923 ie->ie_len = __cpu_to_le32(arg->ie_len);
6924 memcpy(ie->ie_data, arg->ie, arg->ie_len);
6925
6926 ptr += sizeof(*ie);
6927 ptr += roundup(arg->ie_len, 4);
6928 }
6929}
6930
6931static struct sk_buff *
6932ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
6933 const struct wmi_start_scan_arg *arg)
6934{
6935 struct wmi_start_scan_cmd *cmd;
6936 struct sk_buff *skb;
6937 size_t len;
6938 int ret;
6939
6940 ret = ath10k_wmi_start_scan_verify(arg);
6941 if (ret)
6942 return ERR_PTR(ret);
6943
6944 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
6945 skb = ath10k_wmi_alloc_skb(ar, len);
6946 if (!skb)
6947 return ERR_PTR(-ENOMEM);
6948
6949 cmd = (struct wmi_start_scan_cmd *)skb->data;
6950
6951 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
6952 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
6953
6954 cmd->burst_duration_ms = __cpu_to_le32(0);
6955
6956 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
6957 return skb;
6958}
6959
6960static struct sk_buff *
6961ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
6962 const struct wmi_start_scan_arg *arg)
6963{
6964 struct wmi_10x_start_scan_cmd *cmd;
6965 struct sk_buff *skb;
6966 size_t len;
6967 int ret;
6968
6969 ret = ath10k_wmi_start_scan_verify(arg);
6970 if (ret)
6971 return ERR_PTR(ret);
6972
6973 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
6974 skb = ath10k_wmi_alloc_skb(ar, len);
6975 if (!skb)
6976 return ERR_PTR(-ENOMEM);
6977
6978 cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
6979
6980 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
6981 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
6982
6983 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
6984 return skb;
6985}
6986
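/* Initialize a scan request with the driver's default dwell/rest times,
 * scan event mask and a wildcard (broadcast) BSSID; callers override
 * individual fields as needed before generating the command.
 */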
6987void ath10k_wmi_start_scan_init(struct ath10k *ar,
6988 struct wmi_start_scan_arg *arg)
6989{
6990	/* set up commonly used values */
6991 arg->scan_req_id = 1;
6992 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
6993 arg->dwell_time_active = 50;
6994 arg->dwell_time_passive = 150;
6995 arg->min_rest_time = 50;
6996 arg->max_rest_time = 500;
6997 arg->repeat_probe_time = 0;
6998 arg->probe_spacing_time = 0;
6999 arg->idle_time = 0;
7000 arg->max_scan_time = 20000;
7001 arg->probe_delay = 5;
7002 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7003 | WMI_SCAN_EVENT_COMPLETED
7004 | WMI_SCAN_EVENT_BSS_CHANNEL
7005 | WMI_SCAN_EVENT_FOREIGN_CHANNEL
7006 | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7007 | WMI_SCAN_EVENT_DEQUEUED;
7008 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7009 arg->n_bssids = 1;
7010 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7011}
7012
7013static struct sk_buff *
7014ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7015 const struct wmi_stop_scan_arg *arg)
7016{
7017 struct wmi_stop_scan_cmd *cmd;
7018 struct sk_buff *skb;
7019 u32 scan_id;
7020 u32 req_id;
7021
7022 if (arg->req_id > 0xFFF)
7023 return ERR_PTR(-EINVAL);
7024 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7025 return ERR_PTR(-EINVAL);
7026
7027 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7028 if (!skb)
7029 return ERR_PTR(-ENOMEM);
7030
7031 scan_id = arg->u.scan_id;
7032 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7033
7034 req_id = arg->req_id;
7035 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7036
7037 cmd = (struct wmi_stop_scan_cmd *)skb->data;
7038 cmd->req_type = __cpu_to_le32(arg->req_type);
7039 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
7040 cmd->scan_id = __cpu_to_le32(scan_id);
7041 cmd->scan_req_id = __cpu_to_le32(req_id);
7042
7043 ath10k_dbg(ar, ATH10K_DBG_WMI,
7044 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7045 arg->req_id, arg->req_type, arg->u.scan_id);
7046 return skb;
7047}
7048
7049static struct sk_buff *
7050ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7051 enum wmi_vdev_type type,
7052 enum wmi_vdev_subtype subtype,
7053 const u8 macaddr[ETH_ALEN])
7054{
7055 struct wmi_vdev_create_cmd *cmd;
7056 struct sk_buff *skb;
7057
7058 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7059 if (!skb)
7060 return ERR_PTR(-ENOMEM);
7061
7062 cmd = (struct wmi_vdev_create_cmd *)skb->data;
7063 cmd->vdev_id = __cpu_to_le32(vdev_id);
7064 cmd->vdev_type = __cpu_to_le32(type);
7065 cmd->vdev_subtype = __cpu_to_le32(subtype);
7066 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7067
7068 ath10k_dbg(ar, ATH10K_DBG_WMI,
7069 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7070 vdev_id, type, subtype, macaddr);
7071 return skb;
7072}
7073
7074static struct sk_buff *
7075ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7076{
7077 struct wmi_vdev_delete_cmd *cmd;
7078 struct sk_buff *skb;
7079
7080 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7081 if (!skb)
7082 return ERR_PTR(-ENOMEM);
7083
7084 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7085 cmd->vdev_id = __cpu_to_le32(vdev_id);
7086
7087 ath10k_dbg(ar, ATH10K_DBG_WMI,
7088 "WMI vdev delete id %d\n", vdev_id);
7089 return skb;
7090}
7091
7092static struct sk_buff *
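/* Build a vdev start or restart command (shared layout). Hidden SSID and
 * PMF translate into flag bits; the target channel is encoded via
 * ath10k_wmi_put_wmi_channel().
 */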
7093ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7094 const struct wmi_vdev_start_request_arg *arg,
7095 bool restart)
7096{
7097 struct wmi_vdev_start_request_cmd *cmd;
7098 struct sk_buff *skb;
7099 const char *cmdname;
7100 u32 flags = 0;
7101
7102 if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7103 return ERR_PTR(-EINVAL);
7104 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7105 return ERR_PTR(-EINVAL);
7106
7107 if (restart)
7108 cmdname = "restart";
7109 else
7110 cmdname = "start";
7111
7112 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7113 if (!skb)
7114 return ERR_PTR(-ENOMEM);
7115
7116 if (arg->hidden_ssid)
7117 flags |= WMI_VDEV_START_HIDDEN_SSID;
7118 if (arg->pmf_enabled)
7119 flags |= WMI_VDEV_START_PMF_ENABLED;
7120
7121 cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7122 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7123 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
7124 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7125 cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
7126 cmd->flags = __cpu_to_le32(flags);
7127 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
7128 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
7129
7130 if (arg->ssid) {
7131 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7132 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7133 }
7134
7135 ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
7136
7137 ath10k_dbg(ar, ATH10K_DBG_WMI,
7138 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7139 cmdname, arg->vdev_id,
7140 flags, arg->channel.freq, arg->channel.mode,
7141 cmd->chan.flags, arg->channel.max_power);
7142
7143 return skb;
7144}
7145
7146static struct sk_buff *
7147ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7148{
7149 struct wmi_vdev_stop_cmd *cmd;
7150 struct sk_buff *skb;
7151
7152 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7153 if (!skb)
7154 return ERR_PTR(-ENOMEM);
7155
7156 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7157 cmd->vdev_id = __cpu_to_le32(vdev_id);
7158
7159 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7160 return skb;
7161}
7162
7163static struct sk_buff *
7164ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7165 const u8 *bssid)
7166{
7167 struct wmi_vdev_up_cmd *cmd;
7168 struct sk_buff *skb;
7169
7170 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7171 if (!skb)
7172 return ERR_PTR(-ENOMEM);
7173
7174 cmd = (struct wmi_vdev_up_cmd *)skb->data;
7175 cmd->vdev_id = __cpu_to_le32(vdev_id);
7176 cmd->vdev_assoc_id = __cpu_to_le32(aid);
7177 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7178
7179 ath10k_dbg(ar, ATH10K_DBG_WMI,
7180 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7181 vdev_id, aid, bssid);
7182 return skb;
7183}
7184
7185static struct sk_buff *
7186ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7187{
7188 struct wmi_vdev_down_cmd *cmd;
7189 struct sk_buff *skb;
7190
7191 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7192 if (!skb)
7193 return ERR_PTR(-ENOMEM);
7194
7195 cmd = (struct wmi_vdev_down_cmd *)skb->data;
7196 cmd->vdev_id = __cpu_to_le32(vdev_id);
7197
7198 ath10k_dbg(ar, ATH10K_DBG_WMI,
7199 "wmi mgmt vdev down id 0x%x\n", vdev_id);
7200 return skb;
7201}
7202
7203static struct sk_buff *
7204ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7205 u32 param_id, u32 param_value)
7206{
7207 struct wmi_vdev_set_param_cmd *cmd;
7208 struct sk_buff *skb;
7209
7210 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7211 ath10k_dbg(ar, ATH10K_DBG_WMI,
7212 "vdev param %d not supported by firmware\n",
7213 param_id);
7214 return ERR_PTR(-EOPNOTSUPP);
7215 }
7216
7217 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7218 if (!skb)
7219 return ERR_PTR(-ENOMEM);
7220
7221 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7222 cmd->vdev_id = __cpu_to_le32(vdev_id);
7223 cmd->param_id = __cpu_to_le32(param_id);
7224 cmd->param_value = __cpu_to_le32(param_value);
7225
7226 ath10k_dbg(ar, ATH10K_DBG_WMI,
7227 "wmi vdev id 0x%x set param %d value %d\n",
7228 vdev_id, param_id, param_value);
7229 return skb;
7230}
7231
7232static struct sk_buff *
7233ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7234 const struct wmi_vdev_install_key_arg *arg)
7235{
7236 struct wmi_vdev_install_key_cmd *cmd;
7237 struct sk_buff *skb;
7238
7239 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7240 return ERR_PTR(-EINVAL);
7241 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7242 return ERR_PTR(-EINVAL);
7243
7244 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7245 if (!skb)
7246 return ERR_PTR(-ENOMEM);
7247
7248 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7249 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7250 cmd->key_idx = __cpu_to_le32(arg->key_idx);
7251 cmd->key_flags = __cpu_to_le32(arg->key_flags);
7252 cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
7253 cmd->key_len = __cpu_to_le32(arg->key_len);
7254 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7255 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7256
7257 if (arg->macaddr)
7258 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7259 if (arg->key_data)
7260 memcpy(cmd->key_data, arg->key_data, arg->key_len);
7261
7262 ath10k_dbg(ar, ATH10K_DBG_WMI,
7263 "wmi vdev install key idx %d cipher %d len %d\n",
7264 arg->key_idx, arg->key_cipher, arg->key_len);
7265 return skb;
7266}
7267
7268static struct sk_buff *
7269ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7270 const struct wmi_vdev_spectral_conf_arg *arg)
7271{
7272 struct wmi_vdev_spectral_conf_cmd *cmd;
7273 struct sk_buff *skb;
7274
7275 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7276 if (!skb)
7277 return ERR_PTR(-ENOMEM);
7278
7279 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7280 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7281 cmd->scan_count = __cpu_to_le32(arg->scan_count);
7282 cmd->scan_period = __cpu_to_le32(arg->scan_period);
7283 cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7284 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7285 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7286 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7287 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7288 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7289 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7290 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7291 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7292 cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7293 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7294 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7295 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7296 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7297 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7298 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7299
7300 return skb;
7301}
7302
7303static struct sk_buff *
7304ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7305 u32 trigger, u32 enable)
7306{
7307 struct wmi_vdev_spectral_enable_cmd *cmd;
7308 struct sk_buff *skb;
7309
7310 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7311 if (!skb)
7312 return ERR_PTR(-ENOMEM);
7313
7314 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7315 cmd->vdev_id = __cpu_to_le32(vdev_id);
7316 cmd->trigger_cmd = __cpu_to_le32(trigger);
7317 cmd->enable_cmd = __cpu_to_le32(enable);
7318
7319 return skb;
7320}
7321
7322static struct sk_buff *
7323ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7324 const u8 peer_addr[ETH_ALEN],
7325 enum wmi_peer_type peer_type)
7326{
7327 struct wmi_peer_create_cmd *cmd;
7328 struct sk_buff *skb;
7329
7330 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7331 if (!skb)
7332 return ERR_PTR(-ENOMEM);
7333
7334 cmd = (struct wmi_peer_create_cmd *)skb->data;
7335 cmd->vdev_id = __cpu_to_le32(vdev_id);
7336 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7337 cmd->peer_type = __cpu_to_le32(peer_type);
7338
7339 ath10k_dbg(ar, ATH10K_DBG_WMI,
7340 "wmi peer create vdev_id %d peer_addr %pM\n",
7341 vdev_id, peer_addr);
7342 return skb;
7343}
7344
7345static struct sk_buff *
7346ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7347 const u8 peer_addr[ETH_ALEN])
7348{
7349 struct wmi_peer_delete_cmd *cmd;
7350 struct sk_buff *skb;
7351
7352 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7353 if (!skb)
7354 return ERR_PTR(-ENOMEM);
7355
7356 cmd = (struct wmi_peer_delete_cmd *)skb->data;
7357 cmd->vdev_id = __cpu_to_le32(vdev_id);
7358 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7359
7360 ath10k_dbg(ar, ATH10K_DBG_WMI,
7361 "wmi peer delete vdev_id %d peer_addr %pM\n",
7362 vdev_id, peer_addr);
7363 return skb;
7364}
7365
7366static struct sk_buff *
7367ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7368 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7369{
7370 struct wmi_peer_flush_tids_cmd *cmd;
7371 struct sk_buff *skb;
7372
7373 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7374 if (!skb)
7375 return ERR_PTR(-ENOMEM);
7376
7377 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7378 cmd->vdev_id = __cpu_to_le32(vdev_id);
7379 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7380 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7381
7382 ath10k_dbg(ar, ATH10K_DBG_WMI,
7383 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7384 vdev_id, peer_addr, tid_bitmap);
7385 return skb;
7386}
7387
7388static struct sk_buff *
7389ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7390 const u8 *peer_addr,
7391 enum wmi_peer_param param_id,
7392 u32 param_value)
7393{
7394 struct wmi_peer_set_param_cmd *cmd;
7395 struct sk_buff *skb;
7396
7397 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7398 if (!skb)
7399 return ERR_PTR(-ENOMEM);
7400
7401 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7402 cmd->vdev_id = __cpu_to_le32(vdev_id);
7403 cmd->param_id = __cpu_to_le32(param_id);
7404 cmd->param_value = __cpu_to_le32(param_value);
7405 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7406
7407 ath10k_dbg(ar, ATH10K_DBG_WMI,
7408 "wmi vdev %d peer 0x%pM set param %d value %d\n",
7409 vdev_id, peer_addr, param_id, param_value);
7410 return skb;
7411}
7412
7413static struct sk_buff *
7414ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7415 enum wmi_sta_ps_mode psmode)
7416{
7417 struct wmi_sta_powersave_mode_cmd *cmd;
7418 struct sk_buff *skb;
7419
7420 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7421 if (!skb)
7422 return ERR_PTR(-ENOMEM);
7423
7424 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7425 cmd->vdev_id = __cpu_to_le32(vdev_id);
7426 cmd->sta_ps_mode = __cpu_to_le32(psmode);
7427
7428 ath10k_dbg(ar, ATH10K_DBG_WMI,
7429 "wmi set powersave id 0x%x mode %d\n",
7430 vdev_id, psmode);
7431 return skb;
7432}
7433
7434static struct sk_buff *
7435ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7436 enum wmi_sta_powersave_param param_id,
7437 u32 value)
7438{
7439 struct wmi_sta_powersave_param_cmd *cmd;
7440 struct sk_buff *skb;
7441
7442 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7443 if (!skb)
7444 return ERR_PTR(-ENOMEM);
7445
7446 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7447 cmd->vdev_id = __cpu_to_le32(vdev_id);
7448 cmd->param_id = __cpu_to_le32(param_id);
7449 cmd->param_value = __cpu_to_le32(value);
7450
7451 ath10k_dbg(ar, ATH10K_DBG_WMI,
7452 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7453 vdev_id, param_id, value);
7454 return skb;
7455}
7456
7457static struct sk_buff *
7458ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7459 enum wmi_ap_ps_peer_param param_id, u32 value)
7460{
7461 struct wmi_ap_ps_peer_cmd *cmd;
7462 struct sk_buff *skb;
7463
7464 if (!mac)
7465 return ERR_PTR(-EINVAL);
7466
7467 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7468 if (!skb)
7469 return ERR_PTR(-ENOMEM);
7470
7471 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7472 cmd->vdev_id = __cpu_to_le32(vdev_id);
7473 cmd->param_id = __cpu_to_le32(param_id);
7474 cmd->param_value = __cpu_to_le32(value);
7475 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7476
7477 ath10k_dbg(ar, ATH10K_DBG_WMI,
7478 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7479 vdev_id, param_id, value, mac);
7480 return skb;
7481}
7482
7483static struct sk_buff *
7484ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7485 const struct wmi_scan_chan_list_arg *arg)
7486{
7487 struct wmi_scan_chan_list_cmd *cmd;
7488 struct sk_buff *skb;
7489 struct wmi_channel_arg *ch;
7490 struct wmi_channel *ci;
7491 int len;
7492 int i;
7493
7494 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
7495
7496 skb = ath10k_wmi_alloc_skb(ar, len);
7497 if (!skb)
7498	return ERR_PTR(-ENOMEM);
7499
7500 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7501 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7502
7503 for (i = 0; i < arg->n_channels; i++) {
7504 ch = &arg->channels[i];
7505 ci = &cmd->chan_info[i];
7506
7507 ath10k_wmi_put_wmi_channel(ci, ch);
7508 }
7509
7510 return skb;
7511}
7512
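/* Fill the fields shared by all peer assoc command revisions: association
 * state, capabilities, HT/VHT parameters and the legacy/HT/VHT rate sets.
 */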
7513static void
7514ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7515 const struct wmi_peer_assoc_complete_arg *arg)
7516{
7517 struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7518
7519 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7520 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7521 cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
7522 cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
7523 cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
7524 cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7525 cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
7526 cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
7527 cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
7528 cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
7529 cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
7530 cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
7531 cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
7532
7533 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7534
7535 cmd->peer_legacy_rates.num_rates =
7536 __cpu_to_le32(arg->peer_legacy_rates.num_rates);
7537 memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7538 arg->peer_legacy_rates.num_rates);
7539
7540 cmd->peer_ht_rates.num_rates =
7541 __cpu_to_le32(arg->peer_ht_rates.num_rates);
7542 memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7543 arg->peer_ht_rates.num_rates);
7544
7545 cmd->peer_vht_rates.rx_max_rate =
7546 __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7547 cmd->peer_vht_rates.rx_mcs_set =
7548 __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7549 cmd->peer_vht_rates.tx_max_rate =
7550 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7551 cmd->peer_vht_rates.tx_mcs_set =
7552 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7553}
7554
7555static void
7556ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7557 const struct wmi_peer_assoc_complete_arg *arg)
7558{
7559 struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7560
7561 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7562 memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7563}
7564
7565static void
7566ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7567 const struct wmi_peer_assoc_complete_arg *arg)
7568{
7569 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7570}
7571
7572static void
7573ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7574 const struct wmi_peer_assoc_complete_arg *arg)
7575{
7576 struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7577 int max_mcs, max_nss;
7578 u32 info0;
7579
7580 /* TODO: Is using max values okay with firmware? */
7581 max_mcs = 0xf;
7582 max_nss = 0xf;
7583
7584 info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7585 SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7586
7587 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7588 cmd->info0 = __cpu_to_le32(info0);
7589}
7590
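/* 10.4 additionally carries the peer bandwidth/NSS override; a non-zero
 * argument is stored as (value - 1) with the override-enable bit set.
 */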
7591static void
7592ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7593 const struct wmi_peer_assoc_complete_arg *arg)
7594{
7595 struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7596
7597 ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7598 if (arg->peer_bw_rxnss_override)
7599 cmd->peer_bw_rxnss_override =
7600 __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
7601 BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
7602 else
7603 cmd->peer_bw_rxnss_override = 0;
7604}
7605
7606static int
7607ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7608{
7609 if (arg->peer_mpdu_density > 16)
7610 return -EINVAL;
7611 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7612 return -EINVAL;
7613 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7614 return -EINVAL;
7615
7616 return 0;
7617}
7618
7619static struct sk_buff *
7620ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7621 const struct wmi_peer_assoc_complete_arg *arg)
7622{
7623 size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7624 struct sk_buff *skb;
7625 int ret;
7626
7627 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7628 if (ret)
7629 return ERR_PTR(ret);
7630
7631 skb = ath10k_wmi_alloc_skb(ar, len);
7632 if (!skb)
7633 return ERR_PTR(-ENOMEM);
7634
7635 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7636
7637 ath10k_dbg(ar, ATH10K_DBG_WMI,
7638 "wmi peer assoc vdev %d addr %pM (%s)\n",
7639 arg->vdev_id, arg->addr,
7640 arg->peer_reassoc ? "reassociate" : "new");
7641 return skb;
7642}
7643
7644static struct sk_buff *
7645ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7646 const struct wmi_peer_assoc_complete_arg *arg)
7647{
7648 size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7649 struct sk_buff *skb;
7650 int ret;
7651
7652 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7653 if (ret)
7654 return ERR_PTR(ret);
7655
7656 skb = ath10k_wmi_alloc_skb(ar, len);
7657 if (!skb)
7658 return ERR_PTR(-ENOMEM);
7659
7660 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7661
7662 ath10k_dbg(ar, ATH10K_DBG_WMI,
7663 "wmi peer assoc vdev %d addr %pM (%s)\n",
7664 arg->vdev_id, arg->addr,
7665 arg->peer_reassoc ? "reassociate" : "new");
7666 return skb;
7667}
7668
7669static struct sk_buff *
7670ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7671 const struct wmi_peer_assoc_complete_arg *arg)
7672{
7673 size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7674 struct sk_buff *skb;
7675 int ret;
7676
7677 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7678 if (ret)
7679 return ERR_PTR(ret);
7680
7681 skb = ath10k_wmi_alloc_skb(ar, len);
7682 if (!skb)
7683 return ERR_PTR(-ENOMEM);
7684
7685 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7686
7687 ath10k_dbg(ar, ATH10K_DBG_WMI,
7688 "wmi peer assoc vdev %d addr %pM (%s)\n",
7689 arg->vdev_id, arg->addr,
7690 arg->peer_reassoc ? "reassociate" : "new");
7691 return skb;
7692}
7693
7694static struct sk_buff *
7695ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7696 const struct wmi_peer_assoc_complete_arg *arg)
7697{
7698 size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7699 struct sk_buff *skb;
7700 int ret;
7701
7702 ret = ath10k_wmi_peer_assoc_check_arg(arg);
7703 if (ret)
7704 return ERR_PTR(ret);
7705
7706 skb = ath10k_wmi_alloc_skb(ar, len);
7707 if (!skb)
7708 return ERR_PTR(-ENOMEM);
7709
7710 ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7711
7712 ath10k_dbg(ar, ATH10K_DBG_WMI,
7713 "wmi peer assoc vdev %d addr %pM (%s)\n",
7714 arg->vdev_id, arg->addr,
7715 arg->peer_reassoc ? "reassociate" : "new");
7716 return skb;
7717}
7718
7719static struct sk_buff *
7720ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7721{
7722 struct sk_buff *skb;
7723
7724 skb = ath10k_wmi_alloc_skb(ar, 0);
7725 if (!skb)
7726 return ERR_PTR(-ENOMEM);
7727
7728 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7729 return skb;
7730}
7731
7732static struct sk_buff *
7733ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7734 enum wmi_bss_survey_req_type type)
7735{
7736 struct wmi_pdev_chan_info_req_cmd *cmd;
7737 struct sk_buff *skb;
7738
7739 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7740 if (!skb)
7741 return ERR_PTR(-ENOMEM);
7742
7743 cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7744 cmd->type = __cpu_to_le32(type);
7745
7746 ath10k_dbg(ar, ATH10K_DBG_WMI,
7747 "wmi pdev bss info request type %d\n", type);
7748
7749 return skb;
7750}
7751
7752/* This function assumes the beacon is already DMA mapped */
7753static struct sk_buff *
7754ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7755 size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7756 bool deliver_cab)
7757{
7758 struct wmi_bcn_tx_ref_cmd *cmd;
7759 struct sk_buff *skb;
7760 struct ieee80211_hdr *hdr;
7761 u16 fc;
7762
7763 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7764 if (!skb)
7765 return ERR_PTR(-ENOMEM);
7766
7767 hdr = (struct ieee80211_hdr *)bcn;
7768 fc = le16_to_cpu(hdr->frame_control);
7769
7770 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7771 cmd->vdev_id = __cpu_to_le32(vdev_id);
7772 cmd->data_len = __cpu_to_le32(bcn_len);
7773 cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7774 cmd->msdu_id = 0;
7775 cmd->frame_control = __cpu_to_le32(fc);
7776 cmd->flags = 0;
7777 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7778
7779 if (dtim_zero)
7780 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7781
7782 if (deliver_cab)
7783 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7784
7785 return skb;
7786}
7787
7788void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7789 const struct wmi_wmm_params_arg *arg)
7790{
7791 params->cwmin = __cpu_to_le32(arg->cwmin);
7792 params->cwmax = __cpu_to_le32(arg->cwmax);
7793 params->aifs = __cpu_to_le32(arg->aifs);
7794 params->txop = __cpu_to_le32(arg->txop);
7795 params->acm = __cpu_to_le32(arg->acm);
7796 params->no_ack = __cpu_to_le32(arg->no_ack);
7797}
7798
7799static struct sk_buff *
7800ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7801 const struct wmi_wmm_params_all_arg *arg)
7802{
7803 struct wmi_pdev_set_wmm_params *cmd;
7804 struct sk_buff *skb;
7805
7806 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7807 if (!skb)
7808 return ERR_PTR(-ENOMEM);
7809
7810 cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7811 ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7812 ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7813 ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7814 ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7815
7816 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7817 return skb;
7818}
7819
7820static struct sk_buff *
7821ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7822{
7823 struct wmi_request_stats_cmd *cmd;
7824 struct sk_buff *skb;
7825
7826 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7827 if (!skb)
7828 return ERR_PTR(-ENOMEM);
7829
7830 cmd = (struct wmi_request_stats_cmd *)skb->data;
7831 cmd->stats_id = __cpu_to_le32(stats_mask);
7832
7833 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7834 stats_mask);
7835 return skb;
7836}
7837
7838static struct sk_buff *
7839ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7840 enum wmi_force_fw_hang_type type, u32 delay_ms)
7841{
7842 struct wmi_force_fw_hang_cmd *cmd;
7843 struct sk_buff *skb;
7844
7845 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7846 if (!skb)
7847 return ERR_PTR(-ENOMEM);
7848
7849 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7850 cmd->type = __cpu_to_le32(type);
7851 cmd->delay_ms = __cpu_to_le32(delay_ms);
7852
7853 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7854 type, delay_ms);
7855 return skb;
7856}
7857
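/* Build a dbglog configuration command. A zero module_enable restores the
 * defaults (all modules enabled at WARN level); otherwise the requested
 * log level is applied to the given module mask.
 */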
7858static struct sk_buff *
7859ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7860 u32 log_level)
7861{
7862 struct wmi_dbglog_cfg_cmd *cmd;
7863 struct sk_buff *skb;
7864 u32 cfg;
7865
7866 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7867 if (!skb)
7868 return ERR_PTR(-ENOMEM);
7869
7870 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7871
7872 if (module_enable) {
7873 cfg = SM(log_level,
7874 ATH10K_DBGLOG_CFG_LOG_LVL);
7875 } else {
7876	/* restore defaults: all modules enabled at WARN level */
7877 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7878 ATH10K_DBGLOG_CFG_LOG_LVL);
7879 module_enable = ~0;
7880 }
7881
7882 cmd->module_enable = __cpu_to_le32(module_enable);
7883 cmd->module_valid = __cpu_to_le32(~0);
7884 cmd->config_enable = __cpu_to_le32(cfg);
7885 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7886
7887 ath10k_dbg(ar, ATH10K_DBG_WMI,
7888 "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
7889 __le32_to_cpu(cmd->module_enable),
7890 __le32_to_cpu(cmd->module_valid),
7891 __le32_to_cpu(cmd->config_enable),
7892 __le32_to_cpu(cmd->config_valid));
7893 return skb;
7894}
7895
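/* 10.4 variant of the dbglog config command; the module enable/valid
 * masks are 64 bit wide instead of 32 bit.
 */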
7896static struct sk_buff *
7897ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7898 u32 log_level)
7899{
7900 struct wmi_10_4_dbglog_cfg_cmd *cmd;
7901 struct sk_buff *skb;
7902 u32 cfg;
7903
7904 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7905 if (!skb)
7906 return ERR_PTR(-ENOMEM);
7907
7908 cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
7909
7910 if (module_enable) {
7911 cfg = SM(log_level,
7912 ATH10K_DBGLOG_CFG_LOG_LVL);
7913 } else {
7914 /* set back defaults, all modules with WARN level */
7915 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7916 ATH10K_DBGLOG_CFG_LOG_LVL);
7917 module_enable = ~0;
7918 }
7919
7920 cmd->module_enable = __cpu_to_le64(module_enable);
7921 cmd->module_valid = __cpu_to_le64(~0);
7922 cmd->config_enable = __cpu_to_le32(cfg);
7923 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7924
7925 ath10k_dbg(ar, ATH10K_DBG_WMI,
7926 "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
7927 __le64_to_cpu(cmd->module_enable),
7928 __le64_to_cpu(cmd->module_valid),
7929 __le32_to_cpu(cmd->config_enable),
7930 __le32_to_cpu(cmd->config_valid));
7931 return skb;
7932}
7933
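/*
 * Build a packet log enable command. The requested event bitmap is
 * masked with ATH10K_PKTLOG_ANY so that unknown filter bits are never
 * passed on to firmware.
 */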
7934static struct sk_buff *
7935ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
7936{
7937 struct wmi_pdev_pktlog_enable_cmd *cmd;
7938 struct sk_buff *skb;
7939
7940 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7941 if (!skb)
7942 return ERR_PTR(-ENOMEM);
7943
7944 ev_bitmap &= ATH10K_PKTLOG_ANY;
7945
7946 cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
7947 cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
7948
7949 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
7950 ev_bitmap);
7951 return skb;
7952}
7953
7954static struct sk_buff *
7955ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
7956{
7957 struct sk_buff *skb;
7958
7959 skb = ath10k_wmi_alloc_skb(ar, 0);
7960 if (!skb)
7961 return ERR_PTR(-ENOMEM);
7962
7963 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
7964 return skb;
7965}
7966
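/*
 * Build a quiet-time configuration command: while @enabled is set the
 * firmware silences transmission for @duration out of every @period,
 * with @next_offset placing the quiet interval within the period.
 */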
7967static struct sk_buff *
7968ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
7969 u32 duration, u32 next_offset,
7970 u32 enabled)
7971{
7972 struct wmi_pdev_set_quiet_cmd *cmd;
7973 struct sk_buff *skb;
7974
7975 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7976 if (!skb)
7977 return ERR_PTR(-ENOMEM);
7978
7979 cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
7980 cmd->period = __cpu_to_le32(period);
7981 cmd->duration = __cpu_to_le32(duration);
7982 cmd->next_start = __cpu_to_le32(next_offset);
7983 cmd->enabled = __cpu_to_le32(enabled);
7984
7985 ath10k_dbg(ar, ATH10K_DBG_WMI,
7986 "wmi quiet param: period %u duration %u enabled %d\n",
7987 period, duration, enabled);
7988 return skb;
7989}
7990
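/*
 * The following helpers build BlockAck session commands (ADDBA clear
 * response/send/set response and DELBA) for a single peer on a vdev.
 * Each rejects a NULL MAC address before allocating the command buffer.
 */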
7991static struct sk_buff *
7992ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
7993 const u8 *mac)
7994{
7995 struct wmi_addba_clear_resp_cmd *cmd;
7996 struct sk_buff *skb;
7997
7998 if (!mac)
7999 return ERR_PTR(-EINVAL);
8000
8001 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8002 if (!skb)
8003 return ERR_PTR(-ENOMEM);
8004
8005 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
8006 cmd->vdev_id = __cpu_to_le32(vdev_id);
8007 ether_addr_copy(cmd->peer_macaddr.addr, mac);
8008
8009 ath10k_dbg(ar, ATH10K_DBG_WMI,
8010 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
8011 vdev_id, mac);
8012 return skb;
8013}
8014
8015static struct sk_buff *
8016ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8017 u32 tid, u32 buf_size)
8018{
8019 struct wmi_addba_send_cmd *cmd;
8020 struct sk_buff *skb;
8021
8022 if (!mac)
8023 return ERR_PTR(-EINVAL);
8024
8025 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8026 if (!skb)
8027 return ERR_PTR(-ENOMEM);
8028
8029 cmd = (struct wmi_addba_send_cmd *)skb->data;
8030 cmd->vdev_id = __cpu_to_le32(vdev_id);
8031 ether_addr_copy(cmd->peer_macaddr.addr, mac);
8032 cmd->tid = __cpu_to_le32(tid);
8033 cmd->buffersize = __cpu_to_le32(buf_size);
8034
8035 ath10k_dbg(ar, ATH10K_DBG_WMI,
8036 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
8037 vdev_id, mac, tid, buf_size);
8038 return skb;
8039}
8040
8041static struct sk_buff *
8042ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8043 u32 tid, u32 status)
8044{
8045 struct wmi_addba_setresponse_cmd *cmd;
8046 struct sk_buff *skb;
8047
8048 if (!mac)
8049 return ERR_PTR(-EINVAL);
8050
8051 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8052 if (!skb)
8053 return ERR_PTR(-ENOMEM);
8054
8055 cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
8056 cmd->vdev_id = __cpu_to_le32(vdev_id);
8057 ether_addr_copy(cmd->peer_macaddr.addr, mac);
8058 cmd->tid = __cpu_to_le32(tid);
8059 cmd->statuscode = __cpu_to_le32(status);
8060
8061 ath10k_dbg(ar, ATH10K_DBG_WMI,
8062 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
8063 vdev_id, mac, tid, status);
8064 return skb;
8065}
8066
8067static struct sk_buff *
8068ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8069 u32 tid, u32 initiator, u32 reason)
8070{
8071 struct wmi_delba_send_cmd *cmd;
8072 struct sk_buff *skb;
8073
8074 if (!mac)
8075 return ERR_PTR(-EINVAL);
8076
8077 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8078 if (!skb)
8079 return ERR_PTR(-ENOMEM);
8080
8081 cmd = (struct wmi_delba_send_cmd *)skb->data;
8082 cmd->vdev_id = __cpu_to_le32(vdev_id);
8083 ether_addr_copy(cmd->peer_macaddr.addr, mac);
8084 cmd->tid = __cpu_to_le32(tid);
8085 cmd->initiator = __cpu_to_le32(initiator);
8086 cmd->reasoncode = __cpu_to_le32(reason);
8087
8088 ath10k_dbg(ar, ATH10K_DBG_WMI,
8089 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
8090 vdev_id, mac, tid, initiator, reason);
8091 return skb;
8092}
8093
8094static struct sk_buff *
8095ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
8096{
8097 struct wmi_pdev_get_tpc_config_cmd *cmd;
8098 struct sk_buff *skb;
8099
8100 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8101 if (!skb)
8102 return ERR_PTR(-ENOMEM);
8103
8104 cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
8105 cmd->param = __cpu_to_le32(param);
8106
8107 ath10k_dbg(ar, ATH10K_DBG_WMI,
8108 "wmi pdev get tpc config param %d\n", param);
8109 return skb;
8110}
8111
8112size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
8113{
8114 struct ath10k_fw_stats_peer *i;
8115 size_t num = 0;
8116
8117 list_for_each_entry(i, head, list)
8118 ++num;
8119
8120 return num;
8121}
8122
8123size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
8124{
8125 struct ath10k_fw_stats_vdev *i;
8126 size_t num = 0;
8127
8128 list_for_each_entry(i, head, list)
8129 ++num;
8130
8131 return num;
8132}
8133
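/*
 * fw_stats formatting helpers. Each *_fill() routine appends one block
 * of human readable statistics to a buffer of ATH10K_FW_STATS_BUF_SIZE
 * bytes; scnprintf() clamps every write, so oversized output is
 * truncated rather than overflowing the buffer.
 */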
8134static void
8135ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8136 char *buf, u32 *length)
8137{
8138 u32 len = *length;
8139 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8140
8141 len += scnprintf(buf + len, buf_len - len, "\n");
8142 len += scnprintf(buf + len, buf_len - len, "%30s\n",
8143 "ath10k PDEV stats");
8144 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8145 "=================");
8146
8147 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8148 "Channel noise floor", pdev->ch_noise_floor);
8149 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8150 "Channel TX power", pdev->chan_tx_power);
8151 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8152 "TX frame count", pdev->tx_frame_count);
8153 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8154 "RX frame count", pdev->rx_frame_count);
8155 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8156 "RX clear count", pdev->rx_clear_count);
8157 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8158 "Cycle count", pdev->cycle_count);
8159 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8160 "PHY error count", pdev->phy_err_count);
8161
8162 *length = len;
8163}
8164
8165static void
8166ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8167 char *buf, u32 *length)
8168{
8169 u32 len = *length;
8170 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8171
8172 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8173 "RTS bad count", pdev->rts_bad);
8174 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8175 "RTS good count", pdev->rts_good);
8176 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8177 "FCS bad count", pdev->fcs_bad);
8178 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8179 "No beacon count", pdev->no_beacons);
8180 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8181 "MIB int count", pdev->mib_int_count);
8182
8183 len += scnprintf(buf + len, buf_len - len, "\n");
8184 *length = len;
8185}
8186
8187static void
8188ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8189 char *buf, u32 *length)
8190{
8191 u32 len = *length;
8192 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8193
8194 len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8195 "ath10k PDEV TX stats");
8196 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8197 "=================");
8198
8199 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8200 "HTT cookies queued", pdev->comp_queued);
8201 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8202 "HTT cookies disp.", pdev->comp_delivered);
8203 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8204 "MSDU queued", pdev->msdu_enqued);
8205 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8206 "MPDU queued", pdev->mpdu_enqued);
8207 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8208 "MSDUs dropped", pdev->wmm_drop);
8209 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8210 "Local enqued", pdev->local_enqued);
8211 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8212 "Local freed", pdev->local_freed);
8213 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8214 "HW queued", pdev->hw_queued);
8215 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8216 "PPDUs reaped", pdev->hw_reaped);
8217 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8218 "Num underruns", pdev->underrun);
8219 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8220 "PPDUs cleaned", pdev->tx_abort);
8221 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8222 "MPDUs requed", pdev->mpdus_requed);
8223 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8224 "Excessive retries", pdev->tx_ko);
8225 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8226 "HW rate", pdev->data_rc);
8227 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8228 "Sched self triggers", pdev->self_triggers);
8229 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8230 "Dropped due to SW retries",
8231 pdev->sw_retry_failure);
8232 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8233 "Illegal rate phy errors",
8234 pdev->illgl_rate_phy_err);
8235 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8236 "Pdev continuous xretry", pdev->pdev_cont_xretry);
8237 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8238 "TX timeout", pdev->pdev_tx_timeout);
8239 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8240 "PDEV resets", pdev->pdev_resets);
8241 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8242 "PHY underrun", pdev->phy_underrun);
8243 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8244 "MPDU is more than txop limit", pdev->txop_ovf);
8245 *length = len;
8246}
8247
8248static void
8249ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8250 char *buf, u32 *length)
8251{
8252 u32 len = *length;
8253 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8254
8255 len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8256 "ath10k PDEV RX stats");
8257 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8258 "=================");
8259
8260 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8261 "Mid PPDU route change",
8262 pdev->mid_ppdu_route_change);
8263 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8264 "Tot. number of statuses", pdev->status_rcvd);
8265 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8266 "Extra frags on ring 0", pdev->r0_frags);
8267 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8268 "Extra frags on ring 1", pdev->r1_frags);
8269 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8270 "Extra frags on ring 2", pdev->r2_frags);
8271 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8272 "Extra frags on ring 3", pdev->r3_frags);
8273 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8274 "MSDUs delivered to HTT", pdev->htt_msdus);
8275 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8276 "MPDUs delivered to HTT", pdev->htt_mpdus);
8277 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8278 "MSDUs delivered to stack", pdev->loc_msdus);
8279 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8280 "MPDUs delivered to stack", pdev->loc_mpdus);
8281 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8282 "Oversized AMSUs", pdev->oversize_amsdu);
8283 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8284 "PHY errors", pdev->phy_errs);
8285 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8286 "PHY errors drops", pdev->phy_err_drop);
8287 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8288 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8289 *length = len;
8290}
8291
8292static void
8293ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8294 char *buf, u32 *length)
8295{
8296 u32 len = *length;
8297 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8298 int i;
8299
8300 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8301 "vdev id", vdev->vdev_id);
8302 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8303 "beacon snr", vdev->beacon_snr);
8304 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8305 "data snr", vdev->data_snr);
8306 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8307 "num rx frames", vdev->num_rx_frames);
8308 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8309 "num rts fail", vdev->num_rts_fail);
8310 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8311 "num rts success", vdev->num_rts_success);
8312 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8313 "num rx err", vdev->num_rx_err);
8314 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8315 "num rx discard", vdev->num_rx_discard);
8316 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8317 "num tx not acked", vdev->num_tx_not_acked);
8318
8319 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8320 len += scnprintf(buf + len, buf_len - len,
8321 "%25s [%02d] %u\n",
8322 "num tx frames", i,
8323 vdev->num_tx_frames[i]);
8324
8325 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8326 len += scnprintf(buf + len, buf_len - len,
8327 "%25s [%02d] %u\n",
8328 "num tx frames retries", i,
8329 vdev->num_tx_frames_retries[i]);
8330
8331 for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8332 len += scnprintf(buf + len, buf_len - len,
8333 "%25s [%02d] %u\n",
8334 "num tx frames failures", i,
8335 vdev->num_tx_frames_failures[i]);
8336
8337 for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8338 len += scnprintf(buf + len, buf_len - len,
8339 "%25s [%02d] 0x%08x\n",
8340 "tx rate history", i,
8341 vdev->tx_rate_history[i]);
8342
8343 for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8344 len += scnprintf(buf + len, buf_len - len,
8345 "%25s [%02d] %u\n",
8346 "beacon rssi history", i,
8347 vdev->beacon_rssi_history[i]);
8348
8349 len += scnprintf(buf + len, buf_len - len, "\n");
8350 *length = len;
8351}
8352
8353static void
8354ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8355 char *buf, u32 *length, bool extended_peer)
8356{
8357 u32 len = *length;
8358 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8359
8360 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8361 "Peer MAC address", peer->peer_macaddr);
8362 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8363 "Peer RSSI", peer->peer_rssi);
8364 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8365 "Peer TX rate", peer->peer_tx_rate);
8366 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8367 "Peer RX rate", peer->peer_rx_rate);
8368 if (!extended_peer)
8369 len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8370 "Peer RX duration", peer->rx_duration);
8371
8372 len += scnprintf(buf + len, buf_len - len, "\n");
8373 *length = len;
8374}
8375
8376static void
8377ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
8378 char *buf, u32 *length)
8379{
8380 u32 len = *length;
8381 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8382
8383 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8384 "Peer MAC address", peer->peer_macaddr);
8385 len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8386 "Peer RX duration", peer->rx_duration);
8387}
8388
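/*
 * Render the full fw_stats report (pdev, vdev and peer blocks) for
 * firmware using the main WMI interface. ar->data_lock is held while
 * walking the stats lists so they cannot change under the formatter.
 */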
8389void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8390 struct ath10k_fw_stats *fw_stats,
8391 char *buf)
8392{
8393 u32 len = 0;
8394 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8395 const struct ath10k_fw_stats_pdev *pdev;
8396 const struct ath10k_fw_stats_vdev *vdev;
8397 const struct ath10k_fw_stats_peer *peer;
8398 size_t num_peers;
8399 size_t num_vdevs;
8400
8401 spin_lock_bh(&ar->data_lock);
8402
8403 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8404 struct ath10k_fw_stats_pdev, list);
8405 if (!pdev) {
8406 ath10k_warn(ar, "failed to get pdev stats\n");
8407 goto unlock;
8408 }
8409
8410 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8411 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8412
8413 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8414 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8415 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8416
8417 len += scnprintf(buf + len, buf_len - len, "\n");
8418 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8419 "ath10k VDEV stats", num_vdevs);
8420 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8421 "=================");
8422
8423 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8424 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8425 }
8426
8427 len += scnprintf(buf + len, buf_len - len, "\n");
8428 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8429 "ath10k PEER stats", num_peers);
8430 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8431 "=================");
8432
8433 list_for_each_entry(peer, &fw_stats->peers, list) {
8434 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8435 fw_stats->extended);
8436 }
8437
8438unlock:
8439 spin_unlock_bh(&ar->data_lock);
8440
8441 if (len >= buf_len)
8442 buf[len - 1] = 0;
8443 else
8444 buf[len] = 0;
8445}
8446
8447void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8448 struct ath10k_fw_stats *fw_stats,
8449 char *buf)
8450{
8451 unsigned int len = 0;
8452 unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8453 const struct ath10k_fw_stats_pdev *pdev;
8454 const struct ath10k_fw_stats_vdev *vdev;
8455 const struct ath10k_fw_stats_peer *peer;
8456 size_t num_peers;
8457 size_t num_vdevs;
8458
8459 spin_lock_bh(&ar->data_lock);
8460
8461 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8462 struct ath10k_fw_stats_pdev, list);
8463 if (!pdev) {
8464 ath10k_warn(ar, "failed to get pdev stats\n");
8465 goto unlock;
8466 }
8467
8468 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8469 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8470
8471 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8472 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8473 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8474 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8475
8476 len += scnprintf(buf + len, buf_len - len, "\n");
8477 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8478 "ath10k VDEV stats", num_vdevs);
8479 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8480 "=================");
8481
8482 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8483 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8484 }
8485
8486 len += scnprintf(buf + len, buf_len - len, "\n");
8487 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8488 "ath10k PEER stats", num_peers);
8489 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8490 "=================");
8491
8492 list_for_each_entry(peer, &fw_stats->peers, list) {
8493 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8494 fw_stats->extended);
8495 }
8496
8497unlock:
8498 spin_unlock_bh(&ar->data_lock);
8499
8500 if (len >= buf_len)
8501 buf[len - 1] = 0;
8502 else
8503 buf[len] = 0;
8504}
8505
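/*
 * Build a command enabling or disabling adaptive CCA (clear channel
 * assessment) with the given detection level and margin thresholds.
 */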
8506static struct sk_buff *
8507ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8508 u32 detect_level, u32 detect_margin)
8509{
8510 struct wmi_pdev_set_adaptive_cca_params *cmd;
8511 struct sk_buff *skb;
8512
8513 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8514 if (!skb)
8515 return ERR_PTR(-ENOMEM);
8516
8517 cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8518 cmd->enable = __cpu_to_le32(enable);
8519 cmd->cca_detect_level = __cpu_to_le32(detect_level);
8520 cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8521
8522 ath10k_dbg(ar, ATH10K_DBG_WMI,
8523 "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8524 enable, detect_level, detect_margin);
8525 return skb;
8526}
8527
8528static void
8529ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8530 char *buf, u32 *length)
8531{
8532 u32 len = *length;
8533 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8534 u32 val;
8535
8536 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8537 "vdev id", vdev->vdev_id);
8538 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8539 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8540 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8541 "ppdu noack", vdev->ppdu_noack);
8542 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8543 "mpdu queued", vdev->mpdu_queued);
8544 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8545 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8546 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8547 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8548 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8549 "mpdu success retry", vdev->mpdu_suc_retry);
8550 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8551 "mpdu success multitry", vdev->mpdu_suc_multitry);
8552 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8553 "mpdu fail retry", vdev->mpdu_fail_retry);
8554 val = vdev->tx_ftm_suc;
8555 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8556 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8557 "tx ftm success",
8558 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8559 val = vdev->tx_ftm_suc_retry;
8560 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8561 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8562 "tx ftm success retry",
8563 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8564 val = vdev->tx_ftm_fail;
8565 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8566 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8567 "tx ftm fail",
8568 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8569 val = vdev->rx_ftmr_cnt;
8570 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8571 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8572 "rx ftm request count",
8573 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8574 val = vdev->rx_ftmr_dup_cnt;
8575 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8576 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8577 "rx ftm request dup count",
8578 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8579 val = vdev->rx_iftmr_cnt;
8580 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8581 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8582 "rx initial ftm req count",
8583 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8584 val = vdev->rx_iftmr_dup_cnt;
8585 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8586 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8587 "rx initial ftm req dup cnt",
8588 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8589 len += scnprintf(buf + len, buf_len - len, "\n");
8590
8591 *length = len;
8592}
8593
8594void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8595 struct ath10k_fw_stats *fw_stats,
8596 char *buf)
8597{
8598 u32 len = 0;
8599 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8600 const struct ath10k_fw_stats_pdev *pdev;
8601 const struct ath10k_fw_stats_vdev_extd *vdev;
8602 const struct ath10k_fw_stats_peer *peer;
8603 const struct ath10k_fw_extd_stats_peer *extd_peer;
8604 size_t num_peers;
8605 size_t num_vdevs;
8606
8607 spin_lock_bh(&ar->data_lock);
8608
8609 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8610 struct ath10k_fw_stats_pdev, list);
8611 if (!pdev) {
8612 ath10k_warn(ar, "failed to get pdev stats\n");
8613 goto unlock;
8614 }
8615
8616 num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
8617 num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
8618
8619 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8620 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8621 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8622
8623 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8624 "HW paused", pdev->hw_paused);
8625 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8626 "Seqs posted", pdev->seq_posted);
8627 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8628 "Seqs failed queueing", pdev->seq_failed_queueing);
8629 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8630 "Seqs completed", pdev->seq_completed);
8631 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8632 "Seqs restarted", pdev->seq_restarted);
8633 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8634 "MU Seqs posted", pdev->mu_seq_posted);
8635 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8636 "MPDUs SW flushed", pdev->mpdus_sw_flush);
8637 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8638 "MPDUs HW filtered", pdev->mpdus_hw_filter);
8639 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8640 "MPDUs truncated", pdev->mpdus_truncated);
8641 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8642 "MPDUs receive no ACK", pdev->mpdus_ack_failed);
8643 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8644 "MPDUs expired", pdev->mpdus_expired);
8645
8646 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8647 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8648 "Num Rx Overflow errors", pdev->rx_ovfl_errs);
8649
8650 len += scnprintf(buf + len, buf_len - len, "\n");
8651 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8652 "ath10k VDEV stats", num_vdevs);
8653 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8654 "=================");
8655 list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8656 ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8657 }
8658
8659 len += scnprintf(buf + len, buf_len - len, "\n");
8660 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8661 "ath10k PEER stats", num_peers);
8662 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8663 "=================");
8664
8665 list_for_each_entry(peer, &fw_stats->peers, list) {
8666 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8667 fw_stats->extended);
8668 }
8669
8670 if (fw_stats->extended) {
8671 list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
8672 ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
8673 &len);
8674 }
8675 }
8676
8677unlock:
8678 spin_unlock_bh(&ar->data_lock);
8679
8680 if (len >= buf_len)
8681 buf[len - 1] = 0;
8682 else
8683 buf[len] = 0;
8684}
8685
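/*
 * Map the driver's generic vdev subtype onto the numeric value a given
 * firmware branch understands; subtypes the branch does not support are
 * reported as -ENOTSUPP.
 */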
8686int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8687 enum wmi_vdev_subtype subtype)
8688{
8689 switch (subtype) {
8690 case WMI_VDEV_SUBTYPE_NONE:
8691 return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8692 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8693 return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8694 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8695 return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8696 case WMI_VDEV_SUBTYPE_P2P_GO:
8697 return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8698 case WMI_VDEV_SUBTYPE_PROXY_STA:
8699 return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8700 case WMI_VDEV_SUBTYPE_MESH_11S:
8701 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8702 return -ENOTSUPP;
8703 }
8704 return -ENOTSUPP;
8705}
8706
8707static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8708 enum wmi_vdev_subtype subtype)
8709{
8710 switch (subtype) {
8711 case WMI_VDEV_SUBTYPE_NONE:
8712 return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8713 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8714 return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8715 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8716 return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8717 case WMI_VDEV_SUBTYPE_P2P_GO:
8718 return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8719 case WMI_VDEV_SUBTYPE_PROXY_STA:
8720 return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8721 case WMI_VDEV_SUBTYPE_MESH_11S:
8722 return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8723 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8724 return -ENOTSUPP;
8725 }
8726 return -ENOTSUPP;
8727}
8728
8729static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8730 enum wmi_vdev_subtype subtype)
8731{
8732 switch (subtype) {
8733 case WMI_VDEV_SUBTYPE_NONE:
8734 return WMI_VDEV_SUBTYPE_10_4_NONE;
8735 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8736 return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8737 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8738 return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8739 case WMI_VDEV_SUBTYPE_P2P_GO:
8740 return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8741 case WMI_VDEV_SUBTYPE_PROXY_STA:
8742 return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8743 case WMI_VDEV_SUBTYPE_MESH_11S:
8744 return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8745 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8746 return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8747 }
8748 return -ENOTSUPP;
8749}
8750
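/*
 * Build the 10.4 extended resource config command. This mostly sizes
 * TDLS related firmware resources: coex/GPIO fields are left at -1
 * (unused) and the concurrent sleep-STA limit is only non-zero when the
 * firmware advertises WMI_SERVICE_TDLS_UAPSD_SLEEP_STA.
 */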
8751static struct sk_buff *
8752ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8753 enum wmi_host_platform_type type,
8754 u32 fw_feature_bitmap)
8755{
8756 struct wmi_ext_resource_config_10_4_cmd *cmd;
8757 struct sk_buff *skb;
8758 u32 num_tdls_sleep_sta = 0;
8759
8760 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8761 if (!skb)
8762 return ERR_PTR(-ENOMEM);
8763
8764 if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8765 num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8766
8767 cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8768 cmd->host_platform_config = __cpu_to_le32(type);
8769 cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8770 cmd->wlan_gpio_priority = __cpu_to_le32(-1);
8771 cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8772 cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8773 cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8774 cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8775 cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8776 cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8777 cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8778 cmd->max_tdls_concurrent_buffer_sta =
8779 __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8780
8781 ath10k_dbg(ar, ATH10K_DBG_WMI,
8782 "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8783 type, fw_feature_bitmap);
8784 return skb;
8785}
8786
8787static struct sk_buff *
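/*
 * Build the 10.4 per-vdev TDLS state command. The thresholds and
 * timeouts are fixed driver defaults; an active-mode request is
 * downgraded to passive when the firmware only supports explicit TDLS
 * mode, and buffer-STA support is advertised only when the matching WMI
 * service bit is set.
 */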
8788ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8789 enum wmi_tdls_state state)
8790{
8791 struct wmi_10_4_tdls_set_state_cmd *cmd;
8792 struct sk_buff *skb;
8793 u32 options = 0;
8794
8795 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8796 if (!skb)
8797 return ERR_PTR(-ENOMEM);
8798
8799 if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8800 state == WMI_TDLS_ENABLE_ACTIVE)
8801 state = WMI_TDLS_ENABLE_PASSIVE;
8802
8803 if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8804 options |= WMI_TDLS_BUFFER_STA_EN;
8805
8806 cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8807 cmd->vdev_id = __cpu_to_le32(vdev_id);
8808 cmd->state = __cpu_to_le32(state);
8809 cmd->notification_interval_ms = __cpu_to_le32(5000);
8810 cmd->tx_discovery_threshold = __cpu_to_le32(100);
8811 cmd->tx_teardown_threshold = __cpu_to_le32(5);
8812 cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8813 cmd->rssi_delta = __cpu_to_le32(-20);
8814 cmd->tdls_options = __cpu_to_le32(options);
8815 cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8816 cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8817 cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8818 cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8819 cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8820 cmd->teardown_notification_ms = __cpu_to_le32(10);
8821 cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8822
8823 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8824 state, vdev_id);
8825 return skb;
8826}
8827
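/* Translate UAPSD queue flags and the service period into the WMI TDLS
 * peer_qos bitmap carried in peer capability updates.
 */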
8828static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8829{
8830 u32 peer_qos = 0;
8831
8832 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8833 peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8834 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8835 peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8836 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8837 peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8838 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8839 peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8840
8841 peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8842
8843 return peer_qos;
8844}
8845
8846static struct sk_buff *
8847ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8848{
8849 struct wmi_pdev_get_tpc_table_cmd *cmd;
8850 struct sk_buff *skb;
8851
8852 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8853 if (!skb)
8854 return ERR_PTR(-ENOMEM);
8855
8856 cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8857 cmd->param = __cpu_to_le32(param);
8858
8859 ath10k_dbg(ar, ATH10K_DBG_WMI,
8860 "wmi pdev get tpc table param:%d\n", param);
8861 return skb;
8862}
8863
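/*
 * Build a 10.4 TDLS peer update command. The command struct already
 * reserves space for one channel entry, so only (peer_chan_len - 1)
 * additional wmi_channel entries are appended for the peer channel list.
 */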
8864static struct sk_buff *
8865ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8866 const struct wmi_tdls_peer_update_cmd_arg *arg,
8867 const struct wmi_tdls_peer_capab_arg *cap,
8868 const struct wmi_channel_arg *chan_arg)
8869{
8870 struct wmi_10_4_tdls_peer_update_cmd *cmd;
8871 struct wmi_tdls_peer_capabilities *peer_cap;
8872 struct wmi_channel *chan;
8873 struct sk_buff *skb;
8874 u32 peer_qos;
8875 int len, chan_len;
8876 int i;
8877
8878 /* tdls peer update cmd has a placeholder for one channel */
8879 chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8880
8881 len = sizeof(*cmd) + chan_len * sizeof(*chan);
8882
8883 skb = ath10k_wmi_alloc_skb(ar, len);
8884 if (!skb)
8885 return ERR_PTR(-ENOMEM);
8886
8887 memset(skb->data, 0, sizeof(*cmd));
8888
8889 cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8890 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8891 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8892 cmd->peer_state = __cpu_to_le32(arg->peer_state);
8893
8894 peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8895 cap->peer_max_sp);
8896
8897 peer_cap = &cmd->peer_capab;
8898 peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8899 peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8900 peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8901 peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8902 peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8903 peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
8904 peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
8905
8906 for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
8907 peer_cap->peer_operclass[i] = cap->peer_operclass[i];
8908
8909 peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
8910 peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
8911 peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
8912
8913 for (i = 0; i < cap->peer_chan_len; i++) {
8914 chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
8915 ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
8916 }
8917
8918 ath10k_dbg(ar, ATH10K_DBG_WMI,
8919 "wmi tdls peer update vdev %i state %d n_chans %u\n",
8920 arg->vdev_id, arg->peer_state, cap->peer_chan_len);
8921 return skb;
8922}
8923
8924static struct sk_buff *
8925ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
8926 const struct ath10k_radar_found_info *arg)
8927{
8928 struct wmi_radar_found_info *cmd;
8929 struct sk_buff *skb;
8930
8931 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8932 if (!skb)
8933 return ERR_PTR(-ENOMEM);
8934
8935 cmd = (struct wmi_radar_found_info *)skb->data;
8936 cmd->pri_min = __cpu_to_le32(arg->pri_min);
8937 cmd->pri_max = __cpu_to_le32(arg->pri_max);
8938 cmd->width_min = __cpu_to_le32(arg->width_min);
8939 cmd->width_max = __cpu_to_le32(arg->width_max);
8940 cmd->sidx_min = __cpu_to_le32(arg->sidx_min);
8941 cmd->sidx_max = __cpu_to_le32(arg->sidx_max);
8942
8943 ath10k_dbg(ar, ATH10K_DBG_WMI,
8944 "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
8945 arg->pri_min, arg->pri_max, arg->width_min,
8946 arg->width_max, arg->sidx_min, arg->sidx_max);
8947 return skb;
8948}
8949
8950static struct sk_buff *
8951ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
8952{
8953 struct wmi_echo_cmd *cmd;
8954 struct sk_buff *skb;
8955
8956 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8957 if (!skb)
8958 return ERR_PTR(-ENOMEM);
8959
8960 cmd = (struct wmi_echo_cmd *)skb->data;
8961 cmd->value = cpu_to_le32(value);
8962
8963 ath10k_dbg(ar, ATH10K_DBG_WMI,
8964 "wmi echo value 0x%08x\n", value);
8965 return skb;
8966}
8967
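/*
 * Flush the WMI command pipe by sending an echo with a well known cookie
 * and waiting for the matching echo event. A minimal usage sketch, with
 * error handling left to the caller:
 *
 *	ret = ath10k_wmi_barrier(ar);
 *	if (ret)
 *		ath10k_warn(ar, "failed to barrier wmi: %d\n", ret);
 *
 * Returns 0 on success, a negative error if the echo command could not
 * be sent, or -ETIMEDOUT if no echo event arrives within
 * ATH10K_WMI_BARRIER_TIMEOUT_HZ.
 */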
8968int
8969ath10k_wmi_barrier(struct ath10k *ar)
8970{
8971 int ret;
8972 int time_left;
8973
8974 spin_lock_bh(&ar->data_lock);
8975 reinit_completion(&ar->wmi.barrier);
8976 spin_unlock_bh(&ar->data_lock);
8977
8978 ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
8979 if (ret) {
8980 ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
8981 return ret;
8982 }
8983
8984 time_left = wait_for_completion_timeout(&ar->wmi.barrier,
8985 ATH10K_WMI_BARRIER_TIMEOUT_HZ);
8986 if (!time_left)
8987 return -ETIMEDOUT;
8988
8989 return 0;
8990}
8991
8992static struct sk_buff *
8993ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
8994 const struct wmi_bb_timing_cfg_arg *arg)
8995{
8996 struct wmi_pdev_bb_timing_cfg_cmd *cmd;
8997 struct sk_buff *skb;
8998
8999 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9000 if (!skb)
9001 return ERR_PTR(-ENOMEM);
9002
9003 cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
9004 cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
9005 cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
9006
9007 ath10k_dbg(ar, ATH10K_DBG_WMI,
9008 "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
9009 arg->bb_tx_timing, arg->bb_xpa_timing);
9010 return skb;
9011}
9012
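/*
 * Per WMI-version op tables. ath10k_wmi_attach() selects one of these
 * based on the running firmware's WMI op version; ops left NULL (see the
 * "not implemented" markers) make the corresponding wmi-ops.h wrapper
 * report the call as unsupported.
 */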
9013static const struct wmi_ops wmi_ops = {
9014 .rx = ath10k_wmi_op_rx,
9015 .map_svc = wmi_main_svc_map,
9016
9017 .pull_scan = ath10k_wmi_op_pull_scan_ev,
9018 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9019 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9020 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9021 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9022 .pull_swba = ath10k_wmi_op_pull_swba_ev,
9023 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9024 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9025 .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9026 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9027 .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
9028 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9029 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9030
9031 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9032 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9033 .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
9034 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9035 .gen_init = ath10k_wmi_op_gen_init,
9036 .gen_start_scan = ath10k_wmi_op_gen_start_scan,
9037 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9038 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9039 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9040 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9041 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9042 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9043 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9044 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9045 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9046 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9047 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9048 /* .gen_vdev_wmm_conf not implemented */
9049 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
9050 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9051 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9052 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9053 .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
9054 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9055 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9056 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9057 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9058 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9059 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9060 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
9061 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9062 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9063 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9064 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9065 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9066 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9067 /* .gen_pdev_get_temperature not implemented */
9068 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9069 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
9070 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9071 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
9072 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
9073 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9074 .gen_echo = ath10k_wmi_op_gen_echo,
9075 /* .gen_bcn_tmpl not implemented */
9076 /* .gen_prb_tmpl not implemented */
9077 /* .gen_p2p_go_bcn_ie not implemented */
9078 /* .gen_adaptive_qcs not implemented */
9079 /* .gen_pdev_enable_adaptive_cca not implemented */
9080};
9081
9082static const struct wmi_ops wmi_10_1_ops = {
9083 .rx = ath10k_wmi_10_1_op_rx,
9084 .map_svc = wmi_10x_svc_map,
9085 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9086 .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
9087 .gen_init = ath10k_wmi_10_1_op_gen_init,
9088 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9089 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9090 .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
9091 /* .gen_pdev_get_temperature not implemented */
9092
9093 /* shared with main branch */
9094 .pull_scan = ath10k_wmi_op_pull_scan_ev,
9095 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9096 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9097 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9098 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9099 .pull_swba = ath10k_wmi_op_pull_swba_ev,
9100 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9101 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9102 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9103 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9104 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9105
9106 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9107 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9108 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9109 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9110 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9111 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9112 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9113 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9114 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9115 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9116 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9117 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9118 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9119 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9120 /* .gen_vdev_wmm_conf not implemented */
9121 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
9122 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9123 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9124 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9125 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9126 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9127 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9128 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9129 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9130 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9131 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
9132 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9133 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9134 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9135 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9136 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9137 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9138 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9139 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
9140 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9141 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
9142 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9143 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9144 .gen_echo = ath10k_wmi_op_gen_echo,
9145 /* .gen_bcn_tmpl not implemented */
9146 /* .gen_prb_tmpl not implemented */
9147 /* .gen_p2p_go_bcn_ie not implemented */
9148 /* .gen_adaptive_qcs not implemented */
9149 /* .gen_pdev_enable_adaptive_cca not implemented */
9150};
9151
9152static const struct wmi_ops wmi_10_2_ops = {
9153 .rx = ath10k_wmi_10_2_op_rx,
9154 .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
9155 .gen_init = ath10k_wmi_10_2_op_gen_init,
9156 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9157 /* .gen_pdev_get_temperature not implemented */
9158
9159 /* shared with 10.1 */
9160 .map_svc = wmi_10x_svc_map,
9161 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9162 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9163 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9164 .gen_echo = ath10k_wmi_op_gen_echo,
9165
9166 .pull_scan = ath10k_wmi_op_pull_scan_ev,
9167 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9168 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9169 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9170 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9171 .pull_swba = ath10k_wmi_op_pull_swba_ev,
9172 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9173 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9174 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9175 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9176 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9177
9178 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9179 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9180 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9181 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9182 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9183 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9184 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9185 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9186 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9187 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9188 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9189 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9190 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9191 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9192 /* .gen_vdev_wmm_conf not implemented */
9193 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
9194 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9195 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9196 .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9197 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9198 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9199 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9200 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9201 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9202 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9203 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9204 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
9205 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9206 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9207 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9208 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9209 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9210 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9211 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9212 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
9213 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9214 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
9215 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9216 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9217 /* .gen_pdev_enable_adaptive_cca not implemented */
9218};
9219
9220static const struct wmi_ops wmi_10_2_4_ops = {
9221 .rx = ath10k_wmi_10_2_op_rx,
9222 .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
9223 .gen_init = ath10k_wmi_10_2_op_gen_init,
9224 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9225 .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9226 .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9227
9228 /* shared with 10.1 */
9229 .map_svc = wmi_10x_svc_map,
9230 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9231 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9232 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9233 .gen_echo = ath10k_wmi_op_gen_echo,
9234
9235 .pull_scan = ath10k_wmi_op_pull_scan_ev,
9236 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9237 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9238 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9239 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9240 .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
9241 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9242 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9243 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9244 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9245 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9246
9247 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9248 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9249 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9250 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9251 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9252 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9253 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9254 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9255 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9256 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9257 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9258 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9259 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9260 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9261 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
9262 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9263 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9264 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9265 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9266 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9267 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9268 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9269 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9270 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9271 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
9272 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9273 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9274 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9275 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9276 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9277 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9278 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9279 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
9280 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9281 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
9282 .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9283 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9284 .gen_pdev_enable_adaptive_cca =
9285 ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
9286 .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
9287 .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
9288 /* .gen_bcn_tmpl not implemented */
9289 /* .gen_prb_tmpl not implemented */
9290 /* .gen_p2p_go_bcn_ie not implemented */
9291 /* .gen_adaptive_qcs not implemented */
9292};
9293
static const struct wmi_ops wmi_10_4_ops = {
	.rx = ath10k_wmi_10_4_op_rx,
	.map_svc = wmi_10_4_svc_map,

	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_10_4_op_gen_init,
	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
	.gen_pdev_get_tpc_table_cmdid =
			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,

	/* shared with 10.2 */
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
	.gen_echo = ath10k_wmi_op_gen_echo,
	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
};

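/*
 * Bind the WMI command map, parameter maps and ops table that match the
 * WMI interface version reported by the running firmware, then set up the
 * completions and deferred work shared by all WMI variants.
 */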
int ath10k_wmi_attach(struct ath10k *ar)
{
	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->wmi.ops = &wmi_10_4_ops;
		ar->wmi.cmd = &wmi_10_4_cmd_map;
		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
		ar->wmi.ops = &wmi_10_2_4_ops;
		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2:
		ar->wmi.cmd = &wmi_10_2_cmd_map;
		ar->wmi.ops = &wmi_10_2_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
		ar->wmi.cmd = &wmi_10x_cmd_map;
		ar->wmi.ops = &wmi_10_1_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->wmi.cmd = &wmi_cmd_map;
		ar->wmi.ops = &wmi_ops;
		ar->wmi.vdev_param = &wmi_vdev_param_map;
		ar->wmi.pdev_param = &wmi_pdev_param_map;
		ar->wmi.peer_flags = &wmi_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		ath10k_wmi_tlv_attach(ar);
		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		ath10k_err(ar, "unsupported WMI op version: %d\n",
			   ar->running_fw->fw_file.wmi_op_version);
		return -EINVAL;
	}

	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_completion(&ar->wmi.barrier);
	init_completion(&ar->wmi.radar_confirm);

	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
	INIT_WORK(&ar->radar_confirmation_work,
		  ath10k_radar_confirmation_work);

	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features)) {
		idr_init(&ar->wmi.mgmt_pending_tx);
	}

	return 0;
}

void ath10k_wmi_free_host_mem(struct ath10k *ar)
{
	int i;

	/* free the host memory chunks requested by firmware */
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		dma_free_coherent(ar->dev,
				  ar->wmi.mem_chunks[i].len,
				  ar->wmi.mem_chunks[i].vaddr,
				  ar->wmi.mem_chunks[i].paddr);
	}

	ar->wmi.num_mem_chunks = 0;
}

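/*
 * idr_for_each() callback used during teardown: unmap and free a management
 * frame that is still waiting for a TX completion from firmware.
 */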
static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
					       void *ctx)
{
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
	struct ath10k *ar = ctx;
	struct sk_buff *msdu;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "force cleanup mgmt msdu_id %d\n", msdu_id);

	msdu = pkt_addr->vaddr;
	dma_unmap_single(ar->dev, pkt_addr->paddr,
			 msdu->len, DMA_TO_DEVICE);
	ieee80211_free_txskb(ar->hw, msdu);
	kfree(pkt_addr);

	return 0;
}

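/*
 * Undo ath10k_wmi_attach(): flush management frames still pending a TX
 * completion, stop the service-ready worker and drop the cached
 * service-ready event buffer.
 */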
void ath10k_wmi_detach(struct ath10k *ar)
{
	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features)) {
		spin_lock_bh(&ar->data_lock);
		idr_for_each(&ar->wmi.mgmt_pending_tx,
			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
		idr_destroy(&ar->wmi.mgmt_pending_tx);
		spin_unlock_bh(&ar->data_lock);
	}

	cancel_work_sync(&ar->svc_rdy_work);
	dev_kfree_skb(ar->svc_rdy_skb);
}