2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2015, Broadcom Corporation
7 * Portions contributed by Nvidia
8 * Copyright (C) 2015 NVIDIA Corporation. All rights reserved.
10 * Unless you and Broadcom execute a separate written software license
11 * agreement governing use of this software, this software is licensed to you
12 * under the terms of the GNU General Public License version 2 (the "GPL"),
13 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
14 * following added to such license:
16 * As a special exception, the copyright holders of this software give you
17 * permission to link this software with independent modules, and to copy and
18 * distribute the resulting executable under terms of your choice, provided that
19 * you also meet, for each linked independent module, the terms and conditions of
20 * the license of that module. An independent module is a module which is not
21 * derived from this software. The special exception does not apply to any
22 * modifications of the software.
24 * Notwithstanding the above, under no circumstances may you combine this
25 * software in any way with any other Broadcom software provided under a license
26 * other than the GPL, without Broadcom's express prior written consent.
28 * $Id: dhd_linux.c 531927 2015-02-04 14:00:07Z $
36 #include <linux/syscalls.h>
37 #include <event_log.h>
38 #endif /* SHOW_LOGTRACE */
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/inetdevice.h>
47 #include <linux/rtnetlink.h>
48 #include <linux/etherdevice.h>
49 #include <linux/random.h>
50 #include <linux/spinlock.h>
51 #include <linux/ethtool.h>
52 #include <linux/fcntl.h>
55 #include <linux/reboot.h>
56 #include <linux/notifier.h>
57 #include <net/addrconf.h>
58 #ifdef ENABLE_ADAPTIVE_SCHED
59 #include <linux/cpufreq.h>
60 #endif /* ENABLE_ADAPTIVE_SCHED */
62 #include <asm/uaccess.h>
63 #include <asm/unaligned.h>
67 #include <bcmendian.h>
70 #include <proto/ethernet.h>
71 #include <proto/bcmevent.h>
72 #include <proto/vlan.h>
74 #include <proto/bcmicmp.h>
76 #include <proto/802.3.h>
78 #include <dngl_stats.h>
79 #include <dhd_linux_wq.h>
81 #include <dhd_linux.h>
82 #ifdef PCIE_FULL_DONGLE
83 #include <dhd_flowring.h>
86 #include <dhd_proto.h>
88 /* Used for the bottom half, so same priority as the other irqthread */
89 #define DHD_DEFAULT_RT_PRIORITY (MAX_USER_RT_PRIO / 2)
90 #ifdef CONFIG_HAS_WAKELOCK
91 #include <linux/wakelock.h>
94 #include <wl_cfg80211.h>
97 #include <wl_cfgp2p.h>
104 #include <linux/compat.h>
108 #include <dhd_wmf_linux.h>
111 #ifdef DHDTCPACK_SUPPRESS
113 #endif /* DHDTCPACK_SUPPRESS */
115 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
116 #include "dhd_custom_sysfs_tegra.h"
117 #include "dhd_custom_sysfs_tegra_scan.h"
119 #define RX_CAPTURE(skb)\
121 tegra_sysfs_histogram_tcpdump_rx(skb, __func__, __LINE__);\
124 #define DPC_CAPTURE(void)\
126 tegra_sysfs_dpc_pkt();\
131 #define RX_CAPTURE(skb)
133 #define DPC_CAPTURE(void)
138 #include <linux/time.h>
141 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
142 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
143 #define TSMAX 1000 /* max no. of timing record kept */
146 static uint32 tsidx = 0;
147 static uint32 htsf_seqnum = 0;
149 struct timeval tsync;
150 static uint32 tsport = 5010;
152 typedef struct histo_ {
156 #if !ISPOWEROF2(DHD_SDALIGN)
157 #error DHD_SDALIGN is not a power of 2!
160 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
161 #endif /* WLMEDIA_HTSF */
166 extern bool ap_cfg_running;
167 extern bool ap_fw_loaded;
171 #ifdef ENABLE_ADAPTIVE_SCHED
172 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
173 #ifndef CUSTOM_CPUFREQ_THRESH
174 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
175 #endif /* CUSTOM_CPUFREQ_THRESH */
176 #endif /* ENABLE_ADAPTIVE_SCHED */
178 /* enable HOSTIP cache update from the host side when an eth0:N is up */
179 #define AOE_IP_ALIAS_SUPPORT 1
183 #include <bcm_rpc_tp.h>
186 #include <wlfc_proto.h>
187 #include <dhd_wlfc.h>
190 #include <wl_android.h>
192 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
194 #endif /* CUSTOMER_HW20 && WLANAUDIO */
196 /* Maximum STA per radio */
197 #define DHD_MAX_STA 32
200 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
201 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
202 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
204 #ifdef ARP_OFFLOAD_SUPPORT
205 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
206 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
207 unsigned long event, void *ptr);
208 static struct notifier_block dhd_inetaddr_notifier = {
209 .notifier_call = dhd_inetaddr_notifier_call
211 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
212 * created in kernel notifier link list (with 'next' pointing to itself)
214 static bool dhd_inetaddr_notifier_registered = FALSE;
215 #endif /* ARP_OFFLOAD_SUPPORT */
218 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
219 unsigned long event, void *ptr);
220 static struct notifier_block dhd_inet6addr_notifier = {
221 .notifier_call = dhd_inet6addr_notifier_call
223 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
224 * created in kernel notifier link list (with 'next' pointing to itself)
226 static bool dhd_inet6addr_notifier_registered = FALSE;
229 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
230 #include <linux/suspend.h>
231 volatile bool dhd_mmc_suspend = FALSE;
232 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
233 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
235 #if defined(OOB_INTR_ONLY)
236 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
238 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
239 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
241 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
242 MODULE_LICENSE("GPL and additional rights");
243 #endif /* LinuxVer */
248 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
250 #ifndef PROP_TXSTATUS
251 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
253 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
255 #endif /* BCM_FD_AGGR */
258 extern bool dhd_wlfc_skip_fc(void);
259 extern void dhd_wlfc_plat_init(void *dhd);
260 extern void dhd_wlfc_plat_deinit(void *dhd);
261 #endif /* PROP_TXSTATUS */
263 extern int dhd_slpauto_config(dhd_pub_t *dhd, s32 val);
266 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
272 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
274 /* Linux wireless extension support */
275 #if defined(WL_WIRELESS_EXT)
277 extern wl_iw_extra_params_t g_wl_iw_params;
278 #endif /* defined(WL_WIRELESS_EXT) */
280 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
281 #include <linux/earlysuspend.h>
282 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
284 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
286 #ifdef PKT_FILTER_SUPPORT
287 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
288 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
289 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
294 extern int dhd_read_macaddr(struct dhd_info *dhd);
296 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
299 extern int dhd_write_macaddr(struct ether_addr *mac);
301 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
307 #if defined(DHD_DEBUG)
308 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
309 #endif /* DHD_DEBUG */
311 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
312 static struct notifier_block dhd_reboot_notifier = {
313 .notifier_call = dhd_reboot_callback,
318 typedef struct dhd_if_event {
319 struct list_head list;
320 wl_event_data_if_t event;
321 char name[IFNAMSIZ+1];
322 uint8 mac[ETHER_ADDR_LEN];
325 /* Interface control information */
326 typedef struct dhd_if {
327 struct dhd_info *info; /* back pointer to dhd_info */
328 /* OS/stack specifics */
329 struct net_device *net;
330 int idx; /* iface idx in dongle */
331 uint subunit; /* subunit */
332 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
335 uint8 bssidx; /* bsscfg index for the interface */
336 bool attached; /* Delayed attachment when unset */
337 bool txflowcontrol; /* Per interface flow control indicator */
338 char name[IFNAMSIZ+1]; /* linux interface name */
339 struct net_device_stats stats;
341 dhd_wmf_t wmf; /* per bsscfg wmf setting */
343 #ifdef PCIE_FULL_DONGLE
344 struct list_head sta_list; /* sll of associated stations */
345 #if !defined(BCM_GMAC3)
346 spinlock_t sta_list_lock; /* lock for manipulating sll */
347 #endif /* ! BCM_GMAC3 */
348 #endif /* PCIE_FULL_DONGLE */
349 uint32 ap_isolate; /* ap-isolation settings */
362 uint32 coef; /* scaling factor */
363 uint32 coefdec1; /* first decimal */
364 uint32 coefdec2; /* second decimal */
374 static tstamp_t ts[TSMAX];
375 static tstamp_t maxdelayts;
376 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
378 #endif /* WLMEDIA_HTSF */
380 struct ipv6_work_info_t {
386 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
387 #define MAX_WLANAUDIO_BLACKLIST 4
389 struct wlanaudio_blacklist {
392 ulong txfail_jiffies;
393 struct ether_addr blacklist_addr;
395 #endif /* CUSTOMER_HW20 && WLANAUDIO */
397 #if defined(DHD_DEBUG)
398 typedef struct dhd_dump {
402 #endif /* DHD_DEBUG */
404 /* When Perimeter locks are deployed, any blocking calls must be preceeded
405 * with a PERIM UNLOCK and followed by a PERIM LOCK.
406 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
407 * wait_event_timeout().
410 /* Local private structure (extension of pub) */
411 typedef struct dhd_info {
412 #if defined(WL_WIRELESS_EXT)
413 wl_iw_t iw; /* wireless extensions state (must be first) */
414 #endif /* defined(WL_WIRELESS_EXT) */
416 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
419 void *adapter; /* adapter information, interrupt, fw path etc. */
420 char fw_path[PATH_MAX]; /* path to firmware image */
421 char nv_path[PATH_MAX]; /* path to nvram vars file */
423 struct semaphore proto_sem;
425 spinlock_t wlfc_spinlock;
427 #endif /* PROP_TXSTATUS */
431 wait_queue_head_t ioctl_resp_wait;
432 wait_queue_head_t d3ack_wait;
433 uint32 default_wd_interval;
435 struct timer_list timer;
437 struct tasklet_struct tasklet;
442 struct semaphore sdsem;
443 tsk_ctl_t thr_dpc_ctl;
444 tsk_ctl_t thr_wdt_ctl;
446 tsk_ctl_t thr_rxf_ctl;
448 bool rxthread_enabled;
451 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
452 struct wake_lock wl_wifi; /* Wifi wakelock */
453 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
454 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
455 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
456 #ifdef BCMPCIE_OOB_HOST_WAKE
457 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
458 #endif /* BCMPCIE_OOB_HOST_WAKE */
459 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
461 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
462 /* net_device interface lock, prevent race conditions among net_dev interface
463 * calls and wifi_on or wifi_off
465 struct mutex dhd_net_if_mutex;
466 struct mutex dhd_suspend_mutex;
468 spinlock_t wakelock_spinlock;
469 uint32 wakelock_counter;
470 int wakelock_wd_counter;
471 int wakelock_rx_timeout_enable;
472 int wakelock_ctrl_timeout_enable;
474 uint32 wakelock_before_waive;
476 /* Thread to issue ioctl for multicast */
477 wait_queue_head_t ctrl_wait;
478 atomic_t pend_8021x_cnt;
479 dhd_attach_states_t dhd_state;
481 dhd_event_log_t event_data;
482 #endif /* SHOW_LOGTRACE */
484 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
485 struct early_suspend early_suspend;
486 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
488 #ifdef ARP_OFFLOAD_SUPPORT
490 #endif /* ARP_OFFLOAD_SUPPORT */
494 struct timer_list rpcth_timer;
495 bool rpcth_timer_active;
498 #ifdef DHDTCPACK_SUPPRESS
499 spinlock_t tcpack_lock;
500 #endif /* DHDTCPACK_SUPPRESS */
501 void *dhd_deferred_wq;
502 #ifdef DEBUG_CPU_FREQ
503 struct notifier_block freq_trans;
504 int __percpu *new_freq;
507 struct notifier_block pm_notifier;
508 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
509 struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
510 bool is_wlanaudio_blist;
511 #endif /* CUSTOMER_HW20 && WLANAUDIO */
514 #define DHDIF_FWDER(dhdif) FALSE
516 /* Flag to indicate if we should download firmware on driver load */
517 uint dhd_download_fw_on_driverload = TRUE;
519 /* Definitions to provide path to the firmware and nvram
520 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
522 char firmware_path[MOD_PARAM_PATHLEN];
523 char nvram_path[MOD_PARAM_PATHLEN];
525 /* backup buffer for firmware and nvram path */
526 char fw_bak_path[MOD_PARAM_PATHLEN];
527 char nv_bak_path[MOD_PARAM_PATHLEN];
529 /* information string to keep firmware, chio, cheip version info visiable from log */
530 char info_string[MOD_PARAM_INFOLEN];
531 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
533 int disable_proptx = 0;
534 module_param(op_mode, int, 0644);
535 extern int wl_control_wl_start(struct net_device *dev);
536 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
537 struct semaphore dhd_registration_sem;
538 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
540 /* deferred handlers */
541 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
542 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
543 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
544 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
546 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
550 extern void dhd_netdev_free(struct net_device *ndev);
551 #endif /* WL_CFG80211 */
554 module_param(dhd_msg_level, int, 0);
556 #ifdef ARP_OFFLOAD_SUPPORT
557 /* ARP offload enable */
558 uint dhd_arp_enable = TRUE;
559 module_param(dhd_arp_enable, uint, 0);
561 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
563 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
565 module_param(dhd_arp_mode, uint, 0);
566 #endif /* ARP_OFFLOAD_SUPPORT */
568 /* Disable Prop tx */
569 module_param(disable_proptx, int, 0644);
570 /* load firmware and/or nvram values from the filesystem */
571 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
572 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
574 /* Watchdog interval */
576 /* extend watchdog expiration to 2 seconds when DPC is running */
577 #define WATCHDOG_EXTEND_INTERVAL (2000)
579 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
580 module_param(dhd_watchdog_ms, uint, 0);
582 #if defined(DHD_DEBUG)
583 /* Console poll interval */
584 uint dhd_console_ms = 0;
585 module_param(dhd_console_ms, uint, 0644);
586 #endif /* defined(DHD_DEBUG) */
589 uint dhd_slpauto = TRUE;
590 module_param(dhd_slpauto, uint, 0);
592 #ifdef PKT_FILTER_SUPPORT
593 /* Global Pkt filter enable control */
594 uint dhd_pkt_filter_enable = TRUE;
595 module_param(dhd_pkt_filter_enable, uint, 0);
598 /* Pkt filter init setup */
599 uint dhd_pkt_filter_init = 0;
600 module_param(dhd_pkt_filter_init, uint, 0);
602 /* Pkt filter mode control */
603 uint dhd_master_mode = TRUE;
604 module_param(dhd_master_mode, uint, 0);
606 int dhd_watchdog_prio = 0;
607 module_param(dhd_watchdog_prio, int, 0);
609 /* DPC thread priority */
610 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
611 module_param(dhd_dpc_prio, int, 0);
613 /* RX frame thread priority */
614 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
615 module_param(dhd_rxf_prio, int, 0);
617 int passive_channel_skip = 0;
618 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
620 #if !defined(BCMDHDUSB)
621 extern int dhd_dongle_ramsize;
622 module_param(dhd_dongle_ramsize, int, 0);
623 #endif /* BCMDHDUSB */
625 /* Keep track of number of instances */
626 static int dhd_found = 0;
627 static int instance_base = 0; /* Starting instance number */
628 module_param(instance_base, int, 0644);
630 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
631 dhd_info_t *dhd_global = NULL;
632 #endif /* CUSTOMER_HW20 && WLANAUDIO */
636 /* DHD Perimiter lock only used in router with bypass forwarding. */
637 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
638 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
639 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
640 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
641 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
643 #ifdef PCIE_FULL_DONGLE
644 #if defined(BCM_GMAC3)
645 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
646 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
647 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
648 #else /* ! BCM_GMAC3 */
649 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
650 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
651 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
652 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
653 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
654 #endif /* ! BCM_GMAC3 */
655 #endif /* PCIE_FULL_DONGLE */
657 /* Control fw roaming */
658 uint dhd_roam_disable = 0;
660 /* Control radio state */
661 uint dhd_radio_up = 1;
663 /* Network inteface name */
664 char iface_name[IFNAMSIZ] = {'\0'};
665 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
667 /* The following are specific to the SDIO dongle */
669 /* IOCTL response timeout */
670 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
672 /* Idle timeout for backplane clock */
673 int dhd_idletime = DHD_IDLETIME_TICKS;
674 module_param(dhd_idletime, int, 0);
677 uint dhd_poll = FALSE;
678 module_param(dhd_poll, uint, 0);
681 uint dhd_intr = TRUE;
682 module_param(dhd_intr, uint, 0);
684 /* SDIO Drive Strength (in milliamps) */
685 uint dhd_sdiod_drive_strength = 6;
686 module_param(dhd_sdiod_drive_strength, uint, 0);
690 extern uint dhd_txbound;
691 extern uint dhd_rxbound;
692 module_param(dhd_txbound, uint, 0);
693 module_param(dhd_rxbound, uint, 0);
695 /* Deferred transmits */
696 extern uint dhd_deferred_tx;
697 module_param(dhd_deferred_tx, uint, 0);
700 extern void dhd_dbg_init(dhd_pub_t *dhdp);
701 extern void dhd_dbg_remove(void);
702 #endif /* BCMDBGFS */
708 /* Echo packet generator (pkts/s) */
710 module_param(dhd_pktgen, uint, 0);
712 /* Echo packet len (0 => sawtooth, max 2040) */
713 uint dhd_pktgen_len = 0;
714 module_param(dhd_pktgen_len, uint, 0);
718 extern char dhd_version[];
720 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
721 static void dhd_net_if_lock_local(dhd_info_t *dhd);
722 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
723 static void dhd_suspend_lock(dhd_pub_t *dhdp);
724 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
727 void htsf_update(dhd_info_t *dhd, void *data);
728 tsf_t prev_tsf, cur_tsf;
730 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
731 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
732 static void dhd_dump_latency(void);
733 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
734 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
735 static void dhd_dump_htsfhisto(histo_t *his, char *s);
736 #endif /* WLMEDIA_HTSF */
738 /* Monitor interface */
739 int dhd_monitor_init(void *dhd_pub);
740 int dhd_monitor_uninit(void);
743 #if defined(WL_WIRELESS_EXT)
744 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
745 #endif /* defined(WL_WIRELESS_EXT) */
747 static void dhd_dpc(ulong data);
749 extern int dhd_wait_pend8021x(struct net_device *dev);
750 void dhd_os_wd_timer_extend(void *bus, bool extend);
754 #error TOE requires BDC
756 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
757 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
760 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
761 wl_event_msg_t *event_ptr, void **data_ptr);
762 #ifdef DHD_UNICAST_DHCP
763 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
764 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
765 int *len_ptr, uint8 *prot_ptr);
766 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
767 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
769 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
770 #endif /* DHD_UNICAST_DHCP */
772 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
774 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
775 #if defined(CONFIG_PM_SLEEP)
776 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
778 int ret = NOTIFY_DONE;
779 bool suspend = FALSE;
780 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
782 BCM_REFERENCE(dhdinfo);
784 case PM_HIBERNATION_PREPARE:
785 case PM_SUSPEND_PREPARE:
788 case PM_POST_HIBERNATION:
789 case PM_POST_SUSPEND:
794 /* FIXME: dhd_wlfc_suspend acquires wd wakelock and calling
795 in this function is breaking LP0. So moving this function
796 call to dhd_set_suspend. Need to enable it after fixing
797 wd wakelock issue. */
799 #if defined(SUPPORT_P2P_GO_PS)
802 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
803 dhd_wlfc_suspend(&dhdinfo->pub);
804 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
806 dhd_wlfc_resume(&dhdinfo->pub);
808 #endif /* defined(SUPPORT_P2P_GO_PS) */
811 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
812 KERNEL_VERSION(2, 6, 39))
813 dhd_mmc_suspend = suspend;
820 static struct notifier_block dhd_pm_notifier = {
821 .notifier_call = dhd_pm_callback,
824 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
825 * created in kernel notifier link list (with 'next' pointing to itself)
827 static bool dhd_pm_notifier_registered = FALSE;
829 extern int register_pm_notifier(struct notifier_block *nb);
830 extern int unregister_pm_notifier(struct notifier_block *nb);
831 #endif /* CONFIG_PM_SLEEP */
833 /* Request scheduling of the bus rx frame */
834 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
835 static void dhd_os_rxflock(dhd_pub_t *pub);
836 static void dhd_os_rxfunlock(dhd_pub_t *pub);
838 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
839 typedef struct dhd_dev_priv {
840 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
841 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
842 int ifidx; /* interface index */
845 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
846 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
847 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
848 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
849 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
851 /** Clear the dhd net_device's private structure. */
853 dhd_dev_priv_clear(struct net_device * dev)
855 dhd_dev_priv_t * dev_priv;
856 ASSERT(dev != (struct net_device *)NULL);
857 dev_priv = DHD_DEV_PRIV(dev);
858 dev_priv->dhd = (dhd_info_t *)NULL;
859 dev_priv->ifp = (dhd_if_t *)NULL;
860 dev_priv->ifidx = DHD_BAD_IF;
863 /** Setup the dhd net_device's private structure. */
865 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
868 dhd_dev_priv_t * dev_priv;
869 ASSERT(dev != (struct net_device *)NULL);
870 dev_priv = DHD_DEV_PRIV(dev);
873 dev_priv->ifidx = ifidx;
876 #ifdef PCIE_FULL_DONGLE
878 /** Dummy objects are defined with state representing bad|down.
879 * Performance gains from reducing branch conditionals, instruction parallelism,
880 * dual issue, reducing load shadows, avail of larger pipelines.
881 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
882 * is accessed via the dhd_sta_t.
885 /* Dummy dhd_info object */
886 dhd_info_t dhd_info_null = {
887 #if defined(BCM_GMAC3)
891 .info = &dhd_info_null,
892 #ifdef DHDTCPACK_SUPPRESS
893 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
894 #endif /* DHDTCPACK_SUPPRESS */
896 .busstate = DHD_BUS_DOWN
899 #define DHD_INFO_NULL (&dhd_info_null)
900 #define DHD_PUB_NULL (&dhd_info_null.pub)
902 /* Dummy netdevice object */
903 struct net_device dhd_net_dev_null = {
904 .reg_state = NETREG_UNREGISTERED
906 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
908 /* Dummy dhd_if object */
909 dhd_if_t dhd_if_null = {
910 #if defined(BCM_GMAC3)
914 .wmf = { .wmf_enable = TRUE },
916 .info = DHD_INFO_NULL,
917 .net = DHD_NET_DEV_NULL,
920 #define DHD_IF_NULL (&dhd_if_null)
922 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
924 /** Interface STA list management. */
926 /** Fetch the dhd_if object, given the interface index in the dhd. */
927 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
929 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
930 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
931 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
933 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
934 static void dhd_if_del_sta_list(dhd_if_t * ifp);
935 static void dhd_if_flush_sta(dhd_if_t * ifp);
937 /* Construct/Destruct a sta pool. */
938 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
939 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
940 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
943 /* Return interface pointer */
944 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
946 ASSERT(ifidx < DHD_MAX_IFS);
948 if (ifidx >= DHD_MAX_IFS)
951 return dhdp->info->iflist[ifidx];
954 /** Reset a dhd_sta object and free into the dhd pool. */
956 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
960 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
962 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
963 id16_map_free(dhdp->staid_allocator, sta->idx);
964 for (prio = 0; prio < (int)NUMPRIO; prio++)
965 sta->flowid[prio] = FLOWID_INVALID;
966 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
967 sta->ifidx = DHD_BAD_IF;
968 bzero(sta->ea.octet, ETHER_ADDR_LEN);
969 INIT_LIST_HEAD(&sta->list);
970 sta->idx = ID16_INVALID; /* implying free */
973 /** Allocate a dhd_sta object from the dhd pool. */
975 dhd_sta_alloc(dhd_pub_t * dhdp)
979 dhd_sta_pool_t * sta_pool;
981 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
983 idx = id16_map_alloc(dhdp->staid_allocator);
984 if (idx == ID16_INVALID) {
985 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
989 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
990 sta = &sta_pool[idx];
992 ASSERT((sta->idx == ID16_INVALID) &&
993 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
994 sta->idx = idx; /* implying allocated */
999 /** Delete all STAs in an interface's STA list. */
1001 dhd_if_del_sta_list(dhd_if_t *ifp)
1003 dhd_sta_t *sta, *next;
1004 unsigned long flags;
1006 DHD_IF_STA_LIST_LOCK(ifp, flags);
1008 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1009 #if defined(BCM_GMAC3)
1011 /* Remove sta from WOFA forwarder. */
1012 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1014 #endif /* BCM_GMAC3 */
1015 list_del(&sta->list);
1016 dhd_sta_free(&ifp->info->pub, sta);
1019 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1024 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1026 dhd_if_flush_sta(dhd_if_t * ifp)
1028 #if defined(BCM_GMAC3)
1030 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1031 dhd_sta_t *sta, *next;
1032 unsigned long flags;
1034 DHD_IF_STA_LIST_LOCK(ifp, flags);
1036 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1037 /* Remove any sta entry from WOFA forwarder. */
1038 fwder_flush(ifp->fwdh, (wofa_t)sta);
1041 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1043 #endif /* BCM_GMAC3 */
1046 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1048 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1050 int idx, sta_pool_memsz;
1052 dhd_sta_pool_t * sta_pool;
1053 void * staid_allocator;
1055 ASSERT(dhdp != (dhd_pub_t *)NULL);
1056 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1058 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1059 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1060 if (staid_allocator == NULL) {
1061 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1065 /* Pre allocate a pool of dhd_sta objects (one extra). */
1066 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1067 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1068 if (sta_pool == NULL) {
1069 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1070 id16_map_fini(dhdp->osh, staid_allocator);
1074 dhdp->sta_pool = sta_pool;
1075 dhdp->staid_allocator = staid_allocator;
1077 /* Initialize all sta(s) for the pre-allocated free pool. */
1078 bzero((uchar *)sta_pool, sta_pool_memsz);
1079 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1080 sta = &sta_pool[idx];
1081 sta->idx = id16_map_alloc(staid_allocator);
1082 ASSERT(sta->idx <= max_sta);
1084 /* Now place them into the pre-allocated free pool. */
1085 for (idx = 1; idx <= max_sta; idx++) {
1086 sta = &sta_pool[idx];
1087 dhd_sta_free(dhdp, sta);
1093 /** Destruct the pool of dhd_sta_t objects.
1094 * Caller must ensure that no STA objects are currently associated with an if.
1097 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1099 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1103 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1104 for (idx = 1; idx <= max_sta; idx++) {
1105 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1106 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1108 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1109 dhdp->sta_pool = NULL;
1112 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1113 dhdp->staid_allocator = NULL;
1116 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1118 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1120 int idx, sta_pool_memsz;
1122 dhd_sta_pool_t * sta_pool;
1123 void *staid_allocator;
1126 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1130 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1131 staid_allocator = dhdp->staid_allocator;
1134 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1138 if (!staid_allocator) {
1139 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1143 /* clear free pool */
1144 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1145 bzero((uchar *)sta_pool, sta_pool_memsz);
1147 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1148 id16_map_clear(staid_allocator, max_sta, 1);
1150 /* Initialize all sta(s) for the pre-allocated free pool. */
1151 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1152 sta = &sta_pool[idx];
1153 sta->idx = id16_map_alloc(staid_allocator);
1154 ASSERT(sta->idx <= max_sta);
1156 /* Now place them into the pre-allocated free pool. */
1157 for (idx = 1; idx <= max_sta; idx++) {
1158 sta = &sta_pool[idx];
1159 dhd_sta_free(dhdp, sta);
1163 /** Find STA with MAC address ea in an interface's STA list. */
1165 dhd_find_sta(void *pub, int ifidx, void *ea)
1167 dhd_sta_t *sta, *next;
1169 unsigned long flags;
1172 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1174 return DHD_STA_NULL;
1176 DHD_IF_STA_LIST_LOCK(ifp, flags);
1178 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1179 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1180 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1185 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1187 return DHD_STA_NULL;
1190 /** Add STA into the interface's STA list. */
1192 dhd_add_sta(void *pub, int ifidx, void *ea)
1196 unsigned long flags;
1199 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1201 return DHD_STA_NULL;
1203 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1204 if (sta == DHD_STA_NULL) {
1205 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1206 return DHD_STA_NULL;
1209 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1211 /* link the sta and the dhd interface */
1214 INIT_LIST_HEAD(&sta->list);
1216 DHD_IF_STA_LIST_LOCK(ifp, flags);
1218 list_add_tail(&sta->list, &ifp->sta_list);
1220 #if defined(BCM_GMAC3)
1222 ASSERT(ISALIGNED(ea, 2));
1223 /* Add sta to WOFA forwarder. */
1224 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1226 #endif /* BCM_GMAC3 */
1228 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1233 /** Delete STA from the interface's STA list. */
1235 dhd_del_sta(void *pub, int ifidx, void *ea)
1237 dhd_sta_t *sta, *next;
1239 unsigned long flags;
1242 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1246 DHD_IF_STA_LIST_LOCK(ifp, flags);
1248 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1249 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1250 #if defined(BCM_GMAC3)
1251 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1252 ASSERT(ISALIGNED(ea, 2));
1253 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1255 #endif /* BCM_GMAC3 */
1256 list_del(&sta->list);
1257 dhd_sta_free(&ifp->info->pub, sta);
1261 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1266 /** Add STA if it doesn't exist. Not reentrant. */
1268 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1272 sta = dhd_find_sta(pub, ifidx, ea);
1276 sta = dhd_add_sta(pub, ifidx, ea);
1282 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
1283 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1284 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1285 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1286 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
1287 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1288 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1289 #endif /* PCIE_FULL_DONGLE */
1292 /* Returns dhd iflist index correspondig the the bssidx provided by apps */
1293 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1296 dhd_info_t *dhd = dhdp->info;
1299 ASSERT(bssidx < DHD_MAX_IFS);
1302 for (i = 0; i < DHD_MAX_IFS; i++) {
1303 ifp = dhd->iflist[i];
1304 if (ifp && (ifp->bssidx == bssidx)) {
1305 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1306 ifp->name, bssidx, i));
1313 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1319 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1323 dhd_os_rxflock(dhdp);
1324 store_idx = dhdp->store_idx;
1325 sent_idx = dhdp->sent_idx;
1326 if (dhdp->skbbuf[store_idx] != NULL) {
1327 /* Make sure the previous packets are processed */
1328 dhd_os_rxfunlock(dhdp);
1329 #ifdef RXF_DEQUEUE_ON_BUSY
1330 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1331 skb, store_idx, sent_idx));
1333 #else /* RXF_DEQUEUE_ON_BUSY */
1334 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1335 skb, store_idx, sent_idx));
1336 /* removed msleep here, should use wait_event_timeout if we
1337 * want to give rx frame thread a chance to run
1339 #if defined(WAIT_DEQUEUE)
1343 #endif /* RXF_DEQUEUE_ON_BUSY */
1345 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1346 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
1347 dhdp->skbbuf[store_idx] = skb;
1348 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1349 dhd_os_rxfunlock(dhdp);
1354 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1360 dhd_os_rxflock(dhdp);
1362 store_idx = dhdp->store_idx;
1363 sent_idx = dhdp->sent_idx;
1364 skb = dhdp->skbbuf[sent_idx];
1367 dhd_os_rxfunlock(dhdp);
1368 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1369 store_idx, sent_idx));
1373 dhdp->skbbuf[sent_idx] = NULL;
1374 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1376 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1379 dhd_os_rxfunlock(dhdp);
1384 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1386 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1388 if (prepost) { /* pre process */
1389 dhd_read_macaddr(dhd);
1390 } else { /* post process */
1391 dhd_write_macaddr(&dhd->pub.mac);
1397 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1399 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1401 bool _apply = FALSE;
1402 /* In case of IBSS mode, apply arp pkt filter */
1403 if (op_mode & DHD_FLAG_IBSS_MODE) {
1407 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1408 if ((dhd->arp_version == 1) &&
1409 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1417 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1419 #ifdef PKT_FILTER_SUPPORT
1421 dhd_set_packet_filter_mode(struct net_device *dev, char *command)
1423 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1424 dhd_pub_t *dhdp = &dhdi->pub;
1426 dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
1427 dhd_enable_packet_filter(1, dhdp);
1431 dhd_set_packet_filter_ports(struct net_device *dev, char *command)
1433 int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
1434 uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
1435 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1436 dhd_pub_t *dhdp = &dhdi->pub;
1437 char iovbuf[WLC_IOCTL_SMLEN];
1440 action = bcm_strtoul(command, &command, 0);
1441 if (action > PKT_FILTER_PORTS_MAX)
1444 if (action == PKT_FILTER_PORTS_LOOPBACK) {
1445 /* echo the loopback value if port filter is supported else error */
1446 bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
1447 error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
1449 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
1453 if (strstr(iovbuf, "pktfltr2"))
1454 return bcm_strtoul(command, &command, 0);
1456 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
1457 return BCME_UNSUPPORTED;
1461 if (action == PKT_FILTER_PORTS_CLEAR) {
1462 /* action 0 is clear all ports */
1463 dhdp->pkt_filter_ports_count = 0;
1464 bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
1467 portnum = bcm_strtoul(command, &command, 0);
1469 /* no ports to add or remove */
1473 /* get configured ports */
1474 count = dhdp->pkt_filter_ports_count;
1475 ports = dhdp->pkt_filter_ports;
1477 if (action == PKT_FILTER_PORTS_ADD) {
1478 /* action 1 is add ports */
1480 /* copy new ports */
1481 while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
1482 for (i = 0; i < count; i++) {
1483 /* duplicate port */
1484 if (portnum == ports[i])
1487 if (portnum != ports[i])
1488 ports[count++] = portnum;
1489 portnum = bcm_strtoul(command, &command, 0);
1491 } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
1492 /* action 2 is remove ports */
1493 bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
1496 while (portnum != 0) {
1498 for (i = 0; i < get_count; i++) {
1499 if (portnum != get_ports[i])
1500 ports[count++] = get_ports[i];
1503 bcopy(ports, get_ports, count * sizeof(uint16));
1504 portnum = bcm_strtoul(command, &command, 0);
1507 dhdp->pkt_filter_ports_count = count;
1513 dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
1516 wl_pkt_filter_ports_t *portlist = NULL;
1517 const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
1518 + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
1519 char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
1520 char iovbuf[pkt_filter_ports_buf_len];
1522 DHD_ERROR(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
1523 enable, dhd->in_suspend, dhd->pkt_filter_mode,
1524 dhd->pkt_filter_ports_count));
1526 bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
1527 portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
1528 portlist->version = WL_PKT_FILTER_PORTS_VERSION;
1529 portlist->reserved = 0;
1532 if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY)) {
1533 /* disable port filter */
1534 portlist->count = 0;
1535 dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
1536 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1538 /* enable port filter */
1539 dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
1540 if (dhd->pkt_filter_mode
1541 & PKT_FILTER_MODE_FORWARD_ON_MATCH)
1542 /* whitelist mode: FORWARD_ON_MATCH */
1544 PKT_FILTER_MODE_FORWARD_ON_MATCH;
1546 /* blacklist mode: DISCARD_ON_MATCH */
1548 ~PKT_FILTER_MODE_FORWARD_ON_MATCH;
1549 portlist->count = dhd->pkt_filter_ports_count;
1550 bcopy(dhd->pkt_filter_ports, portlist->ports,
1551 dhd->pkt_filter_ports_count * sizeof(uint16));
1554 /* disable port filter */
1555 portlist->count = 0;
1556 dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
1557 dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
1560 DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
1564 bcm_mkiovar("pkt_filter_ports",
1566 (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
1567 iovbuf, sizeof(iovbuf));
1568 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1570 DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));
1573 bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
1574 sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
1575 error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1577 DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));
1581 #endif /* PKT_FILTER_SUPPORT */
1583 void dhd_set_packet_filter(dhd_pub_t *dhd)
1585 #ifdef PKT_FILTER_SUPPORT
1588 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1589 if (dhd_pkt_filter_enable) {
1590 for (i = 0; i < dhd->pktfilter_count; i++) {
1591 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1594 #endif /* PKT_FILTER_SUPPORT */
1597 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1599 #ifdef PKT_FILTER_SUPPORT
1602 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
1604 dhd_enable_packet_filter_ports(dhd, value);
1606 /* 1 - Enable packet filter, only allow unicast packet to send up */
1607 /* 0 - Disable packet filter */
1608 if (dhd_pkt_filter_enable && (!value ||
1609 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
1611 for (i = 0; i < dhd->pktfilter_count; i++) {
1612 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
1613 if (value && (i == DHD_ARP_FILTER_NUM) &&
1614 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1615 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1616 "val %d, cnt %d, op_mode 0x%x\n",
1617 value, i, dhd->op_mode));
1620 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1621 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1622 value, dhd_master_mode);
1625 #endif /* PKT_FILTER_SUPPORT */
1628 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1630 #ifndef SUPPORT_PM2_ONLY
1631 int power_mode = PM_MAX;
1632 #endif /* SUPPORT_PM2_ONLY */
1633 /* wl_pkt_filter_enable_t enable_parm; */
1635 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1636 #ifndef ENABLE_FW_ROAM_SUSPEND
1638 #endif /* ENABLE_FW_ROAM_SUSPEND */
1639 uint nd_ra_filter = 0;
1646 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1647 __FUNCTION__, value, dhd->in_suspend));
1649 dhd_suspend_lock(dhd);
1651 #ifdef CUSTOM_SET_CPUCORE
1652 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1653 /* set specific cpucore */
1654 dhd_set_cpucore(dhd, TRUE);
1655 #endif /* CUSTOM_SET_CPUCORE */
1657 if (value && dhd->in_suspend) {
1658 #ifdef PKT_FILTER_SUPPORT
1659 dhd->early_suspended = 1;
1661 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
1662 tegra_sysfs_suspend();
1664 /* Kernel suspended */
1665 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
1667 #ifndef SUPPORT_PM2_ONLY
1668 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1669 sizeof(power_mode), TRUE, 0);
1670 #endif /* SUPPORT_PM2_ONLY */
1672 /* Enable packet filter, only allow unicast packet to send up */
1673 dhd_enable_packet_filter(1, dhd);
1676 /* If DTIM skip is set up as default, force it to wake
1677 * each third DTIM for better power savings. Note that
1678 * one side effect is a chance to miss BC/MC packet.
1680 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
1681 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1682 4, iovbuf, sizeof(iovbuf));
1683 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
1685 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
1687 #ifndef ENABLE_FW_ROAM_SUSPEND
1688 /* Disable firmware roaming during suspend */
1689 bcm_mkiovar("roam_off", (char *)&roamvar, 4,
1690 iovbuf, sizeof(iovbuf));
1691 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1692 #endif /* ENABLE_FW_ROAM_SUSPEND */
1693 if (FW_SUPPORTED(dhd, ndoe)) {
1694 /* enable IPv6 RA filter in firmware during suspend */
1696 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1697 iovbuf, sizeof(iovbuf));
1698 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1699 sizeof(iovbuf), TRUE, 0)) < 0)
1700 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1703 #if defined(SUPPORT_P2P_GO_PS)
1704 if (bcmdhd_support_p2p_go_ps) {
1705 #ifdef PROP_TXSTATUS
1706 DHD_OS_WAKE_LOCK_WAIVE(dhd);
1707 dhd_wlfc_suspend(dhd);
1708 DHD_OS_WAKE_LOCK_RESTORE(dhd);
1711 #endif /* defined(SUPPORT_P2P_GO_PS) */
1713 #ifdef PKT_FILTER_SUPPORT
1714 dhd->early_suspended = 0;
1716 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
1717 tegra_sysfs_resume();
1719 /* Kernel resumed */
1720 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
1722 #ifndef SUPPORT_PM2_ONLY
1723 power_mode = PM_FAST;
1724 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1725 sizeof(power_mode), TRUE, 0);
1726 #endif /* SUPPORT_PM2_ONLY */
1727 #ifdef PKT_FILTER_SUPPORT
1728 /* disable pkt filter */
1729 dhd_enable_packet_filter(0, dhd);
1730 #endif /* PKT_FILTER_SUPPORT */
1732 /* restore pre-suspend setting for dtim_skip */
1733 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1734 4, iovbuf, sizeof(iovbuf));
1736 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1737 #ifndef ENABLE_FW_ROAM_SUSPEND
1738 roamvar = dhd_roam_disable;
1739 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
1741 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1742 #endif /* ENABLE_FW_ROAM_SUSPEND */
1743 if (FW_SUPPORTED(dhd, ndoe)) {
1744 /* disable IPv6 RA filter in firmware during suspend */
1746 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1747 iovbuf, sizeof(iovbuf));
1748 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1749 sizeof(iovbuf), TRUE, 0)) < 0)
1750 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1753 #if defined(SUPPORT_P2P_GO_PS)
1754 if (bcmdhd_support_p2p_go_ps) {
1755 #ifdef PROP_TXSTATUS
1756 dhd_wlfc_resume(dhd);
1759 #endif /* defined(SUPPORT_P2P_GO_PS) */
1763 dhd_suspend_unlock(dhd);
1768 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1770 dhd_pub_t *dhdp = &dhd->pub;
1773 DHD_OS_WAKE_LOCK(dhdp);
1774 DHD_PERIM_LOCK(dhdp);
1776 /* Set flag when early suspend was called */
1777 dhdp->in_suspend = val;
1778 if ((force || !dhdp->suspend_disable_flag) &&
1779 dhd_support_sta_mode(dhdp))
1781 ret = dhd_set_suspend(val, dhdp);
1784 DHD_PERIM_UNLOCK(dhdp);
1785 DHD_OS_WAKE_UNLOCK(dhdp);
1789 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1790 static void dhd_early_suspend(struct early_suspend *h)
1792 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1793 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1796 dhd_suspend_resume_helper(dhd, 1, 0);
1799 static void dhd_late_resume(struct early_suspend *h)
1801 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1802 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1805 dhd_suspend_resume_helper(dhd, 0, 0);
1807 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1810 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1811 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1813 * dhd_timeout_start(&tmo, usec);
1814 * while (!dhd_timeout_expired(&tmo))
1815 * if (poll_something())
1817 * if (dhd_timeout_expired(&tmo))
1822 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1827 tmo->tick = jiffies_to_usecs(1);
1831 dhd_timeout_expired(dhd_timeout_t *tmo)
1833 /* Does nothing the first call */
1834 if (tmo->increment == 0) {
1839 if (tmo->elapsed >= tmo->limit)
1842 /* Add the delay that's about to take place */
1843 tmo->elapsed += tmo->increment;
1845 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
1846 OSL_DELAY(tmo->increment);
1847 tmo->increment *= 2;
1848 if (tmo->increment > tmo->tick)
1849 tmo->increment = tmo->tick;
1851 wait_queue_head_t delay_wait;
1852 DECLARE_WAITQUEUE(wait, current);
1853 init_waitqueue_head(&delay_wait);
1854 add_wait_queue(&delay_wait, &wait);
1855 set_current_state(TASK_INTERRUPTIBLE);
1856 (void)schedule_timeout(1);
1857 remove_wait_queue(&delay_wait, &wait);
1858 set_current_state(TASK_RUNNING);
1865 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1870 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
1873 while (i < DHD_MAX_IFS) {
1874 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
1882 struct net_device * dhd_idx2net(void *pub, int ifidx)
1884 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1885 struct dhd_info *dhd_info;
1887 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1889 dhd_info = dhd_pub->info;
1890 if (dhd_info && dhd_info->iflist[ifidx])
1891 return dhd_info->iflist[ifidx]->net;
1896 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1898 int i = DHD_MAX_IFS;
1902 if (name == NULL || *name == '\0')
1906 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1909 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1911 return i; /* default - the primary interface */
1915 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1917 int i = DHD_MAX_IFS;
1922 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1925 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1927 return i; /* default - the primary interface */
1931 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1933 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1937 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1938 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1942 if (dhd->iflist[ifidx] == NULL) {
1943 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1947 if (dhd->iflist[ifidx]->net)
1948 return dhd->iflist[ifidx]->net->name;
1954 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1957 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1960 for (i = 0; i < DHD_MAX_IFS; i++)
1961 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1962 return dhd->iflist[i]->mac_addr;
1969 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
1971 struct net_device *dev;
1972 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1973 struct netdev_hw_addr *ha;
1975 struct dev_mc_list *mclist;
1977 uint32 allmulti, cnt;
1984 ASSERT(dhd && dhd->iflist[ifidx]);
1985 if (dhd == NULL || dhd->iflist[ifidx] == NULL)
1987 dev = dhd->iflist[ifidx]->net;
1990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1991 netif_addr_lock_bh(dev);
1993 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1994 cnt = netdev_mc_count(dev);
1996 cnt = dev->mc_count;
1997 #endif /* LINUX_VERSION_CODE */
1999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2000 netif_addr_unlock_bh(dev);
2003 /* Determine initial value of allmulti flag */
2004 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2006 /* Send down the multicast list first. */
2009 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
2010 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
2011 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
2012 dhd_ifname(&dhd->pub, ifidx), cnt));
2016 strncpy(bufp, "mcast_list", buflen - 1);
2017 bufp[buflen - 1] = '\0';
2018 bufp += strlen("mcast_list") + 1;
2021 memcpy(bufp, &cnt, sizeof(cnt));
2022 bufp += sizeof(cnt);
2025 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2026 netif_addr_lock_bh(dev);
2028 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
2029 netdev_for_each_mc_addr(ha, dev) {
2032 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2033 bufp += ETHER_ADDR_LEN;
2037 for (mclist = dev->mc_list; (mclist && (cnt > 0));
2038 cnt--, mclist = mclist->next) {
2039 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
2040 bufp += ETHER_ADDR_LEN;
2042 #endif /* LINUX_VERSION_CODE */
2044 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2045 netif_addr_unlock_bh(dev);
2048 memset(&ioc, 0, sizeof(ioc));
2049 ioc.cmd = WLC_SET_VAR;
2054 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2056 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2057 dhd_ifname(&dhd->pub, ifidx), cnt));
2058 allmulti = cnt ? TRUE : allmulti;
2061 MFREE(dhd->pub.osh, buf, buflen);
2063 /* Now send the allmulti setting. This is based on the setting in the
2064 * net_device flags, but might be modified above to be turned on if we
2065 * were trying to set some addresses and dongle rejected it...
2068 buflen = sizeof("allmulti") + sizeof(allmulti);
2069 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
2070 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
2073 allmulti = htol32(allmulti);
2075 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
2076 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
2077 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
2078 MFREE(dhd->pub.osh, buf, buflen);
2083 memset(&ioc, 0, sizeof(ioc));
2084 ioc.cmd = WLC_SET_VAR;
2089 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2091 DHD_ERROR(("%s: set allmulti %d failed\n",
2092 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2095 MFREE(dhd->pub.osh, buf, buflen);
2097 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
2099 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2101 allmulti = htol32(allmulti);
2103 memset(&ioc, 0, sizeof(ioc));
2104 ioc.cmd = WLC_SET_PROMISC;
2105 ioc.buf = &allmulti;
2106 ioc.len = sizeof(allmulti);
2109 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2111 DHD_ERROR(("%s: set promisc %d failed\n",
2112 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2117 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2123 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
2124 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
2127 memset(&ioc, 0, sizeof(ioc));
2128 ioc.cmd = WLC_SET_VAR;
2133 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2135 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2137 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2139 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2146 extern struct net_device *ap_net_dev;
2147 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
2151 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2153 dhd_info_t *dhd = handle;
2154 dhd_if_event_t *if_event = event_info;
2155 struct net_device *ndev;
2158 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2159 struct wireless_dev *vwdev, *primary_wdev;
2160 struct net_device *primary_ndev;
2161 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2163 if (event != DHD_WQ_WORK_IF_ADD) {
2164 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2169 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2174 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2178 dhd_net_if_lock_local(dhd);
2179 DHD_OS_WAKE_LOCK(&dhd->pub);
2180 DHD_PERIM_LOCK(&dhd->pub);
2182 ifidx = if_event->event.ifidx;
2183 bssidx = if_event->event.bssidx;
2184 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2186 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2187 if_event->mac, bssidx, TRUE);
2189 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2193 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2194 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
2195 if (unlikely(!vwdev)) {
2196 WL_ERR(("Could not allocate wireless device\n"));
2199 primary_ndev = dhd->pub.info->iflist[0]->net;
2200 primary_wdev = ndev_to_wdev(primary_ndev);
2201 vwdev->wiphy = primary_wdev->wiphy;
2202 vwdev->iftype = if_event->event.role;
2203 vwdev->netdev = ndev;
2204 ndev->ieee80211_ptr = vwdev;
2205 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
2206 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
2207 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2209 DHD_PERIM_UNLOCK(&dhd->pub);
2210 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2211 DHD_PERIM_LOCK(&dhd->pub);
2212 if (ret != BCME_OK) {
2213 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2214 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2217 #ifdef PCIE_FULL_DONGLE
2218 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2219 if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
2220 char iovbuf[WLC_IOCTL_SMLEN];
2223 memset(iovbuf, 0, sizeof(iovbuf));
2224 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
2225 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
2227 if (ret != BCME_OK) {
2228 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2229 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2232 #endif /* PCIE_FULL_DONGLE */
2234 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2236 DHD_PERIM_UNLOCK(&dhd->pub);
2237 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2238 dhd_net_if_unlock_local(dhd);
2242 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2244 dhd_info_t *dhd = handle;
2246 dhd_if_event_t *if_event = event_info;
2249 if (event != DHD_WQ_WORK_IF_DEL) {
2250 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2255 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2260 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2264 dhd_net_if_lock_local(dhd);
2265 DHD_OS_WAKE_LOCK(&dhd->pub);
2266 DHD_PERIM_LOCK(&dhd->pub);
2268 ifidx = if_event->event.ifidx;
2269 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2271 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2273 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2275 DHD_PERIM_UNLOCK(&dhd->pub);
2276 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2277 dhd_net_if_unlock_local(dhd);
2281 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2283 dhd_info_t *dhd = handle;
2284 dhd_if_t *ifp = event_info;
2286 if (event != DHD_WQ_WORK_SET_MAC) {
2287 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2291 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2295 dhd_net_if_lock_local(dhd);
2296 DHD_OS_WAKE_LOCK(&dhd->pub);
2297 DHD_PERIM_LOCK(&dhd->pub);
2301 unsigned long flags;
2303 DHD_GENERAL_LOCK(&dhd->pub, flags);
2304 in_ap = (ap_net_dev != NULL);
2305 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2308 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2315 if (ifp == NULL || !dhd->pub.up) {
2316 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2320 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
2321 ifp->set_macaddress = FALSE;
2322 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2323 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2325 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2328 DHD_PERIM_UNLOCK(&dhd->pub);
2329 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2330 dhd_net_if_unlock_local(dhd);
2334 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
2336 dhd_info_t *dhd = handle;
2337 dhd_if_t *ifp = event_info;
2340 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
2341 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2346 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2350 dhd_net_if_lock_local(dhd);
2351 DHD_OS_WAKE_LOCK(&dhd->pub);
2352 DHD_PERIM_LOCK(&dhd->pub);
2357 unsigned long flags;
2358 DHD_GENERAL_LOCK(&dhd->pub, flags);
2359 in_ap = (ap_net_dev != NULL);
2360 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2363 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
2365 ifp->set_multicast = FALSE;
2371 if (ifp == NULL || !dhd->pub.up) {
2372 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2379 _dhd_set_multicast_list(dhd, ifidx);
2380 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
2383 DHD_PERIM_UNLOCK(&dhd->pub);
2384 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2385 dhd_net_if_unlock_local(dhd);
2389 dhd_set_mac_address(struct net_device *dev, void *addr)
2393 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2394 struct sockaddr *sa = (struct sockaddr *)addr;
2398 ifidx = dhd_net2idx(dhd, dev);
2399 if (ifidx == DHD_BAD_IF)
2402 dhdif = dhd->iflist[ifidx];
2404 dhd_net_if_lock_local(dhd);
2405 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2406 dhdif->set_macaddress = TRUE;
2407 dhd_net_if_unlock_local(dhd);
2408 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2409 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
2414 dhd_set_multicast_list(struct net_device *dev)
2416 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2419 ifidx = dhd_net2idx(dhd, dev);
2420 if (ifidx == DHD_BAD_IF)
2423 dhd->iflist[ifidx]->set_multicast = TRUE;
2424 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2425 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2428 #ifdef PROP_TXSTATUS
2430 dhd_os_wlfc_block(dhd_pub_t *pub)
2432 dhd_info_t *di = (dhd_info_t *)(pub->info);
2434 spin_lock_bh(&di->wlfc_spinlock);
2439 dhd_os_wlfc_unblock(dhd_pub_t *pub)
2441 dhd_info_t *di = (dhd_info_t *)(pub->info);
2444 spin_unlock_bh(&di->wlfc_spinlock);
2448 #endif /* PROP_TXSTATUS */
2450 #if defined(DHD_8021X_DUMP)
2452 dhd_tx_dump(osl_t *osh, void *pkt)
2457 dump_data = PKTDATA(osh, pkt);
2458 protocol = (dump_data[12] << 8) | dump_data[13];
2460 if (protocol == ETHER_TYPE_802_1X) {
2461 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2462 dump_data[14], dump_data[15], dump_data[30]));
2465 #endif /* DHD_8021X_DUMP */
2468 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
2471 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2472 struct ether_header *eh = NULL;
2475 DHD_INFO(("skb->prio = %d\n", PKTPRIO(pktbuf)));
2477 /* Reject if down */
2478 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
2479 /* free the packet here since the caller won't */
2480 PKTFREE(dhdp->osh, pktbuf, TRUE);
2484 #ifdef PCIE_FULL_DONGLE
2485 if (dhdp->busstate == DHD_BUS_SUSPEND) {
2486 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
2487 PKTFREE(dhdp->osh, pktbuf, TRUE);
2490 #endif /* PCIE_FULL_DONGLE */
2492 #ifdef DHD_UNICAST_DHCP
2493 /* if dhcp_unicast is enabled, we need to convert the */
2494 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2495 if (dhdp->dhcp_unicast) {
2496 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
2498 #endif /* DHD_UNICAST_DHCP */
2499 /* Update multicast statistic */
2500 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
2501 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
2502 eh = (struct ether_header *)pktdata;
2504 if (ETHER_ISMULTI(eh->ether_dhost))
2505 dhdp->tx_multicast++;
2506 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
2507 atomic_inc(&dhd->pend_8021x_cnt);
2508 #ifdef DHD_DHCP_DUMP
2509 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
2513 uint16 udp_port_pos;
2514 uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
2515 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
2517 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
2518 source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
2519 dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
2520 if (source_port == 0x0044 || dest_port == 0x0044) {
2521 dump_hex = (pktdata[udp_port_pos+249] << 8) |
2522 pktdata[udp_port_pos+250];
2523 if (dump_hex == 0x0101) {
2524 DHD_ERROR(("DHCP - DISCOVER [TX]\n"));
2525 } else if (dump_hex == 0x0102) {
2526 DHD_ERROR(("DHCP - OFFER [TX]\n"));
2527 } else if (dump_hex == 0x0103) {
2528 DHD_ERROR(("DHCP - REQUEST [TX]\n"));
2529 } else if (dump_hex == 0x0105) {
2530 DHD_ERROR(("DHCP - ACK [TX]\n"));
2532 DHD_ERROR(("DHCP - 0x%X [TX]\n", dump_hex));
2534 } else if (source_port == 0x0043 || dest_port == 0x0043) {
2535 DHD_ERROR(("DHCP - BOOTP [RX]\n"));
2538 #endif /* DHD_DHCP_DUMP */
2540 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2544 /* Look into the packet and update the packet priority */
2545 temp_prio = PKTPRIO(pktbuf);
2546 if (temp_prio & 0x100)
2547 PKTSETPRIO(pktbuf, temp_prio & 0xFF);
2548 #ifndef PKTPRIO_OVERRIDE
2549 if (PKTPRIO(pktbuf) == 0)
2552 pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
2554 pktsetprio(pktbuf, FALSE);
2555 #endif /* QOS_MAP_SET */
2558 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
2560 * Lkup the per interface hash table, for a matching flowring. If one is not
2561 * available, allocate a unique flowid and add a flowring entry.
2562 * The found or newly created flowid is placed into the pktbuf's tag.
2564 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
2565 if (ret != BCME_OK) {
2566 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
2571 #ifdef PROP_TXSTATUS
2572 if (dhd_wlfc_is_supported(dhdp)) {
2573 /* store the interface ID */
2574 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
2576 /* store destination MAC in the tag as well */
2577 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
2579 /* decide which FIFO this packet belongs to */
2580 if (ETHER_ISMULTI(eh->ether_dhost))
2581 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2582 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
2584 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
2586 #endif /* PROP_TXSTATUS */
2587 /* If the protocol uses a data header, apply it */
2588 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
2590 /* Use bus module to send data frame */
2592 dhd_htsf_addtxts(dhdp, pktbuf);
2594 #if defined(DHD_8021X_DUMP)
2595 dhd_tx_dump(dhdp->osh, pktbuf);
2597 #ifdef PROP_TXSTATUS
2599 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
2600 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
2601 /* non-proptxstatus way */
2603 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2605 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2606 #endif /* BCMPCIE */
2611 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2613 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2614 #endif /* BCMPCIE */
2615 #endif /* PROP_TXSTATUS */
2621 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
2626 dhd_info_t *dhd = DHD_DEV_INFO(net);
2627 dhd_if_t *ifp = NULL;
2630 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
2632 uint8 htsfdlystat_sz = 0;
2635 struct ether_header *eh;
2637 #endif /* DHD_WMF */
2639 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2641 DHD_OS_WAKE_LOCK(&dhd->pub);
2642 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2644 /* Reject if down */
2645 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
2646 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2647 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
2648 netif_stop_queue(net);
2649 /* Send Event when bus down detected during data session */
2651 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
2652 net_os_send_hang_message(net);
2654 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2655 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2656 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2659 return NETDEV_TX_BUSY;
2663 ifp = DHD_DEV_IFP(net);
2664 ifidx = DHD_DEV_IFIDX(net);
2666 ASSERT(ifidx == dhd_net2idx(dhd, net));
2667 ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
2669 if (ifidx == DHD_BAD_IF) {
2670 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
2671 netif_stop_queue(net);
2672 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2673 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2674 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2677 return NETDEV_TX_BUSY;
2681 /* if wifi scan is blocked waiting for tx packet, unblock it */
2682 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
2683 TEGRA_SCAN_TX_PKT_CHECK(skb, ifidx)
2686 /* re-align socket buffer if "skb->data" is odd address */
2687 if (((unsigned long)(skb->data)) & 0x1) {
2688 unsigned char *data = skb->data;
2689 uint32 length = skb->len;
2690 PKTPUSH(dhd->pub.osh, skb, 1);
2691 memmove(skb->data, data, length);
2692 PKTSETLEN(dhd->pub.osh, skb, length);
2695 datalen = PKTLEN(dhd->pub.osh, skb);
2697 /* Make sure there's enough room for any header */
2699 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
2700 struct sk_buff *skb2;
2702 DHD_INFO(("%s: insufficient headroom\n",
2703 dhd_ifname(&dhd->pub, ifidx)));
2704 dhd->pub.tx_realloc++;
2706 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
2709 if ((skb = skb2) == NULL) {
2710 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2711 dhd_ifname(&dhd->pub, ifidx)));
2717 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
2718 tegra_sysfs_histogram_tcpdump_tx(skb, __func__, __LINE__);
2721 /* Convert to packet */
2722 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
2723 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2724 dhd_ifname(&dhd->pub, ifidx)));
2725 dev_kfree_skb_any(skb);
2730 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
2731 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
2732 struct ether_header *eh = (struct ether_header *)pktdata;
2734 if (!ETHER_ISMULTI(eh->ether_dhost) &&
2735 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
2736 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
2741 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
2742 iph = (uint8 *)eh + ETHER_HDR_LEN;
2744 /* WMF processing for multicast packets
2745 * Only IPv4 packets are handled
2747 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
2748 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
2749 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
2750 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2752 bool ucast_convert = FALSE;
2753 #ifdef DHD_UCAST_UPNP
2756 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
2757 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
2758 #endif /* DHD_UCAST_UPNP */
2759 #ifdef DHD_IGMP_UCQUERY
2760 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
2761 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
2762 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
2763 #endif /* DHD_IGMP_UCQUERY */
2764 if (ucast_convert) {
2766 unsigned long flags;
2768 DHD_IF_STA_LIST_LOCK(ifp, flags);
2770 /* Convert upnp/igmp query to unicast for each assoc STA */
2771 list_for_each_entry(sta, &ifp->sta_list, list) {
2772 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
2773 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2774 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2775 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2778 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
2781 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2782 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2783 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2785 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2786 return NETDEV_TX_OK;
2788 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2790 /* There will be no STA info if the packet is coming from LAN host
2793 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
2797 /* Either taken by WMF or we should drop it.
2800 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2801 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2802 return NETDEV_TX_OK;
2804 /* Continue the transmit path */
2809 #endif /* DHD_WMF */
2811 #ifdef DHDTCPACK_SUPPRESS
2812 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
2813 /* If this packet has been hold or got freed, just return */
2814 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
2819 /* If this packet has replaced another packet and got freed, just return */
2820 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
2825 #endif /* DHDTCPACK_SUPPRESS */
2827 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
2831 ifp->stats.tx_dropped++;
2832 dhd->pub.tx_dropped++;
2836 #ifdef PROP_TXSTATUS
2837 /* tx_packets counter can counted only when wlfc is disabled */
2838 if (!dhd_wlfc_is_supported(&dhd->pub))
2841 dhd->pub.tx_packets++;
2842 ifp->stats.tx_packets++;
2843 ifp->stats.tx_bytes += datalen;
2847 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2848 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2850 /* Return ok: we always eat the packet */
2851 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2854 return NETDEV_TX_OK;
2860 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
2862 struct net_device *net;
2863 dhd_info_t *dhd = dhdp->info;
2866 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2870 if (ifidx == ALL_INTERFACES) {
2871 /* Flow control on all active interfaces */
2872 dhdp->txoff = state;
2873 for (i = 0; i < DHD_MAX_IFS; i++) {
2874 if (dhd->iflist[i]) {
2875 net = dhd->iflist[i]->net;
2877 netif_stop_queue(net);
2879 netif_wake_queue(net);
2884 if (dhd->iflist[ifidx]) {
2885 net = dhd->iflist[ifidx]->net;
2887 netif_stop_queue(net);
2889 netif_wake_queue(net);
2900 static const PKTTYPE_INFO packet_type_info[] =
2902 { ETHER_TYPE_IP, "IP" },
2903 { ETHER_TYPE_ARP, "ARP" },
2904 { ETHER_TYPE_BRCM, "BRCM" },
2905 { ETHER_TYPE_802_1X, "802.1X" },
2906 { ETHER_TYPE_WAI, "WAPI" },
2910 static const char *_get_packet_type_str(uint16 type)
2913 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2915 for (i = 0; i < n; i++) {
2916 if (packet_type_info[i].type == type)
2917 return packet_type_info[i].str;
2920 return packet_type_info[n].str;
2922 #endif /* DHD_RX_DUMP */
2927 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
2929 dhd_info_t *dhd = dhdp->info;
2931 return dhd->rxthread_enabled;
2933 #endif /* DHD_WMF */
2936 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
2938 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2939 struct sk_buff *skb;
2942 void *data, *pnext = NULL;
2945 wl_event_msg_t event;
2948 void *skbhead = NULL;
2949 void *skbprev = NULL;
2950 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2953 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2955 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2957 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
2958 struct ether_header *eh;
2960 pnext = PKTNEXT(dhdp->osh, pktbuf);
2961 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
2963 ifp = dhd->iflist[ifidx];
2965 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
2967 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2971 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
2973 /* Dropping only data packets before registering net device to avoid kernel panic */
2974 #ifndef PROP_TXSTATUS_VSDB
2975 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
2976 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
2978 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || (bcmdhd_prop_txstatus_vsdb && !dhd->pub.up)) &&
2979 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
2980 #endif /* PROP_TXSTATUS_VSDB */
2981 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
2983 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2988 #ifdef PROP_TXSTATUS
2989 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
2990 /* WLFC may send header only packet when
2991 there is an urgent message but no packet to
2994 PKTCFREE(dhdp->osh, pktbuf, FALSE);
2998 #ifdef DHD_L2_FILTER
2999 /* If block_ping is enabled drop the ping packet */
3000 if (dhdp->block_ping) {
3001 if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
3002 PKTFREE(dhdp->osh, pktbuf, FALSE);
3008 /* WMF processing for multicast packets */
3009 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
3013 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
3014 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
3017 /* The packet is taken by WMF. Continue to next iteration */
3020 /* Packet DROP decision by WMF. Toss it */
3021 DHD_ERROR(("%s: WMF decides to drop packet\n",
3023 PKTCFREE(dhdp->osh, pktbuf, FALSE);
3026 /* Continue the transmit path */
3030 #endif /* DHD_WMF */
3031 #ifdef DHDTCPACK_SUPPRESS
3032 dhd_tcpdata_info_get(dhdp, pktbuf);
3034 skb = PKTTONATIVE(dhdp->osh, pktbuf);
3036 ifp = dhd->iflist[ifidx];
3038 ifp = dhd->iflist[0];
3041 skb->dev = ifp->net;
3043 #ifdef PCIE_FULL_DONGLE
3044 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
3045 (!ifp->ap_isolate)) {
3046 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
3047 if (ETHER_ISUCAST(eh->ether_dhost)) {
3048 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
3049 dhd_sendpkt(dhdp, ifidx, pktbuf);
3053 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
3054 dhd_sendpkt(dhdp, ifidx, npktbuf);
3057 #endif /* PCIE_FULL_DONGLE */
3059 /* Get the protocol, maintain skb around eth_type_trans()
3060 * The main reason for this hack is for the limitation of
3061 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
3062 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
3063 * coping of the packet coming from the network stack to add
3064 * BDC, Hardware header etc, during network interface registration
3065 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
3066 * for BDC, Hardware header etc. and not just the ETH_HLEN
3071 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
3072 dump_data = skb->data;
3073 protocol = (dump_data[12] << 8) | dump_data[13];
3074 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
3075 #ifdef DHD_8021X_DUMP
3076 if (protocol == ETHER_TYPE_802_1X) {
3077 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3078 "ver %d, type %d, replay %d\n",
3079 dump_data[14], dump_data[15],
3082 #endif /* DHD_8021X_DUMP */
3083 #ifdef DHD_DHCP_DUMP
3084 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
3088 uint16 udp_port_pos;
3089 uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
3090 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
3092 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
3093 source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
3094 dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
3095 if (source_port == 0x0044 || dest_port == 0x0044) {
3096 dump_hex = (dump_data[udp_port_pos+249] << 8) |
3097 dump_data[udp_port_pos+250];
3098 if (dump_hex == 0x0101) {
3099 DHD_ERROR(("DHCP - DISCOVER [RX]\n"));
3100 } else if (dump_hex == 0x0102) {
3101 DHD_ERROR(("DHCP - OFFER [RX]\n"));
3102 } else if (dump_hex == 0x0103) {
3103 DHD_ERROR(("DHCP - REQUEST [RX]\n"));
3104 } else if (dump_hex == 0x0105) {
3105 DHD_ERROR(("DHCP - ACK [RX]\n"));
3107 DHD_ERROR(("DHCP - 0x%X [RX]\n", dump_hex));
3109 } else if (source_port == 0x0043 || dest_port == 0x0043) {
3110 DHD_ERROR(("DHCP - BOOTP [RX]\n"));
3113 #endif /* DHD_DHCP_DUMP */
3114 #if defined(DHD_RX_DUMP)
3115 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
3116 if (protocol != ETHER_TYPE_BRCM) {
3117 if (dump_data[0] == 0xFF) {
3118 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
3120 if ((dump_data[12] == 8) &&
3121 (dump_data[13] == 6)) {
3122 DHD_ERROR(("%s: ARP %d\n",
3123 __FUNCTION__, dump_data[0x15]));
3125 } else if (dump_data[0] & 1) {
3126 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
3127 __FUNCTION__, MAC2STRDBG(dump_data)));
3129 #ifdef DHD_RX_FULL_DUMP
3132 for (k = 0; k < skb->len; k++) {
3133 DHD_ERROR(("%02X ", dump_data[k]));
3139 #endif /* DHD_RX_FULL_DUMP */
3141 #endif /* DHD_RX_DUMP */
3143 skb->protocol = eth_type_trans(skb, skb->dev);
3145 if (skb->pkt_type == PACKET_MULTICAST) {
3146 dhd->pub.rx_multicast++;
3147 ifp->stats.multicast++;
3154 dhd_htsf_addrxts(dhdp, pktbuf);
3156 /* Strip header, count, deliver upward */
3158 skb_pull(skb, ETH_HLEN);
3160 /* Process special event packets and then discard them */
3161 memset(&event, 0, sizeof(event));
3162 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
3163 dhd_wl_host_event(dhd, &ifidx,
3164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3165 skb_mac_header(skb),
3168 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3172 wl_event_to_host_order(&event);
3174 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
3176 #if defined(PNO_SUPPORT)
3177 if (event.event_type == WLC_E_PFN_NET_FOUND) {
3178 /* enforce custom wake lock to garantee that Kernel not suspended */
3179 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
3181 #endif /* PNO_SUPPORT */
3183 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3184 PKTFREE(dhdp->osh, pktbuf, FALSE);
3186 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3188 tout_rx = DHD_PACKET_TIMEOUT_MS;
3190 #ifdef PROP_TXSTATUS
3191 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
3192 #endif /* PROP_TXSTATUS */
3195 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
3196 ifp = dhd->iflist[ifidx];
3199 ifp->net->last_rx = jiffies;
3201 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
3202 dhdp->dstats.rx_bytes += skb->len;
3203 dhdp->rx_packets++; /* Local count */
3204 ifp->stats.rx_bytes += skb->len;
3205 ifp->stats.rx_packets++;
3208 if (in_interrupt()) {
3211 if (dhd->rxthread_enabled) {
3215 PKTSETNEXT(dhdp->osh, skbprev, skb);
3219 /* If the receive is not processed inside an ISR,
3220 * the softirqd must be woken explicitly to service
3221 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3222 * by netif_rx_ni(), but in earlier kernels, we need
3223 * to do it manually.
3225 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3230 local_irq_save(flags);
3232 local_irq_restore(flags);
3233 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3238 if (dhd->rxthread_enabled && skbhead)
3239 dhd_sched_rxf(dhdp, skbhead);
3241 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
3242 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
3246 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
3248 /* Linux version has nothing to do */
3253 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
3255 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3256 struct ether_header *eh;
3258 #ifdef PROP_TXSTATUS
3259 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
3262 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
3264 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
3265 type = ntoh16(eh->ether_type);
3267 if (type == ETHER_TYPE_802_1X)
3268 atomic_dec(&dhd->pend_8021x_cnt);
3270 #ifdef PROP_TXSTATUS
3272 if ( (ifp != NULL) && dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
3273 uint datalen = PKTLEN(dhd->pub.osh, txp);
3276 dhd->pub.tx_packets++;
3277 ifp->stats.tx_packets++;
3278 ifp->stats.tx_bytes += datalen;
3280 ifp->stats.tx_dropped++;
3286 static struct net_device_stats *
3287 dhd_get_stats(struct net_device *net)
3289 dhd_info_t *dhd = DHD_DEV_INFO(net);
3293 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3295 ifidx = dhd_net2idx(dhd, net);
3296 if (ifidx == DHD_BAD_IF) {
3297 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
3299 memset(&net->stats, 0, sizeof(net->stats));
3303 ifp = dhd->iflist[ifidx];
3307 /* Use the protocol to get dongle stats */
3308 dhd_prot_dstats(&dhd->pub);
3314 dhd_watchdog_thread(void *data)
3316 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3317 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3318 /* This thread doesn't need any user-level access,
3319 * so get rid of all our resources
3321 if (dhd_watchdog_prio > 0) {
3322 struct sched_param param;
3323 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
3324 dhd_watchdog_prio:(MAX_RT_PRIO-1);
3325 setScheduler(current, SCHED_FIFO, ¶m);
3329 if (down_interruptible (&tsk->sema) == 0) {
3330 unsigned long flags;
3331 unsigned long jiffies_at_start = jiffies;
3332 unsigned long time_lapse;
3334 SMP_RD_BARRIER_DEPENDS();
3335 if (tsk->terminated) {
3339 if (dhd->pub.dongle_reset == FALSE) {
3340 DHD_TIMER(("%s:\n", __FUNCTION__));
3342 /* Call the bus module watchdog */
3343 dhd_bus_watchdog(&dhd->pub);
3346 DHD_GENERAL_LOCK(&dhd->pub, flags);
3347 /* Count the tick for reference */
3349 time_lapse = jiffies - jiffies_at_start;
3351 /* Reschedule the watchdog */
3352 if (dhd->wd_timer_valid)
3353 mod_timer(&dhd->timer,
3355 msecs_to_jiffies(dhd_watchdog_ms) -
3356 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
3357 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3363 complete_and_exit(&tsk->completed, 0);
3366 static void dhd_watchdog(ulong data)
3368 dhd_info_t *dhd = (dhd_info_t *)data;
3369 unsigned long flags;
3371 if (dhd->pub.dongle_reset) {
3375 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
3376 up(&dhd->thr_wdt_ctl.sema);
3380 /* Call the bus module watchdog */
3381 dhd_bus_watchdog(&dhd->pub);
3383 DHD_GENERAL_LOCK(&dhd->pub, flags);
3384 /* Count the tick for reference */
3387 /* Reschedule the watchdog */
3388 if (dhd->wd_timer_valid)
3389 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
3390 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3394 #ifdef ENABLE_ADAPTIVE_SCHED
3396 dhd_sched_policy(int prio)
3398 struct sched_param param;
3399 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
3400 param.sched_priority = 0;
3401 setScheduler(current, SCHED_NORMAL, ¶m);
3403 if (get_scheduler_policy(current) != SCHED_FIFO) {
3404 param.sched_priority = DHD_DEFAULT_RT_PRIORITY;
3405 setScheduler(current, SCHED_FIFO, ¶m);
3409 #endif /* ENABLE_ADAPTIVE_SCHED */
3410 #ifdef DEBUG_CPU_FREQ
3411 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3413 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3414 struct cpufreq_freqs *freq = data;
3418 if (val == CPUFREQ_POSTCHANGE) {
3419 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3420 freq->new, freq->cpu));
3421 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3427 #endif /* DEBUG_CPU_FREQ */
3429 dhd_dpc_thread(void *data)
3431 unsigned long timeout;
3432 unsigned int loopcnt, count;
3433 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3434 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3436 /* This thread doesn't need any user-level access,
3437 * so get rid of all our resources
3439 if (dhd_dpc_prio > 0)
3441 struct sched_param param;
3442 param.sched_priority = DHD_DEFAULT_RT_PRIORITY;
3443 setScheduler(current, SCHED_FIFO, ¶m);
3446 #ifdef CUSTOM_DPC_CPUCORE
3447 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
3449 #ifdef CUSTOM_SET_CPUCORE
3450 dhd->pub.current_dpc = current;
3451 #endif /* CUSTOM_SET_CPUCORE */
3452 /* Run until signal received */
3454 if (!binary_sema_down(tsk)) {
3455 #ifdef ENABLE_ADAPTIVE_SCHED
3456 dhd_sched_policy(dhd_dpc_prio);
3457 #endif /* ENABLE_ADAPTIVE_SCHED */
3458 SMP_RD_BARRIER_DEPENDS();
3459 if (tsk->terminated) {
3463 /* Call bus dpc unless it indicated down (then clean stop) */
3464 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3465 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
3466 timeout = jiffies + msecs_to_jiffies(100);
3469 /* DPC_CAPTURE(); */
3470 while (dhd_bus_dpc(dhd->pub.bus)) {
3472 if (time_after(jiffies, timeout) &&
3473 (loopcnt % 1000 == 0)) {
3476 msecs_to_jiffies(100);
3478 /* process all data */
3481 DHD_ERROR(("%s is consuming too much time"
3482 " Looped %u times for 1000 iterations in 100ms timeout\n",
3484 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
3485 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3489 dhd_bus_stop(dhd->pub.bus, TRUE);
3490 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3496 complete_and_exit(&tsk->completed, 0);
3500 dhd_rxf_thread(void *data)
3502 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
3503 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
3504 #if defined(WAIT_DEQUEUE)
3505 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
3506 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
3508 dhd_pub_t *pub = &dhd->pub;
3510 /* This thread doesn't need any user-level access,
3511 * so get rid of all our resources
3513 if (dhd_rxf_prio > 0)
3515 struct sched_param param;
3516 param.sched_priority = DHD_DEFAULT_RT_PRIORITY;
3517 setScheduler(current, SCHED_FIFO, ¶m);
3520 DAEMONIZE("dhd_rxf");
3521 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
3523 /* signal: thread has started */
3524 complete(&tsk->completed);
3525 #ifdef CUSTOM_SET_CPUCORE
3526 dhd->pub.current_rxf = current;
3527 #endif /* CUSTOM_SET_CPUCORE */
3528 /* Run until signal received */
3530 if (down_interruptible(&tsk->sema) == 0) {
3532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
3535 #ifdef ENABLE_ADAPTIVE_SCHED
3536 dhd_sched_policy(dhd_rxf_prio);
3537 #endif /* ENABLE_ADAPTIVE_SCHED */
3539 SMP_RD_BARRIER_DEPENDS();
3541 if (tsk->terminated) {
3544 skb = dhd_rxf_dequeue(pub);
3550 void *skbnext = PKTNEXT(pub->osh, skb);
3551 PKTSETNEXT(pub->osh, skb, NULL);
3553 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3557 local_irq_save(flags);
3559 local_irq_restore(flags);
3564 #if defined(WAIT_DEQUEUE)
3565 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
3567 watchdogTime = OSL_SYSUPTIME();
3571 DHD_OS_WAKE_UNLOCK(pub);
3576 complete_and_exit(&tsk->completed, 0);
3580 void dhd_dpc_kill(dhd_pub_t *dhdp)
3592 tasklet_kill(&dhd->tasklet);
3593 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
3595 #endif /* BCMPCIE */
3602 dhd = (dhd_info_t *)data;
3604 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3605 * down below , wake lock is set,
3606 * the tasklet is initialized in dhd_attach()
3608 /* Call bus dpc unless it indicated down (then clean stop) */
3609 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3610 if (dhd_bus_dpc(dhd->pub.bus))
3611 tasklet_schedule(&dhd->tasklet);
3613 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3615 dhd_bus_stop(dhd->pub.bus, TRUE);
3616 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3621 dhd_sched_dpc(dhd_pub_t *dhdp)
3623 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3625 DHD_OS_WAKE_LOCK(dhdp);
3626 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
3627 /* If the semaphore does not get up,
3628 * wake unlock should be done here
3630 if (!binary_sema_up(&dhd->thr_dpc_ctl))
3631 DHD_OS_WAKE_UNLOCK(dhdp);
3634 tasklet_schedule(&dhd->tasklet);
3639 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
3641 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3642 #ifdef RXF_DEQUEUE_ON_BUSY
3645 #endif /* RXF_DEQUEUE_ON_BUSY */
3647 DHD_OS_WAKE_LOCK(dhdp);
3649 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
3650 #ifdef RXF_DEQUEUE_ON_BUSY
3652 ret = dhd_rxf_enqueue(dhdp, skb);
3653 if (ret == BCME_OK || ret == BCME_ERROR)
3656 OSL_SLEEP(50); /* waiting for dequeueing */
3657 } while (retry-- > 0);
3659 if (retry <= 0 && ret == BCME_BUSY) {
3663 void *skbnext = PKTNEXT(dhdp->osh, skbp);
3664 PKTSETNEXT(dhdp->osh, skbp, NULL);
3668 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
3671 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3672 up(&dhd->thr_rxf_ctl.sema);
3675 #else /* RXF_DEQUEUE_ON_BUSY */
3677 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
3680 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
3681 up(&dhd->thr_rxf_ctl.sema);
3684 #endif /* RXF_DEQUEUE_ON_BUSY */
3688 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
3690 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
3696 memset(&ioc, 0, sizeof(ioc));
3698 ioc.cmd = WLC_GET_VAR;
3700 ioc.len = (uint)sizeof(buf);
3703 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3704 buf[sizeof(buf) - 1] = '\0';
3705 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3706 /* Check for older dongle image that doesn't support toe_ol */
3708 DHD_ERROR(("%s: toe not supported by device\n",
3709 dhd_ifname(&dhd->pub, ifidx)));
3713 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3717 memcpy(toe_ol, buf, sizeof(uint32));
3721 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
3723 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
3729 memset(&ioc, 0, sizeof(ioc));
3731 ioc.cmd = WLC_SET_VAR;
3733 ioc.len = (uint)sizeof(buf);
3736 /* Set toe_ol as requested */
3738 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3739 buf[sizeof(buf) - 1] = '\0';
3740 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
3742 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3743 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3744 dhd_ifname(&dhd->pub, ifidx), ret));
3748 /* Enable toe globally only if any components are enabled. */
3750 toe = (toe_ol != 0);
3753 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
3755 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3756 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3764 #if defined(WL_CFG80211)
3765 void dhd_set_scb_probe(dhd_pub_t *dhd)
3767 #define NUM_SCB_MAX_PROBE 3
3769 wl_scb_probe_t scb_probe;
3770 char iovbuf[WL_EVENTING_MASK_LEN + 12];
3772 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
3774 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
3777 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
3779 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3780 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
3782 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
3784 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
3786 bcm_mkiovar("scb_probe", (char *)&scb_probe,
3787 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
3788 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3789 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
3790 #undef NUM_SCB_MAX_PROBE
3793 #endif /* WL_CFG80211 */
3795 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
3797 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
3799 dhd_info_t *dhd = DHD_DEV_INFO(net);
3801 snprintf(info->driver, sizeof(info->driver), "wl");
3802 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
3805 struct ethtool_ops dhd_ethtool_ops = {
3806 .get_drvinfo = dhd_ethtool_get_drvinfo
3808 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
3811 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
3813 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
3815 struct ethtool_drvinfo info;
3816 char drvname[sizeof(info.driver)];
3819 struct ethtool_value edata;
3820 uint32 toe_cmpnt, csum_dir;
3824 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3826 /* all ethtool calls start with a cmd word */
3827 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
3831 case ETHTOOL_GDRVINFO:
3832 /* Copy out any request driver name */
3833 if (copy_from_user(&info, uaddr, sizeof(info)))
3835 strncpy(drvname, info.driver, sizeof(info.driver));
3836 drvname[sizeof(info.driver)-1] = '\0';
3838 /* clear struct for return */
3839 memset(&info, 0, sizeof(info));
3842 /* if dhd requested, identify ourselves */
3843 if (strcmp(drvname, "?dhd") == 0) {
3844 snprintf(info.driver, sizeof(info.driver), "dhd");
3845 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
3846 info.version[sizeof(info.version) - 1] = '\0';
3849 /* otherwise, require dongle to be up */
3850 else if (!dhd->pub.up) {
3851 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
3855 /* finally, report dongle driver type */
3856 else if (dhd->pub.iswl)
3857 snprintf(info.driver, sizeof(info.driver), "wl");
3859 snprintf(info.driver, sizeof(info.driver), "xx");
3861 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
3862 if (copy_to_user(uaddr, &info, sizeof(info)))
3864 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
3865 (int)sizeof(drvname), drvname, info.driver));
3869 /* Get toe offload components from dongle */
3870 case ETHTOOL_GRXCSUM:
3871 case ETHTOOL_GTXCSUM:
3872 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3875 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3878 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
3880 if (copy_to_user(uaddr, &edata, sizeof(edata)))
3884 /* Set toe offload components in dongle */
3885 case ETHTOOL_SRXCSUM:
3886 case ETHTOOL_STXCSUM:
3887 if (copy_from_user(&edata, uaddr, sizeof(edata)))
3890 /* Read the current settings, update and write back */
3891 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
3894 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
3896 if (edata.data != 0)
3897 toe_cmpnt |= csum_dir;
3899 toe_cmpnt &= ~csum_dir;
3901 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
3904 /* If setting TX checksum mode, tell Linux the new mode */
3905 if (cmd == ETHTOOL_STXCSUM) {
3907 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
3909 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
3921 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
3923 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
3928 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3935 dhd = (dhd_info_t *)dhdp->info;
3936 #if !defined(BCMPCIE)
3937 if (dhd->thr_dpc_ctl.thr_pid < 0) {
3938 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
3943 #ifdef CONFIG_MACH_UNIVERSAL5433
3944 /* old revision does not send hang message */
3945 if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
3947 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
3948 #endif /* CONFIG_MACH_UNIVERSAL5433 */
3949 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
3950 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
3951 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
3952 net_os_send_hang_message(net);
3958 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
3960 int bcmerror = BCME_OK;
3962 struct net_device *net;
3964 net = dhd_idx2net(pub, ifidx);
3966 bcmerror = BCME_BADARG;
3971 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
3973 /* check for local dhd ioctl and handle it */
3974 if (ioc->driver == DHD_IOCTL_MAGIC) {
3975 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
3977 pub->bcmerror = bcmerror;
3981 /* send to dongle (must be up, and wl). */
3982 if (pub->busstate != DHD_BUS_DATA) {
3983 bcmerror = BCME_DONGLE_DOWN;
3988 bcmerror = BCME_DONGLE_DOWN;
3993 * Flush the TX queue if required for proper message serialization:
3994 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
3995 * prevent M4 encryption and
3996 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
3997 * prevent disassoc frame being sent before WPS-DONE frame.
3999 if (ioc->cmd == WLC_SET_KEY ||
4000 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
4001 strncmp("wsec_key", data_buf, 9) == 0) ||
4002 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
4003 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
4004 ioc->cmd == WLC_DISASSOC)
4005 dhd_wait_pend8021x(net);
4009 /* short cut wl ioctl calls here */
4010 if (strcmp("htsf", data_buf) == 0) {
4011 dhd_ioctl_htsf_get(dhd, 0);
4015 if (strcmp("htsflate", data_buf) == 0) {
4017 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
4018 memset(&maxdelayts, 0, sizeof(tstamp_t));
4022 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
4023 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
4024 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
4025 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
4031 if (strcmp("htsfclear", data_buf) == 0) {
4032 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
4033 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
4034 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
4035 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
4039 if (strcmp("htsfhis", data_buf) == 0) {
4040 dhd_dump_htsfhisto(&vi_d1, "H to D");
4041 dhd_dump_htsfhisto(&vi_d2, "D to D");
4042 dhd_dump_htsfhisto(&vi_d3, "D to H");
4043 dhd_dump_htsfhisto(&vi_d4, "H to H");
4046 if (strcmp("tsport", data_buf) == 0) {
4048 memcpy(&tsport, data_buf + 7, 4);
4050 DHD_ERROR(("current timestamp port: %d \n", tsport));
4055 #endif /* WLMEDIA_HTSF */
4057 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
4058 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
4060 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
4062 bcmerror = BCME_UNSUPPORTED;
4066 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
4069 dhd_check_hang(net, pub, bcmerror);
4075 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
4077 dhd_info_t *dhd = DHD_DEV_INFO(net);
4082 void *local_buf = NULL;
4085 DHD_OS_WAKE_LOCK(&dhd->pub);
4086 DHD_PERIM_LOCK(&dhd->pub);
4088 /* Interface up check for built-in type */
4089 if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
4090 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__));
4091 DHD_PERIM_UNLOCK(&dhd->pub);
4092 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4096 /* send to dongle only if we are not waiting for reload already */
4097 if (dhd->pub.hang_was_sent) {
4098 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
4099 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
4100 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4101 return OSL_ERROR(BCME_DONGLE_DOWN);
4104 ifidx = dhd_net2idx(dhd, net);
4105 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
4107 if (ifidx == DHD_BAD_IF) {
4108 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
4109 DHD_PERIM_UNLOCK(&dhd->pub);
4110 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4114 #if defined(WL_WIRELESS_EXT)
4115 /* linux wireless extensions */
4116 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
4117 /* may recurse, do NOT lock */
4118 ret = wl_iw_ioctl(net, ifr, cmd);
4119 DHD_PERIM_UNLOCK(&dhd->pub);
4120 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4123 #endif /* defined(WL_WIRELESS_EXT) */
4125 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
4126 if (cmd == SIOCETHTOOL) {
4127 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
4128 DHD_PERIM_UNLOCK(&dhd->pub);
4129 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4132 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
4134 if (cmd == SIOCDEVPRIVATE+1) {
4135 ret = wl_android_priv_cmd(net, ifr, cmd);
4136 dhd_check_hang(net, &dhd->pub, ret);
4137 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4141 if (cmd != SIOCDEVPRIVATE) {
4142 DHD_PERIM_UNLOCK(&dhd->pub);
4143 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4147 memset(&ioc, 0, sizeof(ioc));
4149 #ifdef CONFIG_COMPAT
4150 if (is_compat_task()) {
4151 compat_wl_ioctl_t compat_ioc;
4152 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
4153 bcmerror = BCME_BADADDR;
4156 ioc.cmd = compat_ioc.cmd;
4157 ioc.buf = compat_ptr(compat_ioc.buf);
4158 ioc.len = compat_ioc.len;
4159 ioc.set = compat_ioc.set;
4160 ioc.used = compat_ioc.used;
4161 ioc.needed = compat_ioc.needed;
4162 /* To differentiate between wl and dhd read 4 more byes */
4163 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
4164 sizeof(uint)) != 0)) {
4165 bcmerror = BCME_BADADDR;
4169 #endif /* CONFIG_COMPAT */
4171 /* Copy the ioc control structure part of ioctl request */
4172 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
4173 bcmerror = BCME_BADADDR;
4177 /* To differentiate between wl and dhd read 4 more byes */
4178 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
4179 sizeof(uint)) != 0)) {
4180 bcmerror = BCME_BADADDR;
4185 if (!capable(CAP_NET_ADMIN)) {
4186 bcmerror = BCME_EPERM;
4191 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
4192 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
4193 bcmerror = BCME_NOMEM;
4197 DHD_PERIM_UNLOCK(&dhd->pub);
4198 if (copy_from_user(local_buf, ioc.buf, buflen)) {
4199 DHD_PERIM_LOCK(&dhd->pub);
4200 bcmerror = BCME_BADADDR;
4203 DHD_PERIM_LOCK(&dhd->pub);
4205 *(char *)(local_buf + buflen) = '\0';
4208 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
4210 if (!bcmerror && buflen && local_buf && ioc.buf) {
4211 DHD_PERIM_UNLOCK(&dhd->pub);
4212 if (copy_to_user(ioc.buf, local_buf, buflen))
4214 DHD_PERIM_LOCK(&dhd->pub);
4219 MFREE(dhd->pub.osh, local_buf, buflen+1);
4221 DHD_PERIM_UNLOCK(&dhd->pub);
4222 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4224 return OSL_ERROR(bcmerror);
4230 dhd_stop(struct net_device *net)
4233 dhd_info_t *dhd = DHD_DEV_INFO(net);
4234 DHD_OS_WAKE_LOCK(&dhd->pub);
4235 DHD_PERIM_LOCK(&dhd->pub);
4236 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
4237 if (dhd->pub.up == 0) {
4241 dhd_if_flush_sta(DHD_DEV_IFP(net));
4244 ifidx = dhd_net2idx(dhd, net);
4245 BCM_REFERENCE(ifidx);
4247 /* Set state and stop OS transmissions */
4248 netif_stop_queue(net);
4253 wl_cfg80211_down(NULL);
4256 * For CFG80211: Clean up all the left over virtual interfaces
4257 * when the primary Interface is brought down. [ifconfig wlan0 down]
4259 if (!dhd_download_fw_on_driverload) {
4260 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
4261 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
4265 dhd_net_if_lock_local(dhd);
4266 for (i = 1; i < DHD_MAX_IFS; i++)
4267 dhd_remove_if(&dhd->pub, i, FALSE);
4268 #ifdef ARP_OFFLOAD_SUPPORT
4269 if (dhd_inetaddr_notifier_registered) {
4270 dhd_inetaddr_notifier_registered = FALSE;
4271 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
4273 #endif /* ARP_OFFLOAD_SUPPORT */
4275 if (dhd_inet6addr_notifier_registered) {
4276 dhd_inet6addr_notifier_registered = FALSE;
4277 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
4279 #endif /* CONFIG_IPV6 */
4280 dhd_net_if_unlock_local(dhd);
4284 #endif /* WL_CFG80211 */
4286 #ifdef PROP_TXSTATUS
4287 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
4289 /* Stop the protocol module */
4290 dhd_prot_stop(&dhd->pub);
4292 OLD_MOD_DEC_USE_COUNT;
4294 #if defined(WL_CFG80211)
4295 if (ifidx == 0 && !dhd_download_fw_on_driverload)
4296 wl_android_wifi_off(net);
4298 dhd->pub.rxcnt_timeout = 0;
4299 dhd->pub.txcnt_timeout = 0;
4301 dhd->pub.hang_was_sent = 0;
4303 /* Clear country spec for for built-in type driver */
4304 if (!dhd_download_fw_on_driverload) {
4305 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
4306 dhd->pub.dhd_cspec.rev = 0;
4307 dhd->pub.dhd_cspec.ccode[0] = 0x00;
4310 DHD_PERIM_UNLOCK(&dhd->pub);
4311 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4315 #if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
4316 defined(USE_INITIAL_SHORT_DWELL_TIME))
4317 extern bool g_first_broadcast_scan;
4318 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
4321 static int dhd_interworking_enable(dhd_pub_t *dhd)
4323 char iovbuf[WLC_IOCTL_SMLEN];
4324 uint32 enable = true;
4327 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
4328 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4329 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
4332 if (ret == BCME_OK) {
4333 /* basic capabilities for HS20 REL2 */
4334 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
4335 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
4336 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
4337 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4338 DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
4347 dhd_open(struct net_device *net)
4349 dhd_info_t *dhd = DHD_DEV_INFO(net);
4354 char iovbuf[WLC_IOCTL_SMLEN];
4355 dbus_config_t config;
4356 uint32 agglimit = 0;
4357 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
4358 #endif /* BCM_FD_AGGR */
4365 DHD_OS_WAKE_LOCK(&dhd->pub);
4366 DHD_PERIM_LOCK(&dhd->pub);
4367 dhd->pub.dongle_trap_occured = 0;
4368 dhd->pub.hang_was_sent = 0;
4370 #if !defined(WL_CFG80211)
4372 * Force start if ifconfig_up gets called before START command
4373 * We keep WEXT's wl_control_wl_start to provide backward compatibility
4374 * This should be removed in the future
4376 ret = wl_control_wl_start(net);
4378 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4385 ifidx = dhd_net2idx(dhd, net);
4386 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
4389 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
4394 if (!dhd->iflist[ifidx]) {
4395 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
4401 atomic_set(&dhd->pend_8021x_cnt, 0);
4402 #if defined(WL_CFG80211)
4403 if (!dhd_download_fw_on_driverload) {
4404 DHD_ERROR(("\n%s\n", dhd_version));
4405 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
4406 g_first_broadcast_scan = TRUE;
4407 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
4408 ret = wl_android_wifi_on(net);
4410 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
4411 __FUNCTION__, ret));
4418 if (dhd->pub.busstate != DHD_BUS_DATA) {
4420 /* try to bring up bus */
4421 DHD_PERIM_UNLOCK(&dhd->pub);
4422 ret = dhd_bus_start(&dhd->pub);
4423 DHD_PERIM_LOCK(&dhd->pub);
4425 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
4433 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
4436 memset(iovbuf, 0, sizeof(iovbuf));
4437 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
4438 iovbuf, sizeof(iovbuf));
4440 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
4441 agglimit = *(uint32 *)iovbuf;
4442 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
4443 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
4444 config.aggr_param.maxrxsize *= 2; /* temporary double rx size */
4445 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
4446 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
4447 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
4448 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
4451 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
4452 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
4455 /* Set aggregation for TX */
4456 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
4457 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
4459 /* Set aggregation for RX */
4460 memset(iovbuf, 0, sizeof(iovbuf));
4461 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
4462 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
4463 dhd->pub.info->fdaggr = (rpc_agg != 0);
4465 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
4467 #endif /* BCM_FD_AGGR */
4469 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
4470 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
4473 /* Get current TOE mode from dongle */
4474 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
4475 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
4477 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
4480 #if defined(WL_CFG80211)
4481 if (unlikely(wl_cfg80211_up(NULL))) {
4482 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
4486 if (!dhd_download_fw_on_driverload) {
4487 #ifdef ARP_OFFLOAD_SUPPORT
4488 dhd->pend_ipaddr = 0;
4489 if (!dhd_inetaddr_notifier_registered) {
4490 dhd_inetaddr_notifier_registered = TRUE;
4491 register_inetaddr_notifier(&dhd_inetaddr_notifier);
4493 #endif /* ARP_OFFLOAD_SUPPORT */
4495 if (!dhd_inet6addr_notifier_registered) {
4496 dhd_inet6addr_notifier_registered = TRUE;
4497 register_inet6addr_notifier(&dhd_inet6addr_notifier);
4499 #endif /* CONFIG_IPV6 */
4501 dhd_set_scb_probe(&dhd->pub);
4502 #endif /* WL_CFG80211 */
4505 /* Allow transmit calls */
4506 netif_start_queue(net);
4510 dhd_dbg_init(&dhd->pub);
4513 OLD_MOD_INC_USE_COUNT;
4518 DHD_PERIM_UNLOCK(&dhd->pub);
4519 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4525 int dhd_do_driver_init(struct net_device *net)
4527 dhd_info_t *dhd = NULL;
4530 DHD_ERROR(("Primary Interface not initialized \n"));
4535 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
4536 dhd = DHD_DEV_INFO(net);
4538 /* If driver is already initialized, do nothing
4540 if (dhd->pub.busstate == DHD_BUS_DATA) {
4541 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
4545 if (dhd_open(net) < 0) {
4546 DHD_ERROR(("Driver Init Failed \n"));
4554 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4558 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4562 /* handle IF event caused by wl commands, SoftAP, WEXT and
4563 * anything else. This has to be done asynchronously otherwise
4564 * DPC will be blocked (and iovars will timeout as DPC has no chance
4565 * to read the response back)
4567 if (ifevent->ifidx > 0) {
4568 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4570 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4571 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4572 strncpy(if_event->name, name, IFNAMSIZ);
4573 if_event->name[IFNAMSIZ - 1] = '\0';
4574 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
4575 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
4582 dhd_p2p_ifdel(dhd_pub_t *dhdpub, int ifidx)
4584 dhd_info_t *dhdinfo = dhdpub->info;
4585 dhdinfo->p2p_del_ifp = dhdinfo->iflist[ifidx];
4586 dhdinfo->iflist[ifidx] = NULL;
4590 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4592 dhd_if_event_t *if_event;
4594 #if defined(WL_CFG80211) && !defined(P2PONEINT)
4595 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4597 #endif /* WL_CFG80211 */
4599 /* handle IF event caused by wl commands, SoftAP, WEXT and
4602 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4603 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4604 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4605 strncpy(if_event->name, name, IFNAMSIZ);
4606 if_event->name[IFNAMSIZ - 1] = '\0';
4607 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
4608 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
4613 /* unregister and free the existing net_device interface (if any) in iflist and
4614 * allocate a new one. the slot is reused. this function does NOT register the
4615 * new interface to linux kernel. dhd_register_if does the job
4618 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
4619 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
4621 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4624 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
4625 ifp = dhdinfo->iflist[ifidx];
4628 if (ifp->net != NULL) {
4629 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
4631 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
4633 /* in unregister_netdev case, the interface gets freed by net->destructor
4634 * (which is set to free_netdev)
4636 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4637 free_netdev(ifp->net);
4639 netif_stop_queue(ifp->net);
4641 unregister_netdev(ifp->net);
4643 unregister_netdevice(ifp->net);
4648 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
4650 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
4655 memset(ifp, 0, sizeof(dhd_if_t));
4656 ifp->info = dhdinfo;
4658 ifp->bssidx = bssidx;
4660 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
4662 /* Allocate etherdev, including space for private structure */
4663 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
4664 if (ifp->net == NULL) {
4665 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
4669 /* Setup the dhd interface's netdevice private structure. */
4670 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
4672 if (name && name[0]) {
4673 strncpy(ifp->net->name, name, IFNAMSIZ);
4674 ifp->net->name[IFNAMSIZ - 1] = '\0';
4678 ifp->net->destructor = free_netdev;
4680 ifp->net->destructor = dhd_netdev_free;
4682 ifp->net->destructor = free_netdev;
4683 #endif /* WL_CFG80211 */
4684 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
4685 ifp->name[IFNAMSIZ - 1] = '\0';
4686 dhdinfo->iflist[ifidx] = ifp;
4688 #ifdef PCIE_FULL_DONGLE
4689 /* Initialize STA info list */
4690 INIT_LIST_HEAD(&ifp->sta_list);
4691 DHD_IF_STA_LIST_LOCK_INIT(ifp);
4692 #endif /* PCIE_FULL_DONGLE */
4698 if (ifp->net != NULL) {
4699 dhd_dev_priv_clear(ifp->net);
4700 free_netdev(ifp->net);
4703 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4706 dhdinfo->iflist[ifidx] = NULL;
4710 /* unregister and free the the net_device interface associated with the indexed
4711 * slot, also free the slot memory and set the slot pointer to NULL
4714 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
4716 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4719 ifp = dhdinfo->iflist[ifidx];
4721 dhdinfo->iflist[ifidx] = NULL;
4722 if (ifp->net != NULL) {
4723 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
4725 /* in unregister_netdev case, the interface gets freed by net->destructor
4726 * (which is set to free_netdev)
4728 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4729 free_netdev(ifp->net);
4731 netif_stop_queue(ifp->net);
4737 custom_rps_map_clear(ifp->net->_rx);
4738 #endif /* SET_RPS_CPUS */
4740 unregister_netdev(ifp->net);
4742 unregister_netdevice(ifp->net);
4747 dhd_wmf_cleanup(dhdpub, ifidx);
4748 #endif /* DHD_WMF */
4750 dhd_if_del_sta_list(ifp);
4752 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4760 dhd_remove_p2p_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
4762 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4763 dhd_if_t *ifp = dhdinfo->p2p_del_ifp;
4765 ifp = dhdinfo->p2p_del_ifp;
4766 dhdinfo->p2p_del_ifp=NULL;
4769 if (ifp->net != NULL) {
4770 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
4772 /* in unregister_netdev case, the interface gets freed by net->destructor
4773 * (which is set to free_netdev)
4775 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4776 free_netdev(ifp->net);
4778 netif_stop_queue(ifp->net);
4783 unregister_netdev(ifp->net);
4785 unregister_netdevice(ifp->net);
4790 //Take care of this once WMF is enabled
4791 //dhd_wmf_cleanup(dhdpub, ifidx);
4792 #endif /* DHD_WMF */
4794 dhd_if_del_sta_list(ifp);
4796 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4804 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
4805 static struct net_device_ops dhd_ops_pri = {
4806 .ndo_open = dhd_open,
4807 .ndo_stop = dhd_stop,
4808 .ndo_get_stats = dhd_get_stats,
4809 .ndo_do_ioctl = dhd_ioctl_entry,
4810 .ndo_start_xmit = dhd_start_xmit,
4811 .ndo_set_mac_address = dhd_set_mac_address,
4812 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4813 .ndo_set_rx_mode = dhd_set_multicast_list,
4815 .ndo_set_multicast_list = dhd_set_multicast_list,
4819 static struct net_device_ops dhd_ops_virt = {
4820 .ndo_get_stats = dhd_get_stats,
4821 .ndo_do_ioctl = dhd_ioctl_entry,
4822 .ndo_start_xmit = dhd_start_xmit,
4823 .ndo_set_mac_address = dhd_set_mac_address,
4824 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4825 .ndo_set_rx_mode = dhd_set_multicast_list,
4827 .ndo_set_multicast_list = dhd_set_multicast_list,
4832 extern int wl_cfgp2p_if_open(struct net_device *net);
4833 extern int wl_cfgp2p_if_stop(struct net_device *net);
4835 static struct net_device_ops dhd_cfgp2p_ops_virt = {
4836 .ndo_open = wl_cfgp2p_if_open,
4837 .ndo_stop = wl_cfgp2p_if_stop,
4838 .ndo_get_stats = dhd_get_stats,
4839 .ndo_do_ioctl = dhd_ioctl_entry,
4840 .ndo_start_xmit = dhd_start_xmit,
4841 .ndo_set_mac_address = dhd_set_mac_address,
4842 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
4843 .ndo_set_rx_mode = dhd_set_multicast_list,
4845 .ndo_set_multicast_list = dhd_set_multicast_list,
4848 #endif /* P2PONEINT */
4849 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4852 extern void debugger_init(void *bus_handle);
4856 #ifdef SHOW_LOGTRACE
4857 static char *logstrs_path = "/root/logstrs.bin";
4858 module_param(logstrs_path, charp, S_IRUGO);
4861 dhd_init_logstrs_array(dhd_event_log_t *temp)
4863 struct file *filep = NULL;
4866 char *raw_fmts = NULL;
4867 int logstrs_size = 0;
4869 logstr_header_t *hdr = NULL;
4870 uint32 *lognums = NULL;
4871 char *logstrs = NULL;
4879 filep = filp_open(logstrs_path, O_RDONLY, 0);
4880 if (IS_ERR(filep)) {
4881 DHD_ERROR(("Failed to open the file logstrs.bin in %s", __FUNCTION__));
4884 error = vfs_stat(logstrs_path, &stat);
4886 DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
4889 logstrs_size = (int) stat.size;
4891 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
4892 if (raw_fmts == NULL) {
4893 DHD_ERROR(("Failed to allocate raw_fmts memory"));
4896 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
4897 DHD_ERROR(("Error: Log strings file read failed"));
4901 /* Remember header from the logstrs.bin file */
4902 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
4903 sizeof(logstr_header_t));
4905 if (hdr->log_magic == LOGSTRS_MAGIC) {
4907 * logstrs.bin start with header.
4909 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
4910 ram_index = (hdr->ram_lognums_offset -
4911 hdr->rom_lognums_offset) / sizeof(uint32);
4912 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
4913 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
4916 * Legacy logstrs.bin format without header.
4918 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
4919 if (num_fmts == 0) {
4920 /* Legacy ROM/RAM logstrs.bin format:
4921 * - ROM 'lognums' section
4922 * - RAM 'lognums' section
4923 * - ROM 'logstrs' section.
4924 * - RAM 'logstrs' section.
4926 * 'lognums' is an array of indexes for the strings in the
4927 * 'logstrs' section. The first uint32 is 0 (index of first
4928 * string in ROM 'logstrs' section).
4930 * The 4324b5 is the only ROM that uses this legacy format. Use the
4931 * fixed number of ROM fmtnums to find the start of the RAM
4932 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
4933 * find the ROM 'logstrs' section.
4935 #define NUM_4324B5_ROM_FMTS 186
4936 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
4937 ram_index = NUM_4324B5_ROM_FMTS;
4938 lognums = (uint32 *) raw_fmts;
4939 num_fmts = ram_index;
4940 logstrs = (char *) &raw_fmts[num_fmts << 2];
4941 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
4943 logstrs = (char *) &raw_fmts[num_fmts << 2];
4946 /* Legacy RAM-only logstrs.bin format:
4947 * - RAM 'lognums' section
4948 * - RAM 'logstrs' section.
4950 * 'lognums' is an array of indexes for the strings in the
4951 * 'logstrs' section. The first uint32 is an index to the
4952 * start of 'logstrs'. Therefore, if this index is divided
4953 * by 'sizeof(uint32)' it provides the number of logstr
4957 lognums = (uint32 *) raw_fmts;
4958 logstrs = (char *) &raw_fmts[num_fmts << 2];
4961 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
4963 DHD_ERROR(("Failed to allocate fmts memory"));
4967 for (i = 0; i < num_fmts; i++) {
4968 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
4969 * (they are 0-indexed relative to 'rom_logstrs_offset').
4971 * RAM lognums are already indexed to point to the correct RAM logstrs (they
4972 * are 0-indexed relative to the start of the logstrs.bin file).
4974 if (i == ram_index) {
4977 fmts[i] = &logstrs[lognums[i]];
4980 temp->raw_fmts = raw_fmts;
4981 temp->num_fmts = num_fmts;
4982 filp_close(filep, NULL);
4991 filp_close(filep, NULL);
4996 #endif /* SHOW_LOGTRACE */
4998 #ifdef CUSTOMER_HW20
4999 static char init_ccode[8] = {'\0'};
5000 module_param_string(init_ccode, init_ccode, 8, 0);
5002 dhd_parse_ccspec(const char *spec, char *ccode, int *regrev)
5005 char *endptr = NULL;
5009 revstr = strchr(spec, '/');
5012 rev = bcm_strtoul(revstr + 1, &endptr, 10);
5013 if (*endptr != '\0') {
5014 /* not all the value string was parsed by strtol */
5015 DHD_ERROR(("Could not parse \"%s\" as a regulatory revision "
5016 "in the country string \"%s\"\n",
5023 ccode_len = (int)(uintptr)(revstr - spec);
5025 ccode_len = (int)strlen(spec);
5027 if (ccode_len > 3) {
5028 DHD_ERROR(("Could not parse a 2-3 char country code "
5029 "in the country string \"%s\"\n", spec));
5033 memcpy(ccode, spec, ccode_len);
5034 ccode[ccode_len] = '\0';
5041 dhd_init_ccode(struct net_device *ndev)
5044 wl_country_t cspec = {{0}, 0, {0}};
5047 if (init_ccode[0] == '\0')
5050 memset(&cspec, 0, sizeof(cspec));
5053 DHD_ERROR(("Country Code = %s, len = %d \n", init_ccode, (int)strlen(init_ccode)));
5055 if (strncmp(init_ccode, "UY/1", 4) == 0) {
5057 ret = wldev_ioctl(ndev, WLC_SET_BAND, &band, sizeof(band), true);
5058 WL_INFORM(("set BAND A \n"));
5061 ret = dhd_parse_ccspec(init_ccode, cspec.country_abbrev, &cspec.rev);
5063 if (ret != BCME_OK) {
5064 DHD_ERROR(("ERROR occured when ccode is set \n"));
5068 if (cspec.rev != -1)
5069 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
5071 ret = wldev_ioctl(ndev, WLC_SET_COUNTRY, &cspec, sizeof(cspec), true);
5072 if (ret != BCME_OK) {
5073 DHD_ERROR(("%s: country code set failed %d\n", __FUNCTION__, ret));
5077 memset(&cspec, 0, sizeof(cspec));
5079 ret = wldev_ioctl(ndev, WLC_GET_COUNTRY, &cspec, sizeof(cspec), false);
5082 DHD_ERROR(("%s: country code get failed %d\n", __FUNCTION__, ret));
5084 DHD_ERROR(("Get Country Code = %s %d %s\n",
5085 cspec.country_abbrev, cspec.rev, cspec.ccode));
5089 #endif /* CUSTOMER_HW20 */
5092 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
5094 dhd_info_t *dhd = NULL;
5095 struct net_device *net = NULL;
5096 char if_name[IFNAMSIZ] = {'\0'};
5097 uint32 bus_type = -1;
5098 uint32 bus_num = -1;
5099 uint32 slot_num = -1;
5100 wifi_adapter_info_t *adapter = NULL;
5102 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
5103 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5105 /* will implement get_ids for DBUS later */
5106 #if defined(BCMSDIO)
5107 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
5109 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
5111 /* Allocate primary dhd_info */
5112 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
5114 dhd = MALLOC(osh, sizeof(dhd_info_t));
5116 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
5120 memset(dhd, 0, sizeof(dhd_info_t));
5121 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
5123 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
5126 dhd->adapter = adapter;
5128 #ifdef GET_CUSTOM_MAC_ENABLE
5129 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
5130 #endif /* GET_CUSTOM_MAC_ENABLE */
5131 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
5132 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
5134 /* Initialize thread based operation and lock */
5135 sema_init(&dhd->sdsem, 1);
5137 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
5138 * This is indeed a hack but we have to make it work properly before we have a better
5141 dhd_update_fw_nv_path(dhd);
5143 /* Link to info module */
5144 dhd->pub.info = dhd;
5147 /* Link to bus module */
5149 dhd->pub.hdrlen = bus_hdrlen;
5151 /* Set network interface name if it was provided as module parameter */
5152 if (iface_name[0]) {
5155 strncpy(if_name, iface_name, IFNAMSIZ);
5156 if_name[IFNAMSIZ - 1] = 0;
5157 len = strlen(if_name);
5158 ch = if_name[len - 1];
5159 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
5160 strcat(if_name, "%d");
5162 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
5165 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
5167 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
5170 net->netdev_ops = NULL;
5173 sema_init(&dhd->proto_sem, 1);
5175 #ifdef PROP_TXSTATUS
5176 spin_lock_init(&dhd->wlfc_spinlock);
5178 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
5179 dhd->pub.plat_init = dhd_wlfc_plat_init;
5180 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
5182 #ifdef DHD_WLFC_THREAD
5183 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
5184 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
5185 if (IS_ERR(dhd->pub.wlfc_thread)) {
5186 DHD_ERROR(("create wlfc thread failed\n"));
5189 wake_up_process(dhd->pub.wlfc_thread);
5191 #endif /* DHD_WLFC_THREAD */
5192 #endif /* PROP_TXSTATUS */
5194 /* Initialize other structure content */
5195 init_waitqueue_head(&dhd->ioctl_resp_wait);
5196 init_waitqueue_head(&dhd->d3ack_wait);
5197 init_waitqueue_head(&dhd->ctrl_wait);
5199 /* Initialize the spinlocks */
5200 spin_lock_init(&dhd->sdlock);
5201 spin_lock_init(&dhd->txqlock);
5202 spin_lock_init(&dhd->dhd_lock);
5203 spin_lock_init(&dhd->rxf_lock);
5204 #if defined(RXFRAME_THREAD)
5205 dhd->rxthread_enabled = TRUE;
5206 #endif /* defined(RXFRAME_THREAD) */
5208 #ifdef DHDTCPACK_SUPPRESS
5209 spin_lock_init(&dhd->tcpack_lock);
5210 #endif /* DHDTCPACK_SUPPRESS */
5212 /* Initialize Wakelock stuff */
5213 spin_lock_init(&dhd->wakelock_spinlock);
5214 dhd->wakelock_counter = 0;
5215 dhd->wakelock_wd_counter = 0;
5216 dhd->wakelock_rx_timeout_enable = 0;
5217 dhd->wakelock_ctrl_timeout_enable = 0;
5218 #ifdef CONFIG_HAS_WAKELOCK
5219 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
5220 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
5221 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
5222 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
5223 #ifdef BCMPCIE_OOB_HOST_WAKE
5224 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
5225 #endif /* BCMPCIE_OOB_HOST_WAKE */
5226 #endif /* CONFIG_HAS_WAKELOCK */
5227 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5228 mutex_init(&dhd->dhd_net_if_mutex);
5229 mutex_init(&dhd->dhd_suspend_mutex);
5231 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
5233 /* Attach and link in the protocol */
5234 if (dhd_prot_attach(&dhd->pub) != 0) {
5235 DHD_ERROR(("dhd_prot_attach failed\n"));
5238 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
5241 /* Attach and link in the cfg80211 */
5242 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
5243 DHD_ERROR(("wl_cfg80211_attach failed\n"));
5247 dhd_monitor_init(&dhd->pub);
5248 dhd_state |= DHD_ATTACH_STATE_CFG80211;
5250 #if defined(WL_WIRELESS_EXT)
5251 /* Attach and link in the iw */
5252 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
5253 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
5254 DHD_ERROR(("wl_iw_attach failed\n"));
5257 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
5259 #endif /* defined(WL_WIRELESS_EXT) */
5261 #ifdef SHOW_LOGTRACE
5262 dhd_init_logstrs_array(&dhd->event_data);
5263 #endif /* SHOW_LOGTRACE */
5265 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
5266 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
5271 /* Set up the watchdog timer */
5272 init_timer(&dhd->timer);
5273 dhd->timer.data = (ulong)dhd;
5274 dhd->timer.function = dhd_watchdog;
5275 dhd->default_wd_interval = dhd_watchdog_ms;
5277 if (dhd_watchdog_prio >= 0) {
5278 /* Initialize watchdog thread */
5279 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
5282 dhd->thr_wdt_ctl.thr_pid = -1;
5286 debugger_init((void *) bus);
5289 /* Set up the bottom half handler */
5290 if (dhd_dpc_prio >= 0) {
5291 /* Initialize DPC thread */
5292 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
5294 /* use tasklet for dpc */
5295 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
5296 dhd->thr_dpc_ctl.thr_pid = -1;
5299 if (dhd->rxthread_enabled) {
5300 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
5301 /* Initialize RXF thread */
5302 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
5305 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
5307 #if defined(CONFIG_PM_SLEEP)
5308 if (!dhd_pm_notifier_registered) {
5309 dhd_pm_notifier_registered = TRUE;
5310 register_pm_notifier(&dhd_pm_notifier);
5312 #endif /* CONFIG_PM_SLEEP */
5314 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5315 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
5316 dhd->early_suspend.suspend = dhd_early_suspend;
5317 dhd->early_suspend.resume = dhd_late_resume;
5318 register_early_suspend(&dhd->early_suspend);
5319 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
5320 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
5322 #ifdef ARP_OFFLOAD_SUPPORT
5323 dhd->pend_ipaddr = 0;
5324 if (!dhd_inetaddr_notifier_registered) {
5325 dhd_inetaddr_notifier_registered = TRUE;
5326 register_inetaddr_notifier(&dhd_inetaddr_notifier);
5328 #endif /* ARP_OFFLOAD_SUPPORT */
5330 if (!dhd_inet6addr_notifier_registered) {
5331 dhd_inet6addr_notifier_registered = TRUE;
5332 register_inet6addr_notifier(&dhd_inet6addr_notifier);
5335 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
5336 #ifdef DEBUG_CPU_FREQ
5337 dhd->new_freq = alloc_percpu(int);
5338 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
5339 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
5341 #ifdef DHDTCPACK_SUPPRESS
5343 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
5344 #elif defined(BCMPCIE)
5345 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
5347 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5348 #endif /* BCMSDIO */
5349 #endif /* DHDTCPACK_SUPPRESS */
5351 dhd_state |= DHD_ATTACH_STATE_DONE;
5352 dhd->dhd_state = dhd_state;
5355 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
5357 #endif /* CUSTOMER_HW20 && WLANAUDIO */
5361 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
5362 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
5363 __FUNCTION__, dhd_state, &dhd->pub));
5364 dhd->dhd_state = dhd_state;
5365 dhd_detach(&dhd->pub);
5366 dhd_free(&dhd->pub);
5372 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
5374 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
5375 return DHD_FLAG_HOSTAP_MODE;
5376 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
5377 return DHD_FLAG_P2P_MODE;
5378 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
5379 return DHD_FLAG_IBSS_MODE;
5380 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
5381 return DHD_FLAG_MFG_MODE;
5383 return DHD_FLAG_STA_MODE;
5386 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
5390 const char *fw = NULL;
5391 const char *nv = NULL;
5392 wifi_adapter_info_t *adapter = dhdinfo->adapter;
5395 /* Update firmware and nvram path. The path may be from adapter info or module parameter
5396 * The path from adapter info is used for initialization only (as it won't change).
5398 * The firmware_path/nvram_path module parameter may be changed by the system at run
5399 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
5400 * command may change dhdinfo->fw_path. As such we need to clear the path info in
5401 * module parameter after it is copied. We won't update the path until the module parameter
5402 * is changed again (first character is not '\0')
5405 /* set default firmware and nvram path for built-in type driver */
5406 if (!dhd_download_fw_on_driverload) {
5407 #ifdef CONFIG_BCMDHD_FW_PATH
5408 fw = CONFIG_BCMDHD_FW_PATH;
5409 #endif /* CONFIG_BCMDHD_FW_PATH */
5410 #ifdef CONFIG_BCMDHD_NVRAM_PATH
5411 nv = CONFIG_BCMDHD_NVRAM_PATH;
5412 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
5415 /* check if we need to initialize the path */
5416 if (dhdinfo->fw_path[0] == '\0') {
5417 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
5418 fw = adapter->fw_path;
5421 if (dhdinfo->nv_path[0] == '\0') {
5422 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
5423 nv = adapter->nv_path;
5426 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
5428 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
5430 if (firmware_path[0] != '\0')
5432 if (nvram_path[0] != '\0')
5435 if (fw && fw[0] != '\0') {
5436 fw_len = strlen(fw);
5437 if (fw_len >= sizeof(dhdinfo->fw_path)) {
5438 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
5441 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
5442 if (dhdinfo->fw_path[fw_len-1] == '\n')
5443 dhdinfo->fw_path[fw_len-1] = '\0';
5445 if (nv && nv[0] != '\0') {
5446 nv_len = strlen(nv);
5447 if (nv_len >= sizeof(dhdinfo->nv_path)) {
5448 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
5451 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
5452 if (dhdinfo->nv_path[nv_len-1] == '\n')
5453 dhdinfo->nv_path[nv_len-1] = '\0';
5456 /* clear the path in module parameter */
5457 firmware_path[0] = '\0';
5458 nvram_path[0] = '\0';
5460 #ifndef BCMEMBEDIMAGE
5461 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
5462 if (dhdinfo->fw_path[0] == '\0') {
5463 DHD_ERROR(("firmware path not found\n"));
5466 if (dhdinfo->nv_path[0] == '\0') {
5467 DHD_ERROR(("nvram path not found\n"));
5470 #endif /* BCMEMBEDIMAGE */
5477 dhd_bus_start(dhd_pub_t *dhdp)
5480 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
5481 unsigned long flags;
5485 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
5487 DHD_PERIM_LOCK(dhdp);
5489 /* try to download image and nvram to the dongle */
5490 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
5491 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
5492 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
5493 dhd->fw_path, dhd->nv_path);
5495 DHD_ERROR(("%s: failed to download firmware %s\n",
5496 __FUNCTION__, dhd->fw_path));
5497 DHD_PERIM_UNLOCK(dhdp);
5501 if (dhd->pub.busstate != DHD_BUS_LOAD) {
5502 DHD_PERIM_UNLOCK(dhdp);
5506 dhd_os_sdlock(dhdp);
5508 /* Start the watchdog timer */
5509 dhd->pub.tickcnt = 0;
5510 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
5512 /* Bring up the bus */
5513 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
5515 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
5516 dhd_os_sdunlock(dhdp);
5517 DHD_PERIM_UNLOCK(dhdp);
5520 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
5521 #if defined(BCMPCIE_OOB_HOST_WAKE)
5522 dhd_os_sdunlock(dhdp);
5523 #endif /* BCMPCIE_OOB_HOST_WAKE */
5524 /* Host registration for OOB interrupt */
5525 if (dhd_bus_oob_intr_register(dhdp)) {
5526 /* deactivate timer and wait for the handler to finish */
5527 #if !defined(BCMPCIE_OOB_HOST_WAKE)
5528 DHD_GENERAL_LOCK(&dhd->pub, flags);
5529 dhd->wd_timer_valid = FALSE;
5530 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5531 dhd_os_sdunlock(dhdp);
5532 del_timer_sync(&dhd->timer);
5533 #endif /* BCMPCIE_OOB_HOST_WAKE */
5534 DHD_PERIM_UNLOCK(dhdp);
5535 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5536 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
5540 #if defined(BCMPCIE_OOB_HOST_WAKE)
5541 dhd_os_sdlock(dhdp);
5542 dhd_bus_oob_intr_set(dhdp, TRUE);
5544 /* Enable oob at firmware */
5545 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
5546 #endif /* BCMPCIE_OOB_HOST_WAKE */
5548 #ifdef PCIE_FULL_DONGLE
5551 uint32 num_flowrings; /* includes H2D common rings */
5552 num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
5553 DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
5555 if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
5556 dhd_os_sdunlock(dhdp);
5557 DHD_PERIM_UNLOCK(dhdp);
5561 #endif /* PCIE_FULL_DONGLE */
5563 /* Do protocol initialization necessary for IOCTL/IOVAR */
5564 dhd_prot_init(&dhd->pub);
5566 /* If bus is not ready, can't come up */
5567 if (dhd->pub.busstate != DHD_BUS_DATA) {
5568 DHD_GENERAL_LOCK(&dhd->pub, flags);
5569 dhd->wd_timer_valid = FALSE;
5570 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5571 dhd_os_sdunlock(dhdp);
5572 del_timer_sync(&dhd->timer);
5573 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
5574 DHD_PERIM_UNLOCK(dhdp);
5575 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5579 dhd_os_sdunlock(dhdp);
5581 /* Bus is ready, query any dongle information */
5582 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
5583 DHD_PERIM_UNLOCK(dhdp);
5587 #ifdef ARP_OFFLOAD_SUPPORT
5588 if (dhd->pend_ipaddr) {
5589 #ifdef AOE_IP_ALIAS_SUPPORT
5590 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
5591 #endif /* AOE_IP_ALIAS_SUPPORT */
5592 dhd->pend_ipaddr = 0;
5594 #endif /* ARP_OFFLOAD_SUPPORT */
5596 DHD_PERIM_UNLOCK(dhdp);
5600 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
5602 char iovbuf[WLC_IOCTL_SMLEN];
5603 uint32 tdls = tdls_on;
5605 uint32 tdls_auto_op = 0;
5606 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
5607 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
5608 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
5610 if (!FW_SUPPORTED(dhd, tdls))
5613 if (dhd->tdls_enable == tdls_on)
5615 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
5616 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5617 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
5620 dhd->tdls_enable = tdls_on;
5623 tdls_auto_op = auto_on;
5624 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
5625 iovbuf, sizeof(iovbuf));
5626 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5627 sizeof(iovbuf), TRUE, 0)) < 0) {
5628 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
5633 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
5634 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
5635 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5636 sizeof(iovbuf), TRUE, 0)) < 0) {
5637 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
5640 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
5641 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5642 sizeof(iovbuf), TRUE, 0)) < 0) {
5643 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
5646 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
5647 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5648 sizeof(iovbuf), TRUE, 0)) < 0) {
5649 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
5657 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
5659 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5662 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
5667 #ifdef PCIE_FULL_DONGLE
5668 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
5670 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5671 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5672 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
5673 tdls_peer_node_t *new = NULL, *prev = NULL;
5675 uint8 sa[ETHER_ADDR_LEN];
5676 int ifidx = dhd_net2idx(dhd, dev);
5678 if (ifidx == DHD_BAD_IF)
5681 dhdif = dhd->iflist[ifidx];
5682 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
5685 while (cur != NULL) {
5686 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5687 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5688 __FUNCTION__, __LINE__));
5694 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
5696 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
5699 memcpy(new->addr, da, ETHER_ADDR_LEN);
5700 new->next = dhdp->peer_tbl.node;
5701 dhdp->peer_tbl.node = new;
5702 dhdp->peer_tbl.tdls_peer_count++;
5705 while (cur != NULL) {
5706 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5707 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
5709 prev->next = cur->next;
5711 dhdp->peer_tbl.node = cur->next;
5712 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
5713 dhdp->peer_tbl.tdls_peer_count--;
5719 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
5722 #endif /* PCIE_FULL_DONGLE */
5725 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
5730 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
5732 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
5733 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
5738 #if !defined(AP) && defined(WLP2P)
5739 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
5740 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
5741 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
5742 * would still be named as fw_bcmdhd_apsta.
5745 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
5748 char buf[WLC_IOCTL_SMLEN];
5749 bool mchan_supported = FALSE;
5750 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5751 * test mode, that means we only will use the mode as it is
5753 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
5755 if (FW_SUPPORTED(dhd, vsdb)) {
5756 mchan_supported = TRUE;
5758 if (!FW_SUPPORTED(dhd, p2p)) {
5759 DHD_TRACE(("Chip does not support p2p\n"));
5763 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5764 memset(buf, 0, sizeof(buf));
5765 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
5766 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5768 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
5773 /* By default, chip supports single chan concurrency,
5774 * now lets check for mchan
5776 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
5777 if (mchan_supported)
5778 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
5779 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5780 /* For customer_hw4, although ICS,
5781 * we still support concurrent mode
5794 #ifdef SUPPORT_AP_POWERSAVE
5795 #define RXCHAIN_PWRSAVE_PPS 10
5796 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
5797 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
5798 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
5801 int32 pps = RXCHAIN_PWRSAVE_PPS;
5802 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
5803 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
5806 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5807 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5808 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5809 DHD_ERROR(("Failed to enable AP power save"));
5811 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
5812 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5813 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5814 DHD_ERROR(("Failed to set pps"));
5816 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
5817 4, iovbuf, sizeof(iovbuf));
5818 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5819 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5820 DHD_ERROR(("Failed to set quiet time"));
5822 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
5823 4, iovbuf, sizeof(iovbuf));
5824 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5825 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5826 DHD_ERROR(("Failed to set stas assoc check"));
5829 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5830 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5831 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5832 DHD_ERROR(("Failed to disable AP power save"));
5838 #endif /* SUPPORT_AP_POWERSAVE */
5843 dhd_preinit_ioctls(dhd_pub_t *dhd)
5846 char eventmask[WL_EVENTING_MASK_LEN];
5847 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
5848 uint32 buf_key_b4_m4 = 1;
5849 uint32 rpt_hitxrate = 1;
5851 eventmsgs_ext_t *eventmask_msg = NULL;
5852 char* iov_buf = NULL;
5854 #if defined(CUSTOM_AMPDU_BA_WSIZE)
5855 uint32 ampdu_ba_wsize = 0;
5857 #if defined(CUSTOM_AMPDU_MPDU)
5858 int32 ampdu_mpdu = 0;
5860 #if defined(CUSTOM_AMPDU_RELEASE)
5861 int32 ampdu_release = 0;
5863 #if defined(CUSTOM_AMSDU_AGGSF)
5864 int32 amsdu_aggsf = 0;
5867 #if defined(BCMSDIO)
5868 #ifdef PROP_TXSTATUS
5869 int wlfc_enable = TRUE;
5871 uint32 hostreorder = 1;
5872 #endif /* DISABLE_11N */
5873 #endif /* PROP_TXSTATUS */
5875 #ifdef PCIE_FULL_DONGLE
5876 uint32 wl_ap_isolate;
5877 #endif /* PCIE_FULL_DONGLE */
5879 #ifdef DHD_ENABLE_LPC
5881 #endif /* DHD_ENABLE_LPC */
5882 uint power_mode = PM_FAST;
5883 uint32 dongle_align = DHD_SDALIGN;
5884 #if defined(BCMSDIO)
5885 uint32 glom = CUSTOM_GLOM_SETTING;
5886 #endif /* defined(BCMSDIO) */
5887 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
5890 #if defined(VSDB) || defined(ROAM_ENABLE)
5891 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
5893 uint bcn_timeout = 4;
5896 #if defined(ARP_OFFLOAD_SUPPORT)
5899 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
5900 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
5901 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
5902 char buf[WLC_IOCTL_SMLEN];
5904 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
5907 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
5908 int roam_scan_period[2] = {10, WLC_BAND_ALL};
5909 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
5910 #ifdef ROAM_AP_ENV_DETECTION
5911 int roam_env_mode = AP_ENV_INDETERMINATE;
5912 #endif /* ROAM_AP_ENV_DETECTION */
5913 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
5914 int roam_fullscan_period = 60;
5915 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
5916 int roam_fullscan_period = 120;
5917 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
5919 #ifdef DISABLE_BUILTIN_ROAM
5921 #endif /* DISABLE_BUILTIN_ROAM */
5922 #endif /* ROAM_ENABLE */
5927 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
5928 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
5929 struct ether_addr p2p_ea;
5931 #ifdef SOFTAP_UAPSD_OFF
5932 uint32 wme_apsd = 0;
5933 #endif /* SOFTAP_UAPSD_OFF */
5934 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
5935 uint32 apsta = 1; /* Enable APSTA mode */
5936 #elif defined(SOFTAP_AND_GC)
5939 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
5940 #ifdef GET_CUSTOM_MAC_ENABLE
5941 struct ether_addr ea_addr;
5942 #endif /* GET_CUSTOM_MAC_ENABLE */
5944 #ifdef CUSTOM_AMPDU_BA_WSIZE
5945 struct ampdu_tid_control atc;
5949 #endif /* DISABLE_11N */
5953 #endif /* USE_WL_TXBF */
5954 #ifdef USE_WL_FRAMEBURST
5955 uint32 frameburst = 1;
5956 #endif /* USE_WL_FRAMEBURST */
5957 #ifdef CUSTOM_PSPRETEND_THR
5958 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
5963 #ifdef PKT_FILTER_SUPPORT
5964 dhd_pkt_filter_enable = TRUE;
5965 #endif /* PKT_FILTER_SUPPORT */
5967 dhd->tdls_enable = FALSE;
5969 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
5970 DHD_TRACE(("Enter %s\n", __FUNCTION__));
5972 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
5973 (op_mode == DHD_FLAG_MFG_MODE)) {
5974 /* Check and adjust IOCTL response timeout for Manufactring firmware */
5975 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
5976 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
5980 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
5981 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
5983 #ifdef GET_CUSTOM_MAC_ENABLE
5984 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
5986 memset(buf, 0, sizeof(buf));
5987 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
5988 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
5990 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
5994 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
5996 #endif /* GET_CUSTOM_MAC_ENABLE */
5997 /* Get the default device MAC address directly from firmware */
5998 memset(buf, 0, sizeof(buf));
5999 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
6000 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
6002 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
6006 /* Update public MAC address after reading from Firmware */
6007 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
6009 #ifdef GET_CUSTOM_MAC_ENABLE
6011 #endif /* GET_CUSTOM_MAC_ENABLE */
6013 /* get a capabilities from firmware */
6014 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
6015 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
6016 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
6017 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
6018 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6019 __FUNCTION__, ret));
6022 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
6023 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
6024 #ifdef SET_RANDOM_MAC_SOFTAP
6027 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
6028 #if defined(ARP_OFFLOAD_SUPPORT)
6031 #ifdef PKT_FILTER_SUPPORT
6032 dhd_pkt_filter_enable = FALSE;
6034 #ifdef SET_RANDOM_MAC_SOFTAP
6035 SRANDOM32((uint)jiffies);
6036 rand_mac = RANDOM32();
6037 iovbuf[0] = 0x02; /* locally administered bit */
6040 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
6041 iovbuf[4] = (unsigned char)(rand_mac >> 8);
6042 iovbuf[5] = (unsigned char)(rand_mac >> 16);
6044 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
6045 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6047 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6049 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
6050 #endif /* SET_RANDOM_MAC_SOFTAP */
6051 #if !defined(AP) && defined(WL_CFG80211)
6052 /* Turn off MPC in AP mode */
6053 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6054 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6055 sizeof(iovbuf), TRUE, 0)) < 0) {
6056 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
6059 #ifdef SUPPORT_AP_POWERSAVE
6060 dhd_set_ap_powersave(dhd, 0, TRUE);
6062 #ifdef SOFTAP_UAPSD_OFF
6063 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
6064 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6065 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
6066 #endif /* SOFTAP_UAPSD_OFF */
6067 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6068 (op_mode == DHD_FLAG_MFG_MODE)) {
6069 #if defined(ARP_OFFLOAD_SUPPORT)
6071 #endif /* ARP_OFFLOAD_SUPPORT */
6072 #ifdef PKT_FILTER_SUPPORT
6073 dhd_pkt_filter_enable = FALSE;
6074 #endif /* PKT_FILTER_SUPPORT */
6075 dhd->op_mode = DHD_FLAG_MFG_MODE;
6077 uint32 concurrent_mode = 0;
6078 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
6079 (op_mode == DHD_FLAG_P2P_MODE)) {
6080 #if defined(ARP_OFFLOAD_SUPPORT)
6083 #ifdef PKT_FILTER_SUPPORT
6084 dhd_pkt_filter_enable = FALSE;
6086 dhd->op_mode = DHD_FLAG_P2P_MODE;
6087 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
6088 (op_mode == DHD_FLAG_IBSS_MODE)) {
6089 dhd->op_mode = DHD_FLAG_IBSS_MODE;
6091 dhd->op_mode = DHD_FLAG_STA_MODE;
6092 #if !defined(AP) && defined(WLP2P)
6093 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
6094 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
6095 #if defined(ARP_OFFLOAD_SUPPORT)
6098 dhd->op_mode |= concurrent_mode;
6101 /* Check if we are enabling p2p */
6102 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6103 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6104 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6105 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6106 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
6109 #if defined(SOFTAP_AND_GC)
6110 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
6111 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
6112 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
6115 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
6116 ETHER_SET_LOCALADDR(&p2p_ea);
6117 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
6118 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
6119 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6120 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6121 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
6123 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
6127 (void)concurrent_mode;
6131 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
6132 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
6133 /* Set Country code */
6134 if (dhd->dhd_cspec.ccode[0] != 0) {
6135 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
6136 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
6137 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6138 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
6142 /* Set Listen Interval */
6143 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
6144 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6145 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
6147 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
6148 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
6149 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
6150 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6151 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
6152 #if defined(ROAM_ENABLE)
6153 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
6154 sizeof(roam_trigger), TRUE, 0)) < 0)
6155 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
6156 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
6157 sizeof(roam_scan_period), TRUE, 0)) < 0)
6158 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
6159 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
6160 sizeof(roam_delta), TRUE, 0)) < 0)
6161 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
6162 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
6163 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6164 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
6165 #ifdef ROAM_AP_ENV_DETECTION
6166 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
6167 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
6168 4, iovbuf, sizeof(iovbuf));
6169 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) == BCME_OK)
6170 dhd->roam_env_detection = TRUE;
6172 dhd->roam_env_detection = FALSE;
6175 #endif /* ROAM_AP_ENV_DETECTION */
6176 #endif /* ROAM_ENABLE */
6179 /* by default TDLS on and auto mode off */
6180 _dhd_tdls_enable(dhd, true, false, NULL);
6183 #ifdef DHD_ENABLE_LPC
6184 if (bcmdhd_dhd_enable_lpc) {
6186 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
6187 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6188 sizeof(iovbuf), TRUE, 0)) < 0) {
6189 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
6192 #endif /* DHD_ENABLE_LPC */
6194 /* Set PowerSave mode */
6195 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
6197 /* Match Host and Dongle rx alignment */
6198 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
6199 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6201 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6202 /* enable credall to reduce the chance of no bus credit happened. */
6203 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
6204 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6207 #if defined(BCMSDIO)
6208 if (glom != DEFAULT_GLOM_VALUE) {
6209 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
6210 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6211 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6213 #endif /* defined(BCMSDIO) */
6215 /* Setup timeout if Beacons are lost and roam is off to report link down */
6216 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
6217 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6218 /* Setup assoc_retry_max count to reconnect target AP in dongle */
6219 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
6220 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6221 #if defined(AP) && !defined(WLP2P)
6222 /* Turn off MPC in AP mode */
6223 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6224 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6225 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6226 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6227 #endif /* defined(AP) && !defined(WLP2P) */
6231 if (ap_fw_loaded == TRUE) {
6232 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
6236 #if defined(KEEP_ALIVE)
6238 /* Set Keep Alive : be sure to use FW with -keepalive */
6242 if (ap_fw_loaded == FALSE)
6244 if (!(dhd->op_mode &
6245 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
6246 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
6247 DHD_ERROR(("%s set keeplive failed %d\n",
6248 __FUNCTION__, res));
6251 #endif /* defined(KEEP_ALIVE) */
6254 if (bcmdhd_use_wl_txbf) {
6255 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
6256 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6257 sizeof(iovbuf), TRUE, 0)) < 0) {
6258 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
6261 #endif /* USE_WL_TXBF */
6262 #ifdef USE_WL_FRAMEBURST
6263 if (bcmdhd_use_wl_frameburst) {
6264 /* Set frameburst to value */
6265 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
6266 sizeof(frameburst), TRUE, 0)) < 0) {
6267 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
6270 #endif /* USE_WL_FRAMEBURST */
6271 #if defined(CUSTOM_AMPDU_BA_WSIZE)
6272 /* Set ampdu ba wsize to 64 or 16 */
6273 #ifdef CUSTOM_AMPDU_BA_WSIZE
6274 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
6276 if (ampdu_ba_wsize != 0) {
6277 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
6278 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6279 sizeof(iovbuf), TRUE, 0)) < 0) {
6280 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
6281 __FUNCTION__, ampdu_ba_wsize, ret));
6286 bcm_mkiovar("ampdu_rx_tid", (char *)&atc, sizeof(atc), iovbuf, sizeof(iovbuf));
6287 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6290 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
6291 if (iov_buf == NULL) {
6292 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
6297 #if defined(CUSTOM_AMPDU_MPDU)
6298 if (bcmdhd_use_custom_ampdu_mpdu) {
6299 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
6300 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
6301 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
6302 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6303 sizeof(iovbuf), TRUE, 0)) < 0) {
6304 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
6305 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
6309 #endif /* CUSTOM_AMPDU_MPDU */
6311 #if defined(CUSTOM_AMPDU_RELEASE)
6312 ampdu_release = CUSTOM_AMPDU_RELEASE;
6313 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
6314 bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
6315 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6316 sizeof(iovbuf), TRUE, 0)) < 0) {
6317 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
6318 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
6321 #endif /* CUSTOM_AMPDU_RELEASE */
6323 #if defined(CUSTOM_AMSDU_AGGSF)
6324 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
6325 if (amsdu_aggsf != 0) {
6326 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
6327 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6328 sizeof(iovbuf), TRUE, 0)) < 0) {
6329 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
6330 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
6333 #endif /* CUSTOM_AMSDU_AGGSF */
6335 #ifdef CUSTOM_PSPRETEND_THR
6336 if (bcmdhd_use_custom_pspretend_thr) {
6337 /* Turn off MPC in AP mode */
6338 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
6339 iovbuf, sizeof(iovbuf));
6340 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6341 sizeof(iovbuf), TRUE, 0)) < 0) {
6342 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
6343 __FUNCTION__, ret));
6348 /* Set the rpt_hitxrate to 1 so that link speed updated by WLC_GET_RATE
6349 * is the maximum trasnmit rate
6350 * rpt_hitxrate 0 : Here the rate reported is the most used rate in
6352 * rpt_hitxrate 1 : Here the rate reported is the highest used rate
6355 bcm_mkiovar("rpt_hitxrate", (char *)&rpt_hitxrate, 4, iovbuf,
6357 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6358 sizeof(iovbuf), TRUE, 0);
6360 DHD_ERROR(("%s Set rpt_hitxrate failed %d\n", __FUNCTION__, ret));
6363 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
6364 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6365 sizeof(iovbuf), TRUE, 0)) < 0) {
6366 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
6369 /* Read event_msgs mask */
6370 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6371 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
6372 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
6375 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
6377 /* Setup event_msgs */
6378 setbit(eventmask, WLC_E_SET_SSID);
6379 setbit(eventmask, WLC_E_PRUNE);
6380 setbit(eventmask, WLC_E_AUTH);
6381 setbit(eventmask, WLC_E_AUTH_IND);
6382 setbit(eventmask, WLC_E_ASSOC);
6383 setbit(eventmask, WLC_E_REASSOC);
6384 setbit(eventmask, WLC_E_REASSOC_IND);
6385 setbit(eventmask, WLC_E_DEAUTH);
6386 setbit(eventmask, WLC_E_DEAUTH_IND);
6387 setbit(eventmask, WLC_E_DISASSOC_IND);
6388 setbit(eventmask, WLC_E_DISASSOC);
6389 setbit(eventmask, WLC_E_JOIN);
6390 setbit(eventmask, WLC_E_START);
6391 setbit(eventmask, WLC_E_ASSOC_IND);
6392 setbit(eventmask, WLC_E_PSK_SUP);
6393 setbit(eventmask, WLC_E_LINK);
6394 setbit(eventmask, WLC_E_NDIS_LINK);
6395 setbit(eventmask, WLC_E_MIC_ERROR);
6396 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
6397 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
6399 setbit(eventmask, WLC_E_PMKID_CACHE);
6400 setbit(eventmask, WLC_E_TXFAIL);
6402 setbit(eventmask, WLC_E_JOIN_START);
6403 setbit(eventmask, WLC_E_SCAN_COMPLETE);
6405 setbit(eventmask, WLC_E_HTSFSYNC);
6406 #endif /* WLMEDIA_HTSF */
6408 setbit(eventmask, WLC_E_PFN_NET_FOUND);
6409 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
6410 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
6411 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
6412 #endif /* PNO_SUPPORT */
6413 /* enable dongle roaming event */
6414 /* WLC_E_ROAM event is depricated for bcm4354
6415 WLC_E_BSSID event is used for roaming in bcm4354*/
6416 #ifndef DISABLE_ROAM_EVENT
6417 if (!bcmdhd_disable_roam_event) {
6418 setbit(eventmask, WLC_E_ROAM);
6420 #endif /* DISABLE_ROAM_EVENT */
6421 setbit(eventmask, WLC_E_BSSID);
6423 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
6426 setbit(eventmask, WLC_E_ESCAN_RESULT);
6427 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6428 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
6429 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
6431 #endif /* WL_CFG80211 */
6432 setbit(eventmask, WLC_E_TRACE);
6433 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
6434 /* Write updated Event mask */
6435 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6436 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6437 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
6441 /* make up event mask ext message iovar for event larger than 128 */
6442 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
6443 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
6444 if (eventmask_msg == NULL) {
6445 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
6449 bzero(eventmask_msg, msglen);
6450 eventmask_msg->ver = EVENTMSGS_VER;
6451 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6453 /* Read event_msgs_ext mask */
6454 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
6455 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
6456 if (ret2 != BCME_UNSUPPORTED)
6458 if (ret2 == 0) { /* event_msgs_ext must be supported */
6459 bcopy(iov_buf, eventmask_msg, msglen);
6461 #ifdef BT_WIFI_HANDOVER
6462 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
6463 #endif /* BT_WIFI_HANDOVER */
6465 /* Write updated Event mask */
6466 eventmask_msg->ver = EVENTMSGS_VER;
6467 eventmask_msg->command = EVENTMSGS_SET_MASK;
6468 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6469 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
6470 msglen, iov_buf, WLC_IOCTL_SMLEN);
6471 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6472 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6473 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
6476 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
6477 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
6479 } /* unsupported is ok */
6481 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
6482 sizeof(scan_assoc_time), TRUE, 0);
6483 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
6484 sizeof(scan_unassoc_time), TRUE, 0);
6485 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
6486 sizeof(scan_passive_time), TRUE, 0);
6488 #ifdef ARP_OFFLOAD_SUPPORT
6489 /* Set and enable ARP offload feature for STA only */
6491 if (arpoe && !ap_fw_loaded) {
6495 dhd_arp_offload_enable(dhd, TRUE);
6496 dhd_arp_offload_set(dhd, dhd_arp_mode);
6498 dhd_arp_offload_enable(dhd, FALSE);
6499 dhd_arp_offload_set(dhd, 0);
6501 dhd_arp_enable = arpoe;
6502 #endif /* ARP_OFFLOAD_SUPPORT */
6504 #ifdef PKT_FILTER_SUPPORT
6505 /* Setup default defintions for pktfilter , enable in suspend */
6506 dhd->pktfilter_count = 6;
6507 /* Setup filter to allow only unicast */
6508 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
6509 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
6510 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
6511 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
6512 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6513 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6514 /* apply APP pktfilter */
6515 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
6520 dhd_enable_packet_filter(0, dhd);
6522 #endif /* defined(SOFTAP) */
6523 dhd_set_packet_filter(dhd);
6524 #endif /* PKT_FILTER_SUPPORT */
6526 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
6527 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6528 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
6529 #endif /* DISABLE_11N */
6531 /* query for 'ver' to get version info from firmware */
6532 memset(buf, 0, sizeof(buf));
6534 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
6535 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
6536 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6538 bcmstrtok(&ptr, "\n", 0);
6539 /* Print fw version info */
6540 DHD_ERROR(("Firmware version = %s\n", buf));
6541 #if defined(BCMSDIO)
6542 dhd_set_version_info(dhd, buf);
6543 #endif /* defined(BCMSDIO) */
6546 #if defined(BCMSDIO)
6547 dhd_txglom_enable(dhd, TRUE);
6548 #endif /* defined(BCMSDIO) */
6550 #if defined(BCMSDIO)
6551 #ifdef PROP_TXSTATUS
6552 if (disable_proptx ||
6553 #ifdef PROP_TXSTATUS_VSDB
6554 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6555 (bcmdhd_prop_txstatus_vsdb &&
6556 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
6557 dhd->op_mode != DHD_FLAG_IBSS_MODE)) ||
6558 #endif /* PROP_TXSTATUS_VSDB */
6560 wlfc_enable = FALSE;
6564 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
6565 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6566 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
6567 if (ret2 != BCME_UNSUPPORTED)
6569 if (ret == BCME_NOTDOWN) {
6571 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
6572 sizeof(wl_down), TRUE, 0);
6573 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
6574 __FUNCTION__, ret2, hostreorder));
6576 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
6577 iovbuf, sizeof(iovbuf));
6578 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6579 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
6580 if (ret2 != BCME_UNSUPPORTED)
6583 if (ret2 != BCME_OK)
6586 #endif /* DISABLE_11N */
6592 else if (hostreorder)
6593 dhd_wlfc_hostreorder_init(dhd);
6594 #endif /* DISABLE_11N */
6596 #endif /* PROP_TXSTATUS */
6597 #endif /* BCMSDIO || BCMBUS */
6598 #ifdef PCIE_FULL_DONGLE
6599 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6600 if (FW_SUPPORTED(dhd, ap)) {
6601 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
6602 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
6603 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6604 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6606 #endif /* PCIE_FULL_DONGLE */
6608 if (!dhd->pno_state) {
6614 dhd_interworking_enable(dhd);
6618 /*RXCB not applicable to PCIE*/
6619 #if !defined(BCMPCIE) && defined(RXCB)
6620 if (bcmdhd_custom_rxcb) {
6621 /* Enable bus rx callback */
6622 bcm_mkiovar("bus:rxcb", (char *)&rxcb, 4, iovbuf, sizeof(iovbuf));
6623 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6625 DHD_ERROR(("%s failed to set RXCB %d\n", __FUNCTION__, ret));
6632 kfree(eventmask_msg);
6639 void dhd_set_ampdu_rx_tid(struct net_device *dev, int ampdu_rx_tid)
6642 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6643 dhd_pub_t *pub = &dhd->pub;
6645 for (i = 0; i < 8; i++) { /* One bit each for traffic class CS7 - CS0 */
6646 struct ampdu_tid_control atc;
6648 atc.enable = (ampdu_rx_tid >> i) & 1;
6649 bcm_mkiovar("ampdu_rx_tid", (char *)&atc, sizeof(atc), iovbuf,sizeof(iovbuf));
6650 ret = dhd_wl_ioctl_cmd(pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf),TRUE, 0);
6652 DHD_ERROR(("%s failed %d\n", __func__, ret));
6657 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
6659 char buf[strlen(name) + 1 + cmd_len];
6660 int len = sizeof(buf);
6664 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
6666 memset(&ioc, 0, sizeof(ioc));
6668 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
6673 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
6674 if (!set && ret >= 0)
6675 memcpy(cmd_buf, buf, cmd_len);
6680 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
6682 struct dhd_info *dhd = dhdp->info;
6683 struct net_device *dev = NULL;
6685 ASSERT(dhd && dhd->iflist[ifidx]);
6686 dev = dhd->iflist[ifidx]->net;
6689 if (netif_running(dev)) {
6690 DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
6691 return BCME_NOTDOWN;
6694 #define DHD_MIN_MTU 1500
6695 #define DHD_MAX_MTU 1752
6697 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
6698 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
6706 #ifdef ARP_OFFLOAD_SUPPORT
6707 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
6709 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
6711 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
6715 bzero(ipv4_buf, sizeof(ipv4_buf));
6717 /* display what we've got */
6718 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6719 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
6721 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6723 /* now we saved hoste_ip table, clr it in the dongle AOE */
6724 dhd_aoe_hostip_clr(dhd_pub, idx);
6727 DHD_ERROR(("%s failed\n", __FUNCTION__));
6731 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
6732 if (add && (ipv4_buf[i] == 0)) {
6734 add = FALSE; /* added ipa to local table */
6735 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
6737 } else if (ipv4_buf[i] == ipa) {
6739 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
6740 __FUNCTION__, ipa, i));
6743 if (ipv4_buf[i] != 0) {
6744 /* add back host_ip entries from our local cache */
6745 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
6746 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
6747 __FUNCTION__, ipv4_buf[i], i));
6751 /* see the resulting hostip table */
6752 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
6753 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
6754 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
6759 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6760 * whenever there is an event related to an IP address.
6761 * ptr : kernel provided pointer to IP address that has changed
6763 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
6764 unsigned long event,
6767 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
6773 if (!dhd_arp_enable)
6775 if (!ifa || !(ifa->ifa_dev->dev))
6778 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6779 /* Filter notifications meant for non Broadcom devices */
6780 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
6781 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
6782 #if defined(WL_ENABLE_P2P_IF)
6783 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
6784 #endif /* WL_ENABLE_P2P_IF */
6787 #endif /* LINUX_VERSION_CODE */
6789 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
6793 dhd_pub = &dhd->pub;
6795 if (dhd_pub->arp_version == 1) {
6799 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
6800 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
6803 if (idx < DHD_MAX_IFS)
6804 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
6805 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
6807 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
6814 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
6815 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
6817 if (dhd->pub.busstate != DHD_BUS_DATA) {
6818 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
6819 if (dhd->pend_ipaddr) {
6820 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
6821 __FUNCTION__, dhd->pend_ipaddr));
6823 dhd->pend_ipaddr = ifa->ifa_address;
6827 #ifdef AOE_IP_ALIAS_SUPPORT
6828 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
6830 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
6831 #endif /* AOE_IP_ALIAS_SUPPORT */
6835 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
6836 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
6837 dhd->pend_ipaddr = 0;
6838 #ifdef AOE_IP_ALIAS_SUPPORT
6839 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
6841 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
6843 dhd_aoe_hostip_clr(&dhd->pub, idx);
6844 dhd_aoe_arp_clr(&dhd->pub, idx);
6845 #endif /* AOE_IP_ALIAS_SUPPORT */
6849 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
6850 __func__, ifa->ifa_label, event));
6855 #endif /* ARP_OFFLOAD_SUPPORT */
6858 /* Neighbor Discovery Offload: defered handler */
6860 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
6862 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
6863 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
6866 if (event != DHD_WQ_WORK_IPV6_NDO) {
6867 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
6872 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
6877 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
6881 if (ndo_work->if_idx) {
6882 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
6886 switch (ndo_work->event) {
6888 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
6889 ret = dhd_ndo_enable(pub, TRUE);
6891 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
6894 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
6896 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
6897 __FUNCTION__, ret));
6901 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
6902 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
6904 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
6905 __FUNCTION__, ret));
6909 ret = dhd_ndo_enable(pub, FALSE);
6911 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
6916 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
6920 /* free ndo_work. alloced while scheduling the work */
6927 * Neighbor Discovery Offload: Called when an interface
6928 * is assigned with ipv6 address.
6929 * Handles only primary interface
6931 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
6932 unsigned long event,
6937 struct inet6_ifaddr *inet6_ifa = ptr;
6938 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
6939 struct ipv6_work_info_t *ndo_info;
6940 int idx = 0; /* REVISIT */
6942 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6943 /* Filter notifications meant for non Broadcom devices */
6944 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
6947 #endif /* LINUX_VERSION_CODE */
6949 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
6953 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
6955 dhd_pub = &dhd->pub;
6956 if (!FW_SUPPORTED(dhd_pub, ndoe))
6959 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
6961 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
6965 ndo_info->event = event;
6966 ndo_info->if_idx = idx;
6967 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
6969 /* defer the work to thread as it may block kernel */
6970 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
6971 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
6974 #endif /* #ifdef CONFIG_IPV6 */
6977 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
6979 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6981 struct net_device *net = NULL;
6983 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
6985 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6987 ASSERT(dhd && dhd->iflist[ifidx]);
6988 ifp = dhd->iflist[ifidx];
6990 ASSERT(net && (ifp->idx == ifidx));
6993 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
6995 net->get_stats = dhd_get_stats;
6996 net->do_ioctl = dhd_ioctl_entry;
6997 net->hard_start_xmit = dhd_start_xmit;
6998 net->set_mac_address = dhd_set_mac_address;
6999 net->set_multicast_list = dhd_set_multicast_list;
7000 net->open = net->stop = NULL;
7002 ASSERT(!net->netdev_ops);
7003 net->netdev_ops = &dhd_ops_virt;
7004 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7006 net->netdev_ops = &dhd_cfgp2p_ops_virt;
7007 #endif /* P2PONEINT */
7009 /* Ok, link into the network layer... */
7012 * device functions for the primary interface only
7014 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7015 net->open = dhd_open;
7016 net->stop = dhd_stop;
7018 net->netdev_ops = &dhd_ops_pri;
7019 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7020 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
7021 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7024 * We have to use the primary MAC for virtual interfaces
7026 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
7028 * Android sets the locally administered bit to indicate that this is a
7029 * portable hotspot. This will not work in simultaneous AP/STA mode,
7030 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
7032 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
7034 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
7035 __func__, net->name));
7036 temp_addr[0] |= 0x02;
7040 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
7041 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
7042 net->ethtool_ops = &dhd_ethtool_ops;
7043 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
7045 #if defined(WL_WIRELESS_EXT)
7046 #if WIRELESS_EXT < 19
7047 net->get_wireless_stats = dhd_get_wireless_stats;
7048 #endif /* WIRELESS_EXT < 19 */
7049 #if WIRELESS_EXT > 12
7050 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
7051 #endif /* WIRELESS_EXT > 12 */
7052 #endif /* defined(WL_WIRELESS_EXT) */
7054 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
7056 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
7059 printf("%s\n", dhd_version);
7062 err = register_netdev(net);
7064 err = register_netdevice(net);
7066 #ifdef CONFIG_BCMDHD_CUSTOM_SYSFS_TEGRA
7068 extern struct net_device *dhd_custom_sysfs_tegra_histogram_stat_netdev;
7070 dhd_custom_sysfs_tegra_histogram_stat_netdev = net;
7075 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
7080 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
7081 MAC2STRDBG(net->dev_addr));
7083 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
7084 wl_iw_iscan_set_scan_broadcast_prep(net, 1);
7087 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
7088 KERNEL_VERSION(2, 6, 27))))
7091 up(&dhd_registration_sem);
7093 if (!dhd_download_fw_on_driverload) {
7094 dhd_net_bus_devreset(net, TRUE);
7096 dhd_net_bus_suspend(net);
7097 #endif /* BCMLXSDMMC */
7098 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
7101 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
7105 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
7108 net->netdev_ops = NULL;
7114 dhd_bus_detach(dhd_pub_t *dhdp)
7118 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7121 dhd = (dhd_info_t *)dhdp->info;
7125 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
7126 * calling stop again will cuase SD read/write errors.
7128 if (dhd->pub.busstate != DHD_BUS_DOWN) {
7129 /* Stop the protocol module */
7130 dhd_prot_stop(&dhd->pub);
7132 /* Stop the bus module */
7133 dhd_bus_stop(dhd->pub.bus, TRUE);
7136 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7137 dhd_bus_oob_intr_unregister(dhdp);
7144 void dhd_detach(dhd_pub_t *dhdp)
7147 unsigned long flags;
7148 int timer_valid = FALSE;
7153 dhd = (dhd_info_t *)dhdp->info;
7157 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7159 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7161 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
7164 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
7165 /* Give sufficient time for threads to start running in case
7166 * dhd_attach() has failed
7171 #ifdef PROP_TXSTATUS
7172 #ifdef DHD_WLFC_THREAD
7173 if (dhd->pub.wlfc_thread) {
7174 kthread_stop(dhd->pub.wlfc_thread);
7175 dhdp->wlfc_thread_go = TRUE;
7176 wake_up_interruptible(&dhdp->wlfc_wqhead);
7178 dhd->pub.wlfc_thread = NULL;
7179 #endif /* DHD_WLFC_THREAD */
7180 #endif /* PROP_TXSTATUS */
7182 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
7183 dhd_bus_detach(dhdp);
7184 #ifdef PCIE_FULL_DONGLE
7185 dhd_flow_rings_deinit(dhdp);
7189 dhd_prot_detach(dhdp);
7192 #ifdef ARP_OFFLOAD_SUPPORT
7193 if (dhd_inetaddr_notifier_registered) {
7194 dhd_inetaddr_notifier_registered = FALSE;
7195 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7197 #endif /* ARP_OFFLOAD_SUPPORT */
7199 if (dhd_inet6addr_notifier_registered) {
7200 dhd_inet6addr_notifier_registered = FALSE;
7201 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7205 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7206 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
7207 if (dhd->early_suspend.suspend)
7208 unregister_early_suspend(&dhd->early_suspend);
7210 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7212 #if defined(WL_WIRELESS_EXT)
7213 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
7214 /* Detatch and unlink in the iw */
7217 #endif /* defined(WL_WIRELESS_EXT) */
7219 /* delete all interfaces, start with virtual */
7220 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
7224 /* Cleanup virtual interfaces */
7225 dhd_net_if_lock_local(dhd);
7226 for (i = 1; i < DHD_MAX_IFS; i++) {
7228 dhd_remove_if(&dhd->pub, i, TRUE);
7230 dhd_net_if_unlock_local(dhd);
7232 /* delete primary interface 0 */
7233 ifp = dhd->iflist[0];
7236 if (ifp && ifp->net) {
7240 /* in unregister_netdev case, the interface gets freed by net->destructor
7241 * (which is set to free_netdev)
7243 if (ifp->net->reg_state == NETREG_UNINITIALIZED)
7244 free_netdev(ifp->net);
7247 custom_rps_map_clear(ifp->net->_rx);
7248 #endif /* SET_RPS_CPUS */
7249 unregister_netdev(ifp->net);
7253 dhd_wmf_cleanup(dhdp, 0);
7254 #endif /* DHD_WMF */
7256 dhd_if_del_sta_list(ifp);
7258 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
7259 dhd->iflist[0] = NULL;
7263 /* Clear the watchdog timer */
7264 DHD_GENERAL_LOCK(&dhd->pub, flags);
7265 timer_valid = dhd->wd_timer_valid;
7266 dhd->wd_timer_valid = FALSE;
7267 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7269 del_timer_sync(&dhd->timer);
7271 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
7272 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
7273 PROC_STOP(&dhd->thr_wdt_ctl);
7276 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
7277 PROC_STOP(&dhd->thr_rxf_ctl);
7280 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
7281 PROC_STOP(&dhd->thr_dpc_ctl);
7283 tasklet_kill(&dhd->tasklet);
7286 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
7287 wl_cfg80211_detach(NULL);
7288 dhd_monitor_uninit();
7291 /* free deferred work queue */
7292 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
7293 dhd->dhd_deferred_wq = NULL;
7295 #ifdef SHOW_LOGTRACE
7296 if (dhd->event_data.fmts)
7297 kfree(dhd->event_data.fmts);
7298 if (dhd->event_data.raw_fmts)
7299 kfree(dhd->event_data.raw_fmts);
7300 #endif /* SHOW_LOGTRACE */
7303 if (dhdp->pno_state)
7304 dhd_pno_deinit(dhdp);
7306 #if defined(CONFIG_PM_SLEEP)
7307 if (dhd_pm_notifier_registered) {
7308 unregister_pm_notifier(&dhd_pm_notifier);
7309 dhd_pm_notifier_registered = FALSE;
7311 #endif /* CONFIG_PM_SLEEP */
7312 #ifdef DEBUG_CPU_FREQ
7314 free_percpu(dhd->new_freq);
7315 dhd->new_freq = NULL;
7316 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7318 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
7319 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
7320 #ifdef CONFIG_HAS_WAKELOCK
7321 dhd->wakelock_counter = 0;
7322 dhd->wakelock_wd_counter = 0;
7323 dhd->wakelock_rx_timeout_enable = 0;
7324 dhd->wakelock_ctrl_timeout_enable = 0;
7325 wake_lock_destroy(&dhd->wl_wifi);
7326 wake_lock_destroy(&dhd->wl_rxwake);
7327 wake_lock_destroy(&dhd->wl_ctrlwake);
7328 wake_lock_destroy(&dhd->wl_wdwake);
7329 #ifdef BCMPCIE_OOB_HOST_WAKE
7330 wake_lock_destroy(&dhd->wl_intrwake);
7331 #endif /* BCMPCIE_OOB_HOST_WAKE */
7332 #endif /* CONFIG_HAS_WAKELOCK */
7338 #ifdef DHDTCPACK_SUPPRESS
7339 /* This will free all MEM allocated for TCPACK SUPPRESS */
7340 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7341 #endif /* DHDTCPACK_SUPPRESS */
7346 dhd_free(dhd_pub_t *dhdp)
7349 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7353 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7354 if (dhdp->reorder_bufs[i]) {
7355 reorder_info_t *ptr;
7356 uint32 buf_size = sizeof(struct reorder_info);
7358 ptr = dhdp->reorder_bufs[i];
7360 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7361 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7362 i, ptr->max_idx, buf_size));
7364 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7365 dhdp->reorder_bufs[i] = NULL;
7369 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
7371 dhd = (dhd_info_t *)dhdp->info;
7372 if (dhdp->soc_ram) {
7373 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
7374 dhdp->soc_ram = NULL;
7377 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
7379 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
7380 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
7386 dhd_clear(dhd_pub_t *dhdp)
7388 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7392 #ifdef DHDTCPACK_SUPPRESS
7393 /* Clean up timer/data structure for any remaining/pending packet or timer. */
7394 dhd_tcpack_info_tbl_clean(dhdp);
7395 #endif /* DHDTCPACK_SUPPRESS */
7396 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7397 if (dhdp->reorder_bufs[i]) {
7398 reorder_info_t *ptr;
7399 uint32 buf_size = sizeof(struct reorder_info);
7401 ptr = dhdp->reorder_bufs[i];
7403 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7404 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7405 i, ptr->max_idx, buf_size));
7407 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7408 dhdp->reorder_bufs[i] = NULL;
7412 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
7414 if (dhdp->soc_ram) {
7415 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
7416 dhdp->soc_ram = NULL;
7422 dhd_module_cleanup(void)
7424 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7426 dhd_bus_unregister();
7430 dhd_wifi_platform_unregister_drv();
7434 dhd_module_exit(void)
7436 dhd_module_cleanup();
7437 unregister_reboot_notifier(&dhd_reboot_notifier);
7441 dhd_module_init(void)
7444 int retry = POWERUP_MAX_RETRY;
7446 DHD_ERROR(("%s in\n", __FUNCTION__));
7448 DHD_PERIM_RADIO_INIT();
7450 if (firmware_path[0] != '\0') {
7451 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
7452 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7455 if (nvram_path[0] != '\0') {
7456 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
7457 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
7461 err = dhd_wifi_platform_register_drv();
7463 register_reboot_notifier(&dhd_reboot_notifier);
7467 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
7468 __FUNCTION__, retry));
7469 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
7470 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
7471 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
7472 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
7477 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
7483 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
7485 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
7486 if (code == SYS_RESTART) {
7493 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7494 #if defined(CONFIG_DEFERRED_INITCALLS)
7495 deferred_module_init(dhd_module_init);
7496 #elif defined(USE_LATE_INITCALL_SYNC)
7497 late_initcall_sync(dhd_module_init);
7499 late_initcall(dhd_module_init);
7500 #endif /* USE_LATE_INITCALL_SYNC */
7502 module_init(dhd_module_init);
7503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7505 module_exit(dhd_module_exit);
7508 * OS specific functions required to implement DHD driver in OS independent way
7511 dhd_os_proto_block(dhd_pub_t *pub)
7513 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7516 DHD_PERIM_UNLOCK(pub);
7518 down(&dhd->proto_sem);
7520 DHD_PERIM_LOCK(pub);
7528 dhd_os_proto_unblock(dhd_pub_t *pub)
7530 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7533 up(&dhd->proto_sem);
7541 dhd_os_get_ioctl_resp_timeout(void)
7543 return ((unsigned int)dhd_ioctl_timeout_msec);
7547 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
7549 dhd_ioctl_timeout_msec = (int)timeout_msec;
7553 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
7555 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7558 /* Convert timeout in millsecond to jiffies */
7559 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7560 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
7562 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
7565 DHD_PERIM_UNLOCK(pub);
7567 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
7569 DHD_PERIM_LOCK(pub);
7575 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
7577 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7579 wake_up(&dhd->ioctl_resp_wait);
7584 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition, bool *pending)
7586 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7589 /* Convert timeout in millsecond to jiffies */
7590 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7591 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
7593 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
7596 DHD_PERIM_UNLOCK(pub);
7598 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
7600 DHD_PERIM_LOCK(pub);
7606 dhd_os_d3ack_wake(dhd_pub_t *pub)
7608 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7610 wake_up(&dhd->d3ack_wait);
7615 dhd_os_wd_timer_extend(void *bus, bool extend)
7617 dhd_pub_t *pub = bus;
7618 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7621 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
7623 dhd_os_wd_timer(bus, dhd->default_wd_interval);
7628 dhd_os_wd_timer(void *bus, uint wdtick)
7630 dhd_pub_t *pub = bus;
7631 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7632 unsigned long flags;
7634 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7637 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
7641 DHD_GENERAL_LOCK(pub, flags);
7643 /* don't start the wd until fw is loaded */
7644 if (pub->busstate == DHD_BUS_DOWN) {
7645 DHD_GENERAL_UNLOCK(pub, flags);
7647 DHD_OS_WD_WAKE_UNLOCK(pub);
7651 /* Totally stop the timer */
7652 if (!wdtick && dhd->wd_timer_valid == TRUE) {
7653 dhd->wd_timer_valid = FALSE;
7654 DHD_GENERAL_UNLOCK(pub, flags);
7655 del_timer_sync(&dhd->timer);
7656 DHD_OS_WD_WAKE_UNLOCK(pub);
7661 DHD_OS_WD_WAKE_LOCK(pub);
7662 dhd_watchdog_ms = (uint)wdtick;
7663 /* Re arm the timer, at last watchdog period */
7664 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
7665 dhd->wd_timer_valid = TRUE;
7667 DHD_GENERAL_UNLOCK(pub, flags);
7671 dhd_os_open_image(char *filename)
7675 fp = filp_open(filename, O_RDONLY, 0);
7677 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7679 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
7689 dhd_os_get_image_block(char *buf, int len, void *image)
7691 struct file *fp = (struct file *)image;
7697 rdlen = kernel_read(fp, fp->f_pos, buf, len);
7705 dhd_os_close_image(void *image)
7708 filp_close((struct file *)image, NULL);
7712 dhd_os_sdlock(dhd_pub_t *pub)
7716 dhd = (dhd_info_t *)(pub->info);
7718 if (dhd_dpc_prio >= 0)
7721 spin_lock_bh(&dhd->sdlock);
7725 dhd_os_sdunlock(dhd_pub_t *pub)
7729 dhd = (dhd_info_t *)(pub->info);
7731 if (dhd_dpc_prio >= 0)
7734 spin_unlock_bh(&dhd->sdlock);
7738 dhd_os_sdlock_txq(dhd_pub_t *pub)
7742 dhd = (dhd_info_t *)(pub->info);
7743 spin_lock_bh(&dhd->txqlock);
7747 dhd_os_sdunlock_txq(dhd_pub_t *pub)
7751 dhd = (dhd_info_t *)(pub->info);
7752 spin_unlock_bh(&dhd->txqlock);
7756 dhd_os_sdlock_rxq(dhd_pub_t *pub)
7761 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
7766 dhd_os_rxflock(dhd_pub_t *pub)
7770 dhd = (dhd_info_t *)(pub->info);
7771 spin_lock_bh(&dhd->rxf_lock);
7776 dhd_os_rxfunlock(dhd_pub_t *pub)
7780 dhd = (dhd_info_t *)(pub->info);
7781 spin_unlock_bh(&dhd->rxf_lock);
7784 #ifdef DHDTCPACK_SUPPRESS
7786 dhd_os_tcpacklock(dhd_pub_t *pub)
7790 dhd = (dhd_info_t *)(pub->info);
7792 if (irqs_disabled())
7793 spin_lock(&dhd->tcpack_lock);
7795 spin_lock_bh(&dhd->tcpack_lock);
7797 spin_lock_bh(&dhd->tcpack_lock);
7803 dhd_os_tcpackunlock(dhd_pub_t *pub)
7807 dhd = (dhd_info_t *)(pub->info);
7809 if (irqs_disabled())
7810 spin_unlock(&dhd->tcpack_lock);
7812 spin_unlock_bh(&dhd->tcpack_lock);
7814 spin_unlock_bh(&dhd->tcpack_lock);
7817 #endif /* DHDTCPACK_SUPPRESS */
7819 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
7822 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
7824 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
7826 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
7827 " size: %dbytes", __FUNCTION__, section, size));
7828 if (kmalloc_if_fail)
7829 buf = kmalloc(size, flags);
7835 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
7839 #if defined(WL_WIRELESS_EXT)
7840 struct iw_statistics *
7841 dhd_get_wireless_stats(struct net_device *dev)
7844 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7850 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
7853 return &dhd->iw.wstats;
7857 #endif /* defined(WL_WIRELESS_EXT) */
7859 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7861 dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
7862 wl_event_msg_t *event, void **data)
7865 char eabuf[ETHER_ADDR_STR_LEN];
7866 struct ether_addr *addr = &event->addr;
7867 uint32 type = ntoh32_ua((void *)&event->event_type);
7872 bcm_ether_ntoa(addr, eabuf);
7874 return (BCME_ERROR);
7876 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
7877 if (dhd->wlanaudio_blist[cnt].is_blacklist)
7880 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
7881 addr, ETHER_ADDR_LEN)) {
7882 /* Mac address is Same */
7883 dhd->wlanaudio_blist[cnt].cnt++;
7885 if (dhd->wlanaudio_blist[cnt].cnt < 15) {
7886 /* black list is false */
7887 if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
7888 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
7890 dhd->wlanaudio_blist[cnt].is_blacklist = true;
7891 dhd->is_wlanaudio_blist = true;
7894 if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
7895 (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
7898 bzero(&dhd->wlanaudio_blist[cnt],
7899 sizeof(struct wlanaudio_blacklist));
7903 } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
7904 (!dhd->wlanaudio_blist[cnt].cnt)) {
7906 (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
7908 dhd->wlanaudio_blist[cnt].cnt++;
7909 dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;
7911 bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
7917 case WLC_E_AUTH_IND :
7919 case WLC_E_DEAUTH_IND :
7921 case WLC_E_ASSOC_IND:
7923 case WLC_E_REASSOC_IND:
7924 case WLC_E_DISASSOC:
7925 case WLC_E_DISASSOC_IND:
7930 bcm_ether_ntoa(addr, eabuf);
7932 return (BCME_ERROR);
7934 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
7935 if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
7936 addr, ETHER_ADDR_LEN)) {
7937 /* Mac address is Same */
7938 if (dhd->wlanaudio_blist[cnt].is_blacklist) {
7939 /* black list is true */
7940 bzero(&dhd->wlanaudio_blist[cnt],
7941 sizeof(struct wlanaudio_blacklist));
7946 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
7947 if (dhd->wlanaudio_blist[cnt].is_blacklist)
7953 dhd->is_wlanaudio_blist = false;
7961 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7963 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
7964 wl_event_msg_t *event, void **data)
7968 ASSERT(dhd != NULL);
7970 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7971 bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);
7973 if (bcmerror != BCME_OK)
7975 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7977 #ifdef SHOW_LOGTRACE
7978 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
7980 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
7981 #endif /* SHOW_LOGTRACE */
7983 if (bcmerror != BCME_OK)
7986 #if defined(WL_WIRELESS_EXT)
7987 if (event->bsscfgidx == 0) {
7989 * Wireless ext is on primary interface only
7992 ASSERT(dhd->iflist[*ifidx] != NULL);
7993 ASSERT(dhd->iflist[*ifidx]->net != NULL);
7995 if (dhd->iflist[*ifidx]->net) {
7996 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
7999 #endif /* defined(WL_WIRELESS_EXT) */
8002 ASSERT(dhd->iflist[*ifidx] != NULL);
8003 if (dhd->iflist[*ifidx] == NULL)
8005 ASSERT(dhd->iflist[*ifidx]->net != NULL);
8006 if (dhd->iflist[*ifidx]->net == NULL)
8008 if (dhd->iflist[*ifidx]->net)
8009 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
8010 #endif /* defined(WL_CFG80211) */
8015 /* send up locally generated event */
8017 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
8019 switch (ntoh32(event->event_type)) {
8026 #ifdef LOG_INTO_TCPDUMP
8028 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
8030 struct sk_buff *p, *skb;
8037 struct ether_header eth;
8039 pktlen = sizeof(eth) + data_len;
8042 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8043 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8045 bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
8046 bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
8047 ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
8048 eth.ether_type = hton16(ETHER_TYPE_BRCM);
8050 bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
8051 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
8052 skb = PKTTONATIVE(dhdp->osh, p);
8053 skb_data = skb->data;
8056 ifidx = dhd_ifname2idx(dhd, "wlan0");
8057 ifp = dhd->iflist[ifidx];
8059 ifp = dhd->iflist[0];
8062 skb->dev = ifp->net;
8063 skb->protocol = eth_type_trans(skb, skb->dev);
8064 skb->data = skb_data;
8067 /* Strip header, count, deliver upward */
8068 skb_pull(skb, ETH_HLEN);
8070 /* Send the packet */
8071 if (in_interrupt()) {
8078 /* Could not allocate a sk_buf */
8079 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
8082 #endif /* LOG_INTO_TCPDUMP */
8084 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
8086 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8087 struct dhd_info *dhdinfo = dhd->info;
8089 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8090 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
8092 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
8093 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8095 dhd_os_sdunlock(dhd);
8096 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
8098 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
8102 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
8104 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8105 struct dhd_info *dhdinfo = dhd->info;
8106 if (waitqueue_active(&dhdinfo->ctrl_wait))
8107 wake_up(&dhdinfo->ctrl_wait);
8112 #if defined(BCMSDIO) || defined(BCMPCIE)
8114 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
8117 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8120 /* Issue wl down command before resetting the chip */
8121 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
8122 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
8124 #ifdef PROP_TXSTATUS
8125 if (dhd->pub.wlfc_enabled)
8126 dhd_wlfc_deinit(&dhd->pub);
8127 #endif /* PROP_TXSTATUS */
8129 if (dhd->pub.pno_state)
8130 dhd_pno_deinit(&dhd->pub);
8136 dhd_update_fw_nv_path(dhd);
8137 /* update firmware and nvram path to sdio bus */
8138 dhd_bus_update_fw_nv_path(dhd->pub.bus,
8139 dhd->fw_path, dhd->nv_path);
8141 #endif /* BCMSDIO */
8143 ret = dhd_bus_devreset(&dhd->pub, flag);
8145 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
8154 dhd_net_bus_suspend(struct net_device *dev)
8156 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8157 return dhd_bus_suspend(&dhd->pub);
8161 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
8163 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8164 return dhd_bus_resume(&dhd->pub, stage);
8167 #endif /* BCMSDIO */
8168 #endif /* BCMSDIO || BCMPCIE */
8170 int net_os_set_suspend_disable(struct net_device *dev, int val)
8172 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8176 ret = dhd->pub.suspend_disable_flag;
8177 dhd->pub.suspend_disable_flag = val;
8182 int net_os_set_suspend(struct net_device *dev, int val, int force)
8185 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8188 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8189 ret = dhd_set_suspend(val, &dhd->pub);
8191 ret = dhd_suspend_resume_helper(dhd, val, force);
8194 wl_cfg80211_update_power_mode(dev);
8200 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
8202 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8205 dhd->pub.suspend_bcn_li_dtim = val;
8210 #ifdef PKT_FILTER_SUPPORT
8211 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
8213 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8214 char *filterp = NULL;
8218 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
8219 (num == DHD_MDNS_FILTER_NUM))
8221 if (num >= dhd->pub.pktfilter_count)
8224 case DHD_BROADCAST_FILTER_NUM:
8225 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
8228 case DHD_MULTICAST4_FILTER_NUM:
8229 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
8232 case DHD_MULTICAST6_FILTER_NUM:
8233 filterp = "103 0 0 0 0xFFFF 0x3333";
8242 dhd->pub.pktfilter[num] = filterp;
8243 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
8244 } else { /* Delete filter */
8245 if (dhd->pub.pktfilter[num] != NULL) {
8246 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
8247 dhd->pub.pktfilter[num] = NULL;
8253 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
8258 /* Packet filtering is set only if we still in early-suspend and
8259 * we need either to turn it ON or turn it OFF
8260 * We can always turn it OFF in case of early-suspend, but we turn it
8261 * back ON only if suspend_disable_flag was not set
8263 if (dhdp && dhdp->up) {
8264 if (dhdp->in_suspend) {
8265 if (!val || (val && !dhdp->suspend_disable_flag))
8266 dhd_enable_packet_filter(val, dhdp);
8272 /* function to enable/disable packet for Network device */
8273 int net_os_enable_packet_filter(struct net_device *dev, int val)
8275 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8277 return dhd_os_enable_packet_filter(&dhd->pub, val);
8279 #endif /* PKT_FILTER_SUPPORT */
8282 dhd_dev_init_ioctl(struct net_device *dev)
8284 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8287 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
8295 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
8297 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
8299 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8301 return (dhd_pno_stop_for_ssid(&dhd->pub));
8303 /* Linux wrapper to call common dhd_pno_set_for_ssid */
8305 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
8306 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
8308 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8310 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
8311 pno_repeat, pno_freq_expo_max, channel_list, nchan));
8314 /* Linux wrapper to call common dhd_pno_enable */
8316 dhd_dev_pno_enable(struct net_device *dev, int enable)
8318 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8320 return (dhd_pno_enable(&dhd->pub, enable));
8323 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
8325 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
8326 struct dhd_pno_hotlist_params *hotlist_params)
8328 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8329 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
8331 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
8333 dhd_dev_pno_stop_for_batch(struct net_device *dev)
8335 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8336 return (dhd_pno_stop_for_batch(&dhd->pub));
8338 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
8340 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
8342 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8343 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
8345 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
8347 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
8349 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8350 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
8352 #endif /* PNO_SUPPORT */
8354 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8355 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
8358 struct net_device *dev;
8360 dhd = (dhd_info_t *)((dhd_pub_t *)dhd_info)->info;
8361 dev = dhd->iflist[0]->net;
8367 #if defined(WL_WIRELESS_EXT)
8368 wl_iw_send_priv_event(dev, "HANG");
8370 #if defined(WL_CFG80211)
8371 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8377 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
8381 if (!dhdp->hang_was_sent) {
8382 dhdp->hang_was_sent = 1;
8383 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
8384 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
8390 int net_os_send_hang_message(struct net_device *dev)
8392 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8396 /* Report FW problem when enabled */
8397 if (dhd->pub.hang_report) {
8398 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8399 ret = dhd_os_send_hang_message(&dhd->pub);
8401 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8404 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
8406 /* Enforce bus down to stop any future traffic */
8407 dhd->pub.busstate = DHD_BUS_DOWN;
8412 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
8415 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
8417 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8418 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
8421 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
8422 wl_country_t *cspec)
8424 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8425 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
8428 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
8430 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8431 if (dhd && dhd->pub.up) {
8432 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
8434 wl_update_wiphybands(NULL, notify);
8439 void dhd_bus_band_set(struct net_device *dev, uint band)
8441 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8442 if (dhd && dhd->pub.up) {
8444 wl_update_wiphybands(NULL, true);
8449 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
8451 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8453 if (!fw || fw[0] == '\0')
8456 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
8457 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
8460 if (strstr(fw, "apsta") != NULL) {
8461 DHD_INFO(("GOT APSTA FIRMWARE\n"));
8462 ap_fw_loaded = TRUE;
8464 DHD_INFO(("GOT STA FIRMWARE\n"));
8465 ap_fw_loaded = FALSE;
8471 void dhd_net_if_lock(struct net_device *dev)
8473 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8474 dhd_net_if_lock_local(dhd);
8477 void dhd_net_if_unlock(struct net_device *dev)
8479 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8480 dhd_net_if_unlock_local(dhd);
8483 static void dhd_net_if_lock_local(dhd_info_t *dhd)
8485 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8487 mutex_lock(&dhd->dhd_net_if_mutex);
8491 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
8493 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8495 mutex_unlock(&dhd->dhd_net_if_mutex);
8499 static void dhd_suspend_lock(dhd_pub_t *pub)
8501 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8502 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8504 mutex_lock(&dhd->dhd_suspend_mutex);
8508 static void dhd_suspend_unlock(dhd_pub_t *pub)
8510 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8511 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8513 mutex_unlock(&dhd->dhd_suspend_mutex);
8517 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
8519 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8520 unsigned long flags = 0;
8523 spin_lock_irqsave(&dhd->dhd_lock, flags);
8528 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
8530 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8533 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
8536 /* Linux specific multipurpose spinlock API */
8538 dhd_os_spin_lock_init(osl_t *osh)
8540 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8541 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8542 /* and this results in kernel asserts in internal builds */
8543 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
8545 spin_lock_init(lock);
8546 return ((void *)lock);
8549 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
8551 MFREE(osh, lock, sizeof(spinlock_t) + 4);
8554 dhd_os_spin_lock(void *lock)
8556 unsigned long flags = 0;
8559 spin_lock_irqsave((spinlock_t *)lock, flags);
8564 dhd_os_spin_unlock(void *lock, unsigned long flags)
8567 spin_unlock_irqrestore((spinlock_t *)lock, flags);
8571 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
8573 return (atomic_read(&dhd->pend_8021x_cnt));
8576 #define MAX_WAIT_FOR_8021X_TX 100
8579 dhd_wait_pend8021x(struct net_device *dev)
8581 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8582 int timeout = msecs_to_jiffies(10);
8583 int ntimes = MAX_WAIT_FOR_8021X_TX;
8584 int pend = dhd_get_pend_8021x_cnt(dhd);
8586 while (ntimes && pend) {
8588 set_current_state(TASK_INTERRUPTIBLE);
8589 DHD_PERIM_UNLOCK(&dhd->pub);
8590 schedule_timeout(timeout);
8591 DHD_PERIM_LOCK(&dhd->pub);
8592 set_current_state(TASK_RUNNING);
8595 pend = dhd_get_pend_8021x_cnt(dhd);
8599 atomic_set(&dhd->pend_8021x_cnt, 0);
8600 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
8607 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
8611 mm_segment_t old_fs;
8614 /* change to KERNEL_DS address limit */
8618 /* open file to write */
8619 #if defined(CUSTOMER_HW5)
8620 fp = filp_open("/data/mem_dump", O_WRONLY|O_CREAT, 0640);
8622 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
8627 printf("%s: open file error\n", __FUNCTION__);
8632 /* Write buf to file */
8633 fp->f_op->write(fp, buf, size, &pos);
8634 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
8635 fp->f_op->fsync(fp, 0, size-1, 1);
8637 fp->f_op->fsync(fp, 1);
8638 #endif /* KERNEL_VERSION(3, 1, 0) */
8641 /* free buf before return */
8643 MFREE(dhd->osh, buf, size);
8645 /* close file before return */
8647 filp_close(fp, current->files);
8648 /* restore previous address limit */
8653 #endif /* DHD_DEBUG */
8655 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
8657 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8658 unsigned long flags;
8662 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8663 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
8664 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
8665 #ifdef CONFIG_HAS_WAKELOCK
8666 if (dhd->wakelock_rx_timeout_enable)
8667 wake_lock_timeout(&dhd->wl_rxwake,
8668 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
8669 if (dhd->wakelock_ctrl_timeout_enable)
8670 wake_lock_timeout(&dhd->wl_ctrlwake,
8671 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
8673 dhd->wakelock_rx_timeout_enable = 0;
8674 dhd->wakelock_ctrl_timeout_enable = 0;
8675 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8680 int net_os_wake_lock_timeout(struct net_device *dev)
8682 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8686 ret = dhd_os_wake_lock_timeout(&dhd->pub);
8690 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
8692 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8693 unsigned long flags;
8696 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8697 if (val > dhd->wakelock_rx_timeout_enable)
8698 dhd->wakelock_rx_timeout_enable = val;
8699 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8704 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
8706 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8707 unsigned long flags;
8710 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8711 if (val > dhd->wakelock_ctrl_timeout_enable)
8712 dhd->wakelock_ctrl_timeout_enable = val;
8713 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8718 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
8720 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8721 unsigned long flags;
8724 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8725 dhd->wakelock_ctrl_timeout_enable = 0;
8726 #ifdef CONFIG_HAS_WAKELOCK
8727 if (wake_lock_active(&dhd->wl_ctrlwake))
8728 wake_unlock(&dhd->wl_ctrlwake);
8730 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8735 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
8737 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8741 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
8745 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
8747 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8751 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
8755 int dhd_os_wake_lock(dhd_pub_t *pub)
8757 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8758 unsigned long flags;
8762 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8764 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
8765 #ifdef CONFIG_HAS_WAKELOCK
8766 wake_lock(&dhd->wl_wifi);
8767 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8768 dhd_bus_dev_pm_stay_awake(pub);
8771 dhd->wakelock_counter++;
8772 ret = dhd->wakelock_counter;
8773 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8778 int net_os_wake_lock(struct net_device *dev)
8780 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8784 ret = dhd_os_wake_lock(&dhd->pub);
8788 int dhd_os_wake_unlock(dhd_pub_t *pub)
8790 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8791 unsigned long flags;
8794 dhd_os_wake_lock_timeout(pub);
8796 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8797 if (dhd->wakelock_counter > 0) {
8798 dhd->wakelock_counter--;
8799 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
8800 #ifdef CONFIG_HAS_WAKELOCK
8801 wake_unlock(&dhd->wl_wifi);
8802 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8803 dhd_bus_dev_pm_relax(pub);
8806 ret = dhd->wakelock_counter;
8808 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8813 int dhd_os_check_wakelock(dhd_pub_t *pub)
8815 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
8816 KERNEL_VERSION(2, 6, 36)))
8821 dhd = (dhd_info_t *)(pub->info);
8822 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
8824 #ifdef CONFIG_HAS_WAKELOCK
8825 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
8826 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
8827 (wake_lock_active(&dhd->wl_wdwake))))
8829 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8830 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
8836 int dhd_os_check_wakelock_all(dhd_pub_t *pub)
8838 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
8839 KERNEL_VERSION(2, 6, 36)))
8844 dhd = (dhd_info_t *)(pub->info);
8845 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
8847 #ifdef CONFIG_HAS_WAKELOCK
8848 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
8849 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
8850 wake_lock_active(&dhd->wl_wdwake) ||
8851 wake_lock_active(&dhd->wl_rxwake) ||
8852 wake_lock_active(&dhd->wl_ctrlwake))) {
8855 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8856 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
8862 int net_os_wake_unlock(struct net_device *dev)
8864 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8868 ret = dhd_os_wake_unlock(&dhd->pub);
8872 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
8874 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8875 unsigned long flags;
8879 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8880 #ifdef CONFIG_HAS_WAKELOCK
8881 /* if wakelock_wd_counter was never used : lock it at once */
8882 if (!dhd->wakelock_wd_counter)
8883 wake_lock(&dhd->wl_wdwake);
8885 dhd->wakelock_wd_counter++;
8886 ret = dhd->wakelock_wd_counter;
8887 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8892 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
8894 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8895 unsigned long flags;
8899 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8900 if (dhd->wakelock_wd_counter) {
8901 dhd->wakelock_wd_counter = 0;
8902 #ifdef CONFIG_HAS_WAKELOCK
8903 wake_unlock(&dhd->wl_wdwake);
8906 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8911 #ifdef BCMPCIE_OOB_HOST_WAKE
8912 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
8914 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8918 #ifdef CONFIG_HAS_WAKELOCK
8919 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
8925 int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
8927 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8931 #ifdef CONFIG_HAS_WAKELOCK
8932 /* if wl_intrwake is active, unlock it */
8933 if (wake_lock_active(&dhd->wl_intrwake)) {
8934 wake_unlock(&dhd->wl_intrwake);
8940 #endif /* BCMPCIE_OOB_HOST_WAKE */
8942 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
8943 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
8945 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
8947 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8948 unsigned long flags;
8952 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8953 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
8954 if (dhd->waive_wakelock == FALSE) {
8955 /* record current lock status */
8956 dhd->wakelock_before_waive = dhd->wakelock_counter;
8957 dhd->waive_wakelock = TRUE;
8959 ret = dhd->wakelock_wd_counter;
8960 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8965 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
8967 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8968 unsigned long flags;
8974 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8975 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
8976 if (!dhd->waive_wakelock)
8979 dhd->waive_wakelock = FALSE;
8980 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
8981 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
8982 * the lock in between, do the same by calling wake_unlock or pm_relax
8984 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
8985 #ifdef CONFIG_HAS_WAKELOCK
8986 wake_lock(&dhd->wl_wifi);
8987 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8988 dhd_bus_dev_pm_stay_awake(&dhd->pub);
8990 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
8991 #ifdef CONFIG_HAS_WAKELOCK
8992 wake_unlock(&dhd->wl_wifi);
8993 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
8994 dhd_bus_dev_pm_relax(&dhd->pub);
8997 dhd->wakelock_before_waive = 0;
8999 ret = dhd->wakelock_wd_counter;
9000 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9004 bool dhd_os_check_if_up(dhd_pub_t *pub)
9011 #if defined(BCMSDIO)
9012 /* function to collect firmware, chip id and chip version info */
9013 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
9017 i = snprintf(info_string, sizeof(info_string),
9018 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
9023 i = snprintf(&info_string[i], sizeof(info_string) - i,
9024 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
9025 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
9027 #endif /* defined(BCMSDIO) */
9028 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
9032 dhd_info_t *dhd = NULL;
9034 if (!net || !DEV_PRIV(net)) {
9035 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
9039 dhd = DHD_DEV_INFO(net);
9043 ifidx = dhd_net2idx(dhd, net);
9044 if (ifidx == DHD_BAD_IF) {
9045 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
9049 DHD_OS_WAKE_LOCK(&dhd->pub);
9050 DHD_PERIM_LOCK(&dhd->pub);
9052 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
9053 dhd_check_hang(net, &dhd->pub, ret);
9055 DHD_PERIM_UNLOCK(&dhd->pub);
9056 DHD_OS_WAKE_UNLOCK(&dhd->pub);
9061 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
9063 struct net_device *net;
9065 net = dhd_idx2net(dhdp, ifidx);
9067 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
9071 return dhd_check_hang(net, dhdp, ret);
9074 /* Return instance */
9075 int dhd_get_instance(dhd_pub_t *dhdp)
9077 return dhdp->info->unit;
9081 #ifdef PROP_TXSTATUS
9083 void dhd_wlfc_plat_init(void *dhd)
9088 void dhd_wlfc_plat_deinit(void *dhd)
9093 bool dhd_wlfc_skip_fc(void)
9097 #endif /* PROP_TXSTATUS */
9101 #include <linux/debugfs.h>
9103 extern uint32 dhd_readregl(void *bp, uint32 addr);
9104 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
9106 typedef struct dhd_dbgfs {
9107 struct dentry *debugfs_dir;
9108 struct dentry *debugfs_mem;
9113 dhd_dbgfs_t g_dbgfs;
9116 dhd_dbg_state_open(struct inode *inode, struct file *file)
9118 file->private_data = inode->i_private;
9123 dhd_dbg_state_read(struct file *file, char __user *ubuf,
9124 size_t count, loff_t *ppos)
9133 if (pos >= g_dbgfs.size || !count)
9135 if (count > g_dbgfs.size - pos)
9136 count = g_dbgfs.size - pos;
9138 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
9139 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
9141 ret = copy_to_user(ubuf, &tmp, 4);
9146 *ppos = pos + count;
9154 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
9162 if (pos >= g_dbgfs.size || !count)
9164 if (count > g_dbgfs.size - pos)
9165 count = g_dbgfs.size - pos;
9167 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
9171 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
9172 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
9179 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
9188 pos = file->f_pos + off;
9191 pos = g_dbgfs.size - off;
9193 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
9196 static const struct file_operations dhd_dbg_state_ops = {
9197 .read = dhd_dbg_state_read,
9198 .write = dhd_debugfs_write,
9199 .open = dhd_dbg_state_open,
9200 .llseek = dhd_debugfs_lseek
9203 static void dhd_dbg_create(void)
9205 if (g_dbgfs.debugfs_dir) {
9206 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
9207 NULL, &dhd_dbg_state_ops);
9211 void dhd_dbg_init(dhd_pub_t *dhdp)
9215 g_dbgfs.dhdp = dhdp;
9216 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
9218 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
9219 if (IS_ERR(g_dbgfs.debugfs_dir)) {
9220 err = PTR_ERR(g_dbgfs.debugfs_dir);
9221 g_dbgfs.debugfs_dir = NULL;
9230 void dhd_dbg_remove(void)
9232 debugfs_remove(g_dbgfs.debugfs_mem);
9233 debugfs_remove(g_dbgfs.debugfs_dir);
9235 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
9238 #endif /* ifdef BCMDBGFS */
9243 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
9245 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
9246 struct sk_buff *skb;
9248 uint16 dport = 0, oldmagic = 0xACAC;
9252 /* timestamp packet */
9254 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
9256 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
9257 /* memcpy(&proto, p1+26, 4); */
9258 memcpy(&dport, p1+40, 2);
9259 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
9260 dport = ntoh16(dport);
9263 /* timestamp only if icmp or udb iperf with port 5555 */
9264 /* if (proto == 17 && dport == tsport) { */
9265 if (dport >= tsport && dport <= tsport + 20) {
9267 skb = (struct sk_buff *) pktbuf;
9269 htsf = dhd_get_htsf(dhd, 0);
9270 memset(skb->data + 44, 0, 2); /* clear checksum */
9271 memcpy(skb->data+82, &oldmagic, 2);
9272 memcpy(skb->data+84, &htsf, 4);
9274 memset(&ts, 0, sizeof(htsfts_t));
9275 ts.magic = HTSFMAGIC;
9276 ts.prio = PKTPRIO(pktbuf);
9277 ts.seqnum = htsf_seqnum++;
9278 ts.c10 = get_cycles();
9280 ts.endmagic = HTSFENDMAGIC;
9282 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
9286 static void dhd_dump_htsfhisto(histo_t *his, char *s)
9288 int pktcnt = 0, curval = 0, i;
9289 for (i = 0; i < (NUMBIN-2); i++) {
9291 printf("%d ", his->bin[i]);
9292 pktcnt += his->bin[i];
9294 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
9295 his->bin[NUMBIN-1], s);
9299 void sorttobin(int value, histo_t *histo)
9304 histo->bin[NUMBIN-1]++;
9307 if (value > histo->bin[NUMBIN-2]) /* store the max value */
9308 histo->bin[NUMBIN-2] = value;
9310 for (i = 0; i < (NUMBIN-2); i++) {
9311 binval += 500; /* 500m s bins */
9312 if (value <= binval) {
9317 histo->bin[NUMBIN-3]++;
9321 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
9323 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9324 struct sk_buff *skb;
9327 int d1, d2, d3, end2end;
9331 skb = PKTTONATIVE(dhdp->osh, pktbuf);
9332 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
9334 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
9335 memcpy(&old_magic, p1+78, 2);
9336 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
9341 if (htsf_ts->magic == HTSFMAGIC) {
9342 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
9343 htsf_ts->cE0 = get_cycles();
9346 if (old_magic == 0xACAC) {
9349 htsf = dhd_get_htsf(dhd, 0);
9350 memcpy(skb->data+92, &htsf, sizeof(uint32));
9352 memcpy(&ts[tsidx].t1, skb->data+80, 16);
9354 d1 = ts[tsidx].t2 - ts[tsidx].t1;
9355 d2 = ts[tsidx].t3 - ts[tsidx].t2;
9356 d3 = ts[tsidx].t4 - ts[tsidx].t3;
9357 end2end = ts[tsidx].t4 - ts[tsidx].t1;
9359 sorttobin(d1, &vi_d1);
9360 sorttobin(d2, &vi_d2);
9361 sorttobin(d3, &vi_d3);
9362 sorttobin(end2end, &vi_d4);
9364 if (end2end > 0 && end2end > maxdelay) {
9366 maxdelaypktno = tspktcnt;
9367 memcpy(&maxdelayts, &ts[tsidx], 16);
9369 if (++tsidx >= TSMAX)
9374 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
9376 uint32 htsf = 0, cur_cycle, delta, delta_us;
9377 uint32 factor, baseval, baseval2;
9383 if (cur_cycle > dhd->htsf.last_cycle)
9384 delta = cur_cycle - dhd->htsf.last_cycle;
9386 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
9391 if (dhd->htsf.coef) {
9392 /* times ten to get the first digit */
9393 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
9394 baseval = (delta*10)/factor;
9395 baseval2 = (delta*10)/(factor+1);
9396 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
9397 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
9400 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
9406 static void dhd_dump_latency(void)
9409 int d1, d2, d3, d4, d5;
9411 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
9412 for (i = 0; i < TSMAX; i++) {
9413 d1 = ts[i].t2 - ts[i].t1;
9414 d2 = ts[i].t3 - ts[i].t2;
9415 d3 = ts[i].t4 - ts[i].t3;
9416 d4 = ts[i].t4 - ts[i].t1;
9417 d5 = ts[max].t4-ts[max].t1;
9418 if (d4 > d5 && d4 > 0) {
9421 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
9422 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
9426 printf("current idx = %d \n", tsidx);
9428 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
9429 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
9430 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
9431 maxdelayts.t2 - maxdelayts.t1,
9432 maxdelayts.t3 - maxdelayts.t2,
9433 maxdelayts.t4 - maxdelayts.t3,
9434 maxdelayts.t4 - maxdelayts.t1);
9439 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
9451 memset(&ioc, 0, sizeof(ioc));
9452 memset(&tsf_buf, 0, sizeof(tsf_buf));
9454 ioc.cmd = WLC_GET_VAR;
9456 ioc.len = (uint)sizeof(buf);
9459 strncpy(buf, "tsf", sizeof(buf) - 1);
9460 buf[sizeof(buf) - 1] = '\0';
9461 s1 = dhd_get_htsf(dhd, 0);
9462 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
9464 DHD_ERROR(("%s: tsf is not supported by device\n",
9465 dhd_ifname(&dhd->pub, ifidx)));
9470 s2 = dhd_get_htsf(dhd, 0);
9472 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
9473 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
9474 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
9475 dhd->htsf.coefdec2, s2-tsf_buf.low);
9476 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
9480 void htsf_update(dhd_info_t *dhd, void *data)
9482 static ulong cur_cycle = 0, prev_cycle = 0;
9483 uint32 htsf, tsf_delta = 0;
9484 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
9488 /* cycles_t in inlcude/mips/timex.h */
9492 prev_cycle = cur_cycle;
9495 if (cur_cycle > prev_cycle)
9496 cyc_delta = cur_cycle - prev_cycle;
9500 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
9504 printf(" tsf update ata point er is null \n");
9506 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
9507 memcpy(&cur_tsf, data, sizeof(tsf_t));
9509 if (cur_tsf.low == 0) {
9510 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
9514 if (cur_tsf.low > prev_tsf.low)
9515 tsf_delta = (cur_tsf.low - prev_tsf.low);
9517 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
9518 cur_tsf.low, prev_tsf.low));
9519 if (cur_tsf.high > prev_tsf.high) {
9520 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
9521 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
9524 return; /* do not update */
9528 hfactor = cyc_delta / tsf_delta;
9529 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
9530 dec1 = tmp/tsf_delta;
9531 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
9532 tmp = (tmp - (dec1*tsf_delta))*10;
9533 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
9552 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
9553 dhd->htsf.coef = hfactor;
9554 dhd->htsf.last_cycle = cur_cycle;
9555 dhd->htsf.last_tsf = cur_tsf.low;
9556 dhd->htsf.coefdec1 = dec1;
9557 dhd->htsf.coefdec2 = dec2;
9560 htsf = prev_tsf.low;
9564 #endif /* WLMEDIA_HTSF */
9566 #ifdef CUSTOM_SET_CPUCORE
9567 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
9569 int e_dpc = 0, e_rxf = 0, retry_set = 0;
9571 if (!(dhd->chan_isvht80)) {
9572 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
9579 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9580 cpumask_of(DPC_CPUCORE));
9582 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
9583 cpumask_of(PRIMARY_CPUCORE));
9585 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9586 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
9591 } while (e_dpc < 0);
9596 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9597 cpumask_of(RXF_CPUCORE));
9599 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
9600 cpumask_of(PRIMARY_CPUCORE));
9602 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
9603 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
9608 } while (e_rxf < 0);
9610 #ifdef DHD_OF_SUPPORT
9611 interrupt_set_cpucore(set);
9612 #endif /* DHD_OF_SUPPORT */
9613 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
9617 #endif /* CUSTOM_SET_CPUCORE */
9619 /* Get interface specific ap_isolate configuration */
9620 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
9622 dhd_info_t *dhd = dhdp->info;
9625 ASSERT(idx < DHD_MAX_IFS);
9627 ifp = dhd->iflist[idx];
9629 return ifp->ap_isolate;
9632 /* Set interface specific ap_isolate configuration */
9633 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
9635 dhd_info_t *dhd = dhdp->info;
9638 ASSERT(idx < DHD_MAX_IFS);
9640 ifp = dhd->iflist[idx];
9642 ifp->ap_isolate = val;
9647 #if defined(DHD_DEBUG)
9648 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
9650 dhd_dump_t *dump = NULL;
9651 dump = MALLOC(dhdp->osh, sizeof(dhd_dump_t));
9653 dump->bufsize = size;
9654 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
9655 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
9659 dhd_mem_dump(void *handle, void *event_info, u8 event)
9661 dhd_info_t *dhd = handle;
9662 dhd_dump_t *dump = event_info;
9667 if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
9668 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
9670 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
9672 #endif /* DHD_DEBUG */
9675 /* Returns interface specific WMF configuration */
9676 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
9678 dhd_info_t *dhd = dhdp->info;
9681 ASSERT(idx < DHD_MAX_IFS);
9683 ifp = dhd->iflist[idx];
9686 #endif /* DHD_WMF */
9689 #ifdef DHD_UNICAST_DHCP
9691 dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
9692 uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
9694 uint8 *frame = PKTDATA(pub->osh, pktbuf);
9695 int length = PKTLEN(pub->osh, pktbuf);
9696 uint8 *pt; /* Pointer to type field */
9699 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
9700 if (length < ETHER_HDR_LEN) {
9701 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
9702 __FUNCTION__, length));
9704 } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
9705 /* Frame is Ethernet II */
9706 pt = frame + ETHER_TYPE_OFFSET;
9707 } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
9708 !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
9709 pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
9712 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
9717 ethertype = ntoh16_ua(pt);
9719 /* Skip VLAN tag, if any */
9720 if (ethertype == ETHER_TYPE_8021Q) {
9723 if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
9724 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
9725 __FUNCTION__, length));
9729 ethertype = ntoh16_ua(pt);
9732 *data_ptr = pt + ETHER_TYPE_LEN;
9733 *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
9734 *et_ptr = ethertype;
9740 dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
9741 uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
9743 struct ipv4_hdr *iph; /* IP frame pointer */
9744 int iplen; /* IP frame length */
9745 uint16 ethertype, iphdrlen, ippktlen;
9750 if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
9751 &iplen, ðertype, &snap) != 0)
9754 if (ethertype != ETHER_TYPE_IP) {
9758 /* We support IPv4 only */
9759 if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
9763 /* Header length sanity */
9764 iphdrlen = IPV4_HLEN(iph);
9767 * Packet length sanity; sometimes we receive eth-frame size bigger
9768 * than the IP content, which results in a bad tcp chksum
9770 ippktlen = ntoh16(iph->tot_len);
9771 if (ippktlen < iplen) {
9773 DHD_INFO(("%s: extra frame length ignored\n",
9776 } else if (ippktlen > iplen) {
9777 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
9778 __FUNCTION__, ippktlen - iplen));
9782 if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
9783 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
9784 __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
9789 * We don't handle fragmented IP packets. A first frag is indicated by the MF
9790 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
9792 iph_frag = ntoh16(iph->frag);
9794 if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
9795 DHD_INFO(("DHD:%s: IP fragment not handled\n",
9800 prot = IPV4_PROT(iph);
9802 *data_ptr = (((uint8 *)iph) + iphdrlen);
9803 *len_ptr = iplen - iphdrlen;
9808 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
9810 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
9813 uint8 *eh = PKTDATA(pub->osh, pktbuf);
9822 if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
9824 if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
9826 if (prot != IP_PROT_UDP)
9828 /* check frame length, at least UDP_HDR_LEN */
9829 if (udpl < UDP_HDR_LEN) {
9830 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
9834 port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
9835 /* only process DHCP packets from server to client */
9836 if (port != DHCP_PORT_CLIENT)
9839 dhcp = udph + UDP_HDR_LEN;
9840 dhcpl = udpl - UDP_HDR_LEN;
9842 if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
9843 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
9847 /* only process DHCP reply(offer/ack) packets */
9848 if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
9850 chaddr = dhcp + DHCP_CHADDR_OFFSET;
9851 stainfo = dhd_find_sta(pub, ifidx, chaddr);
9853 bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
9858 #endif /* DHD_UNICAST_DHD */
9859 #ifdef DHD_L2_FILTER
9860 /* Check if packet type is ICMP ECHO */
9862 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
9864 struct bcmicmp_hdr *icmph;
9868 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
9870 if (prot == IP_PROT_ICMP) {
9871 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
9876 #endif /* DHD_L2_FILTER */
9879 dhd_set_slpauto_mode(struct net_device *dev, s32 val)
9882 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
9888 DHD_ERROR(("Setting dhd auto sleep(dhd_slpauto) to %s\n",
9889 val ? "enable" : "disable"));
9890 dhd_os_sdlock(&dhd->pub);
9892 ret = dhd_slpauto_config(&dhd->pub, val);
9893 dhd_os_sdunlock(&dhd->pub);
9897 return BCME_UNSUPPORTED;
9901 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
9902 int dhd_rps_cpus_enable(struct net_device *net, int enable)
9904 dhd_info_t *dhd = DHD_DEV_INFO(net);
9907 char * RPS_CPU_SETBUF;
9909 ifidx = dhd_net2idx(dhd, net);
9910 if (ifidx == DHD_BAD_IF) {
9911 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
9915 if (ifidx == PRIMARY_INF) {
9916 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
9917 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
9918 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
9920 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
9921 RPS_CPU_SETBUF = RPS_CPUS_MASK;
9923 } else if (ifidx == VIRTUAL_INF) {
9924 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
9925 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
9927 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
9931 ifp = dhd->iflist[ifidx];
9934 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
9935 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
9937 custom_rps_map_clear(ifp->net->_rx);
9940 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
9946 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
9948 struct rps_map *old_map, *map;
9951 static DEFINE_SPINLOCK(rps_map_lock);
9953 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
9955 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
9956 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
9960 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
9962 free_cpumask_var(mask);
9963 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
9967 map = kzalloc(max_t(unsigned int,
9968 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
9971 free_cpumask_var(mask);
9972 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
9977 for_each_cpu(cpu, mask)
9978 map->cpus[i++] = cpu;
9985 free_cpumask_var(mask);
9986 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
9990 spin_lock(&rps_map_lock);
9991 old_map = rcu_dereference_protected(queue->rps_map,
9992 lockdep_is_held(&rps_map_lock));
9993 rcu_assign_pointer(queue->rps_map, map);
9994 spin_unlock(&rps_map_lock);
9997 static_key_slow_inc(&rps_needed);
9999 kfree_rcu(old_map, rcu);
10000 static_key_slow_dec(&rps_needed);
10002 free_cpumask_var(mask);
10004 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
10008 void custom_rps_map_clear(struct netdev_rx_queue *queue)
10010 struct rps_map *map;
10012 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10014 map = rcu_dereference_protected(queue->rps_map, 1);
10016 RCU_INIT_POINTER(queue->rps_map, NULL);
10017 kfree_rcu(map, rcu);
10018 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
10021 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
10024 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
10026 SDA_setSharedMemory4Send(unsigned int buffer_id,
10027 unsigned char *buffer, unsigned int buffer_size,
10028 unsigned int packet_size, unsigned int headroom_size)
10030 dhd_info_t *dhd = dhd_global;
10032 sda_packet_length = packet_size;
10040 SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
10042 dhd_info_t *dhd = dhd_global;
10051 SDA_getTsf(unsigned char vif_id)
10053 dhd_info_t *dhd = dhd_global;
10055 char buf[WLC_IOCTL_SMLEN];
10063 memset(buf, 0, sizeof(buf));
10065 if (vif_id == 0) /* wlan0 tsf */
10066 ifidx = dhd_ifname2idx(dhd, "wlan0");
10067 else if (vif_id == 1) /* p2p0 tsf */
10068 ifidx = dhd_ifname2idx(dhd, "p2p0");
10070 bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));
10072 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
10073 DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
10077 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
10078 tsf_val = (uint64)tsf_buf.high;
10079 DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
10080 __FUNCTION__, tsf_buf.high, tsf_buf.low));
10082 return ((tsf_val << 32) | tsf_buf.low);
10084 EXPORT_SYMBOL(SDA_getTsf);
10089 dhd_info_t *dhd = dhd_global;
10091 char iovbuf[WLC_IOCTL_SMLEN];
10093 bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
10094 dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
10096 DHD_TRACE(("%s\n", __FUNCTION__));
10100 extern struct net_device *wl0dot1_dev;
10103 BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
10105 struct sk_buff *skb;
10106 sda_packet_t *shm_packet = packet;
10107 dhd_info_t *dhd = dhd_global;
10110 static unsigned int cnt_t = 1;
10116 if (dhd->is_wlanaudio_blist) {
10117 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
10118 if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
10119 if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
10120 shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
10126 if ((cnt_t % 10000) == 0)
10131 /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
10133 skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
10135 skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
10136 skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
10137 skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
10140 skb->dev = wl0dot1_dev;
10141 shm_packet->txTsf = 0x0;
10142 shm_packet->rxTsf = 0x0;
10143 memcpy(skb->data, &shm_packet->headroom,
10144 sda_packet_length - OFFSETOF(sda_packet_t, headroom));
10145 shm_packet->desc.ready_to_copy = 0;
10147 dhd_start_xmit(skb, skb->dev);
10151 SDA_registerCallback4Recv(unsigned char *pBufferTotal,
10152 unsigned int BufferTotalSize)
10154 dhd_info_t *dhd = dhd_global;
10163 SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
10164 unsigned int BufferTotalSize,
10165 unsigned int BufferUnitSize,
10166 unsigned int Headroomsize)
10168 dhd_info_t *dhd = dhd_global;
10177 SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
10179 dhd_info_t *dhd = dhd_global;
10186 EXPORT_SYMBOL(SDA_setSharedMemory4Send);
10187 EXPORT_SYMBOL(SDA_registerCallback4SendDone);
10188 EXPORT_SYMBOL(SDA_syncTsf);
10189 EXPORT_SYMBOL(SDA_function4Send);
10190 EXPORT_SYMBOL(SDA_registerCallback4Recv);
10191 EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
10192 EXPORT_SYMBOL(SDA_function4RecvDone);
10194 #endif /* CUSTOMER_HW20 && WLANAUDIO */