/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)

#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec)        ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec) * 5 - 4) / 18)
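
/*
 * Worked example (illustrative, not from the original source): with the
 * long guard interval one OFDM symbol lasts 4 us, so SYMBOL_TIME(10) is
 * 40 us.  With the 3.6 us short-GI symbol, SYMBOL_TIME_HALFGI(10) is
 * (10 * 18 + 4) / 5 = 36 us; the "+ 4" rounds the integer division up
 * to the next whole microsecond for symbol counts that do not divide
 * evenly.
 */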
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 }, /* 0: BPSK */
        {    52,  108 }, /* 1: QPSK 1/2 */
        {    78,  162 }, /* 2: QPSK 3/4 */
        {   104,  216 }, /* 3: 16-QAM 1/2 */
        {   156,  324 }, /* 4: 16-QAM 3/4 */
        {   208,  432 }, /* 5: 64-QAM 2/3 */
        {   234,  486 }, /* 6: 64-QAM 3/4 */
        {   260,  540 }, /* 7: 64-QAM 5/6 */
};
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
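
/*
 * Illustrative lookup: for HT rate code 12 (MCS 12, two streams),
 * HT_RC_2_STREAMS(12) = ((12 & 0x78) >> 3) + 1 = 2 and 12 % 8 = 4,
 * so bits_per_symbol[4][0] = 156 bits per symbol per stream on HT20.
 * Two streams carry 312 bits per 4 us symbol, i.e. the familiar
 * 78 Mbit/s long-GI PHY rate for MCS 12.
 */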
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);
/*********************/
/* Aggregation logic */
/*********************/
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
        __acquires(&txq->axq_lock)
{
        spin_lock_bh(&txq->axq_lock);
}

void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        spin_unlock_bh(&txq->axq_lock);
}
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        struct sk_buff_head q;
        struct sk_buff *skb;

        __skb_queue_head_init(&q);
        skb_queue_splice_init(&txq->complete_q, &q);
        spin_unlock_bh(&txq->axq_lock);

        while ((skb = __skb_dequeue(&q)))
                ieee80211_tx_status(sc->hw, skb);
}
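
/*
 * Note: completed frames are parked on txq->complete_q and only handed
 * to mac80211 above, after axq_lock has been dropped.  This is
 * presumably to avoid re-entrancy problems: ieee80211_tx_status() may
 * feed new frames straight back into the driver, which would otherwise
 * attempt to take axq_lock recursively.
 */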
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        ath_txq_lock(sc, txq);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        ath_txq_unlock_complete(sc, txq);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
        ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
                           seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;
        bool sendbar = false;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);
                        if (!bf) {
                                ieee80211_free_txskb(sc->hw, skb);
                                continue;
                        }
                }

                if (fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        sendbar = true;
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
        }

        if (tid->baw_head == tid->baw_tail) {
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;
        }

        if (sendbar) {
                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, tid->seq_start);
                ath_txq_lock(sc, txq);
        }
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
                if (tid->bar_index >= 0)
                        tid->bar_index--;
        }
}
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
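
/*
 * Illustrative block-ack window bookkeeping, assuming seq_start = 100
 * and baw_head = 0: a subframe with seqno 103 maps to
 * index = ATH_BA_INDEX(100, 103) = 3, so bit 3 (cindex) is set in
 * tid->tx_buf.  When seqno 100 later completes, ath_tx_update_baw()
 * clears bit 0 and slides seq_start/baw_head forward past every
 * already-completed bit.
 */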
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
        tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb, int count)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf = fi->bf;
        struct ieee80211_hdr *hdr;
        int prev = fi->retries;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        fi->retries += count;

        if (prev > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   sizeof(*hdr), DMA_TO_DEVICE);
}
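
/*
 * The retry bit above is set in a buffer that has already been
 * DMA-mapped, so the CPU write to frame_control has to be flushed back
 * with dma_sync_single_for_device() before the hardware re-reads the
 * header; syncing sizeof(*hdr) bytes suffices since only the frame
 * control field changed.
 */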
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}
static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (!tbf)
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}
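
/*
 * ath_clone_txbuf() backs the software-retry path: the last descriptor
 * of an aggregate may still be held by the hardware as the "stale"
 * holding descriptor, so a fresh ath_buf is populated with the same
 * skb, DMA address and descriptor contents before the frame is
 * requeued for retransmission.
 */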
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
        int i, retries;
        int bar_index = -1;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        retries = ts->ts_longretry + 1;
        for (i = 0; i < ts->ts_rateindex; i++)
                retries += rates[i].count;

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);
        seq_first = tid->seq_start;
        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         */
        if (tidno != ts->tid)
                txok = false;
        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when BA
                         * issue happens. Chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }
        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else if ((tid->state & AGGR_CLEANUP) || !retry) {
                        /*
                         * cleanup in progress, just fail
                         * the un-acked sub-frames
                         */
                        txfail = 1;
                } else if (flush) {
                        txpending = 1;
                } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                        if (txok || !an->sleeping)
                                ath_tx_set_retry(sc, txq, bf->bf_mpdu,
                                                 retries);

                        txpending = 1;
                } else {
                        txfail = 1;
                        txfail_cnt++;
                        bar_index = max_t(int, bar_index,
                                ATH_BA_INDEX(seq_first, seqno));
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        ath_tx_update_baw(sc, tid, seqno);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
                                rc_update = false;
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                !txfail);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                            bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update tx baw and complete the
                                 * frame with failed status if we
                                 * run out of tx buf.
                                 */
                                if (!tbf) {
                                        ath_tx_update_baw(sc, tid, seqno);

                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0);
                                        bar_index = max_t(int, bar_index,
                                                ATH_BA_INDEX(seq_first, seqno));
                                        break;
                                }

                                fi->bf = tbf;
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }
        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);

                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);

                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->ac->clear_ps_filter = true;
                }
        }

        if (bar_index >= 0) {
                u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

                if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
                        tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
                ath_txq_lock(sc, txq);
        }

        if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);

        rcu_read_unlock();

        if (needreset)
                ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, bt_aggr_limit, legacy = 0;
        int q = tid->ac->txq->mac80211_qnum;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms (or TXOP limited) transmit duration.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                int modeidx;

                if (!rates[i].count)
                        continue;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                        legacy = 1;
                        break;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        modeidx = MCS_HT40;
                else
                        modeidx = MCS_HT20;

                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        modeidx++;

                frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
                max_4ms_framelen = min(max_4ms_framelen, frmlen);
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * Override the default aggregation limit for BTCOEX.
         */
        bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
        if (bt_aggr_limit)
                aggr_limit = bt_aggr_limit;

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}
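
/*
 * Illustrative limit, assuming MCS 7 (HT20, long GI, 65 Mbit/s) is the
 * lowest rate in the series: 65e6 bit/s * 4 ms / 8 = 32500 bytes fit in
 * a 4 ms burst, so max_4ms_framelen ends up near 32 KB and the
 * aggregate is capped there unless BTCOEX or the peer's A-MPDU factor
 * imposes a smaller limit.
 */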
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, hardware requires some more padding between
         * subframes.
         * TODO - this could be improved to be dependent on the rate.
         *      The hardware can keep up at lower rates, but not higher rates
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add delimiter when using RTS/CTS with aggregation
         * and non-enterprise AR9003 card
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
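
/*
 * Illustrative delimiter count, assuming an 8 us MPDU density and
 * MCS 7 (HT20, 260 bits/symbol, one stream): nsymbols = 8 / 4 = 2, so
 * minlen = 2 * 260 / 8 = 65 bytes.  A 40-byte subframe then needs
 * mindelim = (65 - 40) / ATH_AGGR_DELIM_SZ = 6 extra 4-byte delimiters
 * to satisfy the density requirement.
 */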
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);

                if (!bf) {
                        __skb_unlink(skb, &tid->buf_q);
                        ieee80211_free_txskb(sc->hw, skb);
                        continue;
                }

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
                        struct ath_tx_status ts = {};
                        struct list_head bf_head;

                        INIT_LIST_HEAD(&bf_head);
                        list_add(&bf->list, &bf_head);
                        __skb_unlink(skb, &tid->buf_q);
                        ath_tx_update_baw(sc, tid, seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        continue;
                }

                if (!bf_first)
                        bf_first = bf;

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
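
/*
 * Illustrative duration, for a 1500-byte MPDU at MCS 7 (HT20, long GI,
 * one stream): nbits = 1500 * 8 + 22 = 12022, nsymbols is
 * 12022 / 260 rounded up = 47, so the data portion lasts
 * SYMBOL_TIME(47) = 188 us, plus the fixed preamble/training fields
 * (36 us for a single stream in HT mixed mode).
 */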
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
        int streams = HT_RC_2_STREAMS(mcs);
        int symbols, bits;
        int bytes = 0;

        symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
        bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
        bits -= OFDM_PLCP_BITS;
        bytes = bits / 8;
        bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
        if (bytes > 65532)
                bytes = 65532;

        return bytes;
}
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
        u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
        int mcs;

        /* 4ms is the default (and maximum) duration */
        if (!txop || txop > 4096)
                txop = 4096;

        cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
        cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
        cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
        cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
        for (mcs = 0; mcs < 32; mcs++) {
                cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
                cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
                cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
                cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
        }
}
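
/*
 * Illustrative table entry: for txop = 4000 us at MCS 7, HT20, long GI,
 * symbols = TIME_SYMBOLS(4000) = 1000 and bits = 1000 * 260 - 22,
 * giving roughly 32.5 KB before the small preamble correction.  Each
 * queue thus caches, per MCS and bandwidth/GI combination, the largest
 * aggregate that still fits its TXOP.
 */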
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
        info->rtscts_rate = fi->rtscts_rate;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                 is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;
        u8 density;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        /* update ampdu factor/density, they may have changed. This may happen
         * in HT IBSS when a beacon with HT-info is received after the station
         * has already been added.
         */
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
                density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
                an->mpdudensity = density;
        }

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
        txtid->bar_index = -1;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        ath_txq_lock(sc, txq);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;

        ath_tx_flush_tid(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);

                buffered = !skb_queue_empty(&tid->buf_q);

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                ath_txq_unlock(sc, txq);

                ieee80211_sta_set_buffered(sta, tidno, buffered);
        }
}
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                ath_txq_unlock_complete(sc, txq);
        }
}
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        txtid = ATH_AN_2_TID(an, tid);
        txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
        txtid->state |= AGGR_ADDBA_COMPLETE;
        txtid->state &= ~AGGR_ADDBA_PROGRESS;
        ath_tx_resume_tid(sc, txtid);
}
/********************/
/* Queue Management */
/********************/
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int axq_qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise waiting for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
                struct ath_txq *txq = &sc->tx.txq[axq_qnum];

                txq->axq_qnum = axq_qnum;
                txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                __skb_queue_head_init(&txq->complete_q);
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<axq_qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
        }
        return &sc->tx.txq[axq_qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (cur_conf->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
        return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *list, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        ts.ts_status = ATH9K_TX_FLUSH;
        INIT_LIST_HEAD(&bf_head);

        while (!list_empty(list)) {
                bf = list_first_entry(list, struct ath_buf, list);

                if (bf->bf_stale) {
                        list_del(&bf->list);

                        ath_tx_return_buffer(sc, bf);
                        continue;
                }

                lastbf = bf->bf_lastbf;
                list_cut_position(&bf_head, list, &lastbf->list);

                txq->axq_depth--;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
}
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        ath_txq_lock(sc, txq);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                int idx = txq->txq_tailidx;

                while (!list_empty(&txq->txq_fifo[idx])) {
                        ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
                                           retry_tx);

                        INCR(idx, ATH_TXFIFO_DEPTH);
                }
                txq->txq_tailidx = idx;
        }

        txq->axq_link = NULL;
        txq->axq_tx_inprogress = false;
        ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

        /* flush any pending frames if aggregation is enabled */
        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
                ath_txq_drain_pending_buffers(sc, txq);

        ath_txq_unlock_complete(sc, txq);
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i;
        u32 npend = 0;

        if (test_bit(SC_OP_INVALID, &sc->sc_flags))
                return true;

        ath9k_hw_abort_tx_dma(ah);

        /* Check if any queue remains active */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
                        npend |= BIT(i);
        }

        if (npend)
                ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                /*
                 * The caller will resume queues with ieee80211_wake_queues.
                 * Mark the queue as not stopped to prevent ath_tx_complete
                 * from waking the queue too early.
                 */
                txq = &sc->tx.txq[i];
                txq->stopped = false;
                ath_draintxq(sc, txq, retry_tx);
        }

        return !npend;
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp, *last_ac;
        struct ath_atx_tid *tid, *last_tid;

        if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
            list_empty(&txq->axq_acq) ||
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = false;

                while (!list_empty(&ac->tid_q)) {
                        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
                                               list);
                        list_del(&tid->list);
                        tid->sched = false;

                        if (tid->paused)
                                continue;

                        ath_tx_sched_aggr(sc, txq, tid);

                        /*
                         * add tid to round-robin queue if more frames
                         * are pending for the tid
                         */
                        if (!skb_queue_empty(&tid->buf_q))
                                ath_tx_queue_tid(txq, tid);

                        if (tid == last_tid ||
                            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                                break;
                }

                if (!list_empty(&ac->tid_q) && !ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }

                if (ac == last_ac ||
                    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                        return;
        }
}
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *bf_last;
        bool puttxbuf = false;
        bool edma;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        bf = list_first_entry(head, struct ath_buf, list);
        bf_last = list_entry(head->prev, struct ath_buf, list);

        ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
                txq->axq_qnum, txq->axq_depth);

        if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
                list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                puttxbuf = true;
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link) {
                        ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
                        ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
                                txq->axq_qnum, txq->axq_link,
                                ito64(bf->bf_daddr), bf->bf_desc);
                } else if (!edma)
                        puttxbuf = true;

                txq->axq_link = bf_last->bf_desc;
        }

        if (puttxbuf) {
                TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        }

        if (!edma) {
                TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }

        if (!internal) {
                txq->axq_depth++;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth++;
        }
}
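
/*
 * Two DMA models meet in ath_tx_txqaddbuf(): EDMA chips (AR93xx and
 * later) take buffer chains pushed directly into a per-queue FIFO of
 * up to ATH_TXFIFO_DEPTH entries via ath9k_hw_puttxbuf(), while older
 * chips use a single descriptor list that is extended by patching the
 * previous tail's link pointer (txq->axq_link) and, when the queue was
 * idle, kicked with ath9k_hw_txstart().
 */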
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct sk_buff *skb, struct ath_tx_control *txctl)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
        struct ath_buf *bf;

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
            txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                __skb_queue_tail(&tid->buf_q, skb);
                if (!txctl->an || !txctl->an->sleeping)
                        ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
        if (!bf) {
                ieee80211_free_txskb(sc->hw, skb);
                return;
        }

        bf->bf_state.bf_type = BUF_AMPDU;
        INIT_LIST_HEAD(&bf_head);
        list_add(&bf->list, &bf_head);

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

        /* Queue to h/w without aggregation */
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
        ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
        struct ath_buf *bf;

        bf = fi->bf;

        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;

        bf->bf_lastbf = bf;
        ath_tx_fill_desc(sc, bf, txq, fi->framelen);
        ath_tx_txqaddbuf(sc, txq, &bf_head, false);
        TX_STAT_INC(txq->axq_qnum, queued);
}
static void setup_frame_info(struct ieee80211_hw *hw,
                             struct ieee80211_sta *sta,
                             struct sk_buff *skb,
                             int framelen)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        const struct ieee80211_rate *rate;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
        bool short_preamble = false;

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        if (tx_info->control.vif &&
            tx_info->control.vif->bss_conf.use_short_preamble)
                short_preamble = true;

        rate = ieee80211_get_rts_cts_rate(hw, tx_info);
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

        if (sta)
                an = (struct ath_node *) sta->drv_priv;

        memset(fi, 0, sizeof(*fi));
        if (hw_key)
                fi->keyix = hw_key->hw_key_idx;
        else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
                fi->keyix = an->ps_key;
        else
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
        fi->rtscts_rate = rate->hw_value;
        if (short_preamble)
                fi->rtscts_rate |= rate->hw_value_short;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
            (curchan->channelFlags & CHANNEL_5GHZ) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
        else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
                 IS_CCK_RATE(rate))
                return 0x2;
        else
                return chainmask;
}
/*
 * Assign a descriptor (and sequence number if necessary), and map the
 * buffer for DMA.  Frees the skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath_buf *bf;
        int fragno;
        u16 seqno;

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                ath_dbg(common, XMIT, "TX buffers are full\n");
                return NULL;
        }

        ATH_TXBUF_RESET(bf);

        if (tid) {
                fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
                seqno = tid->seq_next;
                hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

                if (fragno)
                        hdr->seq_ctrl |= cpu_to_le16(fragno);

                if (!ieee80211_has_morefrags(hdr->frame_control))
                        INCR(tid->seq_next, IEEE80211_SEQ_MAX);

                bf->bf_state.seqno = seqno;
        }

        bf->bf_mpdu = skb;

        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                         skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
                bf->bf_mpdu = NULL;
                bf->bf_buf_addr = 0;
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "dma_mapping_error() on TX\n");
                ath_tx_return_buffer(sc, bf);
                return NULL;
        }

        fi->bf = bf;

        return bf;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
                             struct ath_tx_control *txctl)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf;
        u8 tidno;

        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
            ieee80211_is_data_qos(hdr->frame_control)) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
                        IEEE80211_QOS_CTL_TID_MASK;
                tid = ATH_AN_2_TID(txctl->an, tidno);

                WARN_ON(tid->ac->txq != txctl->txq);
        }

        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
                /*
                 * Try aggregation if it's a unicast data frame
                 * and the destination is HT capable.
                 */
                ath_tx_send_ampdu(sc, tid, skb, txctl);
        } else {
                bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
                if (!bf) {
                        if (txctl->paprd)
                                dev_kfree_skb_any(skb);
                        else
                                ieee80211_free_txskb(sc->hw, skb);
                        return;
                }

                bf->bf_state.bfs_paprd = txctl->paprd;

                if (txctl->paprd)
                        bf->bf_state.bfs_paprd_timestamp = jiffies;

                ath_tx_send_normal(sc, txctl->txq, tid, skb);
        }
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        int padpos, padsize;
        int frmlen = skb->len + FCS_LEN;
        int q;

        /* NOTE: sta can be NULL according to net/mac80211.h */
        if (sta)
                txctl->an = (struct ath_node *)sta->drv_priv;

        if (info->control.hw_key)
                frmlen += info->control.hw_key->icv_len;

        /*
         * As a temporary workaround, assign seq# here; this will likely need
         * to be cleaned up to work better with Beacon transmission and virtual
         * BSSes.
         */
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                        sc->tx.seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }

        /* Add the padding after the header if this is not already done */
        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize)
                        return -ENOMEM;

                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
                hdr = (struct ieee80211_hdr *) skb->data;
        }
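
        /*
         * Illustrative padding, for a QoS data frame: the 26-byte header
         * gives padpos = 26 and padsize = 26 & 3 = 2, so two pad bytes
         * are pushed in front and the header is moved up, leaving the
         * payload 4-byte aligned for the hardware.
         */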
        if ((vif && vif->type != NL80211_IFTYPE_AP &&
             vif->type != NL80211_IFTYPE_AP_VLAN) ||
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

        setup_frame_info(hw, sta, skb, frmlen);

        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
         * info are no longer valid (overwritten by the ath_frame_info data).
         */

        q = skb_get_queue_mapping(skb);

        ath_txq_lock(sc, txq);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
            !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
                txq->stopped = true;
        }

        ath_tx_start_dma(sc, skb, txctl);

        ath_txq_unlock(sc, txq);

        return 0;
}
/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int q, padpos, padsize;
        unsigned long flags;

        ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

        if (sc->sc_ah->caldata)
                sc->sc_ah->caldata->paprd_packet_sent = true;

        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;

        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos + padsize) {
                /*
                 * Remove MAC header padding before giving the frame back to
                 * mac80211.
                 */
                memmove(skb->data + padsize, skb->data, padpos);
                skb_pull(skb, padsize);
        }

        spin_lock_irqsave(&sc->sc_pm_lock, flags);
        if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
                sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_dbg(common, PS,
                        "Going back to sleep after having received TX status (0x%lx)\n",
                        sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                        PS_WAIT_FOR_CAB |
                                        PS_WAIT_FOR_PSPOLL_DATA |
                                        PS_WAIT_FOR_TX_ACK));
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

        q = skb_get_queue_mapping(skb);
        if (txq == sc->tx.txq_map[q]) {
                if (WARN_ON(--txq->pending_frames < 0))
                        txq->pending_frames = 0;

                if (txq->stopped &&
                    txq->pending_frames < sc->tx.txq_max_pending[q]) {
                        ieee80211_wake_queue(sc->hw, q);
                        txq->stopped = false;
                }
        }

        __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        unsigned long flags;
        int tx_flags = 0;

        if (!txok)
                tx_flags |= ATH_TX_ERROR;

        if (ts->ts_status & ATH9K_TXERR_FILT)
                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

        dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
        bf->bf_buf_addr = 0;

        if (bf->bf_state.bfs_paprd) {
                if (time_after(jiffies,
                               bf->bf_state.bfs_paprd_timestamp +
                               msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
                        dev_kfree_skb_any(skb);
                else
                        complete(&sc->paprd_complete);
        } else {
                ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
                ath_tx_complete(sc, skb, tx_flags, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
         * accidentally reference it later.
         */
        bf->bf_mpdu = NULL;

        /*
         * Return the list of ath_buf of this mpdu to free queue
         */
        spin_lock_irqsave(&sc->tx.txbuflock, flags);
        list_splice_tail_init(bf_q, &sc->tx.txbuf);
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;

        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;

        tx_rateindex = ts->ts_rateindex;
        WARN_ON(tx_rateindex >= hw->max_rates);

        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

                BUG_ON(nbad > nframes);
        }
        tx_info->status.ampdu_len = nframes;
        tx_info->status.ampdu_ack_len = nframes - nbad;

        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
                /*
                 * If an underrun error is seen, treat it as an excessive
                 * retry only if the max frame trigger level has been reached
                 * (2 KB for single stream, and 4 KB for dual stream).
                 * Adjust the long retry as if the frame was tried
                 * hw->max_rate_tries times to affect how rate control updates
                 * PER for the failed rate.
                 * In case of congestion on the bus penalizing this type of
                 * underruns should help hardware actually transmit new frames
                 * successfully by eventually preferring slower rates.
                 * This itself should also alleviate congestion on the bus.
                 */
                if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
                                             ATH9K_TX_DELIM_UNDERRUN)) &&
                    ieee80211_is_data(hdr->frame_control) &&
                    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
                        tx_info->status.rates[tx_rateindex].count =
                                hw->max_rate_tries;
        }

        for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
                tx_info->status.rates[i].count = 0;
                tx_info->status.rates[i].idx = -1;
        }

        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
{
        int txok;

        txq->axq_depth--;
        txok = !(ts->ts_status & ATH9K_TXERR_MASK);
        txq->axq_tx_inprogress = false;
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth--;

        if (!bf_isampdu(bf)) {
                ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
                ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
        } else
                ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                ath_txq_schedule(sc, txq);
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        struct ath_tx_status ts;
        int status;

        ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                txq->axq_link);

        ath_txq_lock(sc, txq);
        for (;;) {
                if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
                        break;

                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                                ath_txq_schedule(sc, txq);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                /*
                 * There is a race condition that a BH gets scheduled
                 * after sw writes TxE and before hw re-loads the last
                 * descriptor to get the newly chained one.
                 * Software must keep the last DONE descriptor as a
                 * holding descriptor - software does so by marking
                 * it with the STALE flag.
                 */
                bf_held = NULL;
                if (bf->bf_stale) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q))
                                break;

                        bf = list_entry(bf_held->list.next, struct ath_buf,
                                        list);
                }

                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc;

                memset(&ts, 0, sizeof(ts));
                status = ath9k_hw_txprocdesc(ah, ds, &ts);
                if (status == -EINPROGRESS)
                        break;

                TX_STAT_INC(txq->axq_qnum, txprocdesc);

                /*
                 * Remove ath_buf's of the same transmit unit from txq,
                 * however leave the last descriptor back as the holding
                 * descriptor for hw.
                 */
                lastbf->bf_stale = true;
                INIT_LIST_HEAD(&bf_head);
                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
                                &txq->axq_q, lastbf->list.prev);

                if (bf_held) {
                        list_del(&bf_held->list);
                        ath_tx_return_buffer(sc, bf_held);
                }

                ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
        }
        ath_txq_unlock_complete(sc, txq);
}
void ath_tx_tasklet(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
        int i;

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
                        ath_tx_processq(sc, &sc->tx.txq[i]);
        }
}
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
        struct ath_tx_status ts;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct ath_txq *txq;
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        int status;

        for (;;) {
                if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
                        break;

                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
                if (status == -EINPROGRESS)
                        break;
                if (status == -EIO) {
                        ath_dbg(common, XMIT, "Error processing tx status\n");
                        break;
                }

                /* Process beacon completions separately */
                if (ts.qid == sc->beacon.beaconq) {
                        sc->beacon.tx_processed = true;
                        sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
                        continue;
                }

                txq = &sc->tx.txq[ts.qid];

                ath_txq_lock(sc, txq);

                if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                        ath_txq_unlock(sc, txq);
                        return;
                }

                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                      struct ath_buf, list);
                lastbf = bf->bf_lastbf;

                INIT_LIST_HEAD(&bf_head);
                list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
                                  &lastbf->list);

                if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

                        if (!list_empty(&txq->axq_q)) {
                                struct list_head bf_q;

                                INIT_LIST_HEAD(&bf_q);
                                txq->axq_link = NULL;
                                list_splice_tail_init(&txq->axq_q, &bf_q);
                                ath_tx_txqaddbuf(sc, txq, &bf_q, true);
                        }
                }

                ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
                ath_txq_unlock_complete(sc, txq);
        }
}
/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
        struct ath_descdma *dd = &sc->txsdma;
        u8 txs_len = sc->sc_ah->caps.txs_len;

        dd->dd_desc_len = size * txs_len;
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (!dd->dd_desc)
                return -ENOMEM;

        return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
        int err;

        err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
        if (!err)
                ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
                                          sc->txsdma.dd_desc_paddr,
                                          ATH_TXSTATUS_RING_SIZE);

        return err;
}
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_descdma *dd = &sc->txsdma;

        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
}
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int error = 0;

        spin_lock_init(&sc->tx.txbuflock);

        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
                                  "tx", nbufs, 1, 1);
        if (error != 0) {
                ath_err(common,
                        "Failed to allocate tx descriptors: %d\n", error);
                goto err;
        }

        error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
                                  "beacon", ATH_BCBUF, 1, 1);
        if (error != 0) {
                ath_err(common,
                        "Failed to allocate beacon descriptors: %d\n", error);
                goto err;
        }

        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
                if (error)
                        goto err;
        }

err:
        if (error != 0)
                ath_tx_cleanup(sc);

        return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
        if (sc->beacon.bdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_tx_edma_cleanup(sc);
}
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        int tidno, acno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID;
             tidno++, tid++) {
                tid->an        = an;
                tid->tidno     = tidno;
                tid->seq_start = tid->seq_next = 0;
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
                tid->sched     = false;
                tid->paused    = false;
                tid->state &= ~AGGR_CLEANUP;
                __skb_queue_head_init(&tid->buf_q);
                acno = TID_TO_WME_AC(tidno);
                tid->ac = &an->ac[acno];
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_ADDBA_PROGRESS;
        }

        for (acno = 0, ac = &an->ac[acno];
             acno < WME_NUM_AC; acno++, ac++) {
                ac->sched = false;
                ac->txq = sc->tx.txq_map[acno];
                INIT_LIST_HEAD(&ac->tid_q);
        }
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);

                if (tid->sched) {
                        list_del(&tid->list);
                        tid->sched = false;
                }

                if (ac->sched) {
                        list_del(&ac->list);
                        tid->ac->sched = false;
                }

                ath_tid_drain(sc, txq, tid);
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;

                ath_txq_unlock(sc, txq);
        }
}